/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

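/*
 * Activity tracking: __i915_vma_active() is invoked when vma->active first
 * becomes active and takes an extra reference on the vma, which
 * __i915_vma_retire() drops once the last request using the vma has been
 * retired. A rough sketch of the lifetime this enforces (illustrative only):
 *
 *	i915_active_acquire(&vma->active);  // __i915_vma_active -> tryget
 *	i915_active_add_request(&vma->active, rq);
 *	...rq retires...                    // __i915_vma_retire -> put
 *
 * so the vma cannot be freed while the GPU may still be using it.
 */
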
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

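/*
 * A typical caller pairs the lookup with a pin, e.g. (a sketch with error
 * handling elided; the flags shown are illustrative):
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *
 * Repeated calls with the same (obj, vm, view) triple return the same vma,
 * so callers need not cache it themselves.
 */
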
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_vma *vma;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;
	int err;

	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
	if (err)
		atomic_or(I915_VMA_ERROR, &vma->flags);

	if (vma->obj)
		__i915_gem_object_unpin_pages(vma->obj);

	return err;
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

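/*
 * The returned worker starts out with dma.error = -EAGAIN so that
 * committing it without ever arming it is a no-op. The expected pattern
 * (a sketch of how i915_vma_pin() drives it) is:
 *
 *	work = i915_vma_work();
 *	err = i915_vma_bind(vma, cache_level, flags, work);
 *	...
 *	dma_fence_work_commit(&work->base); // runs or discards __vma_bind()
 *
 * i915_vma_bind() clears dma.error back to 0 only if it actually queues an
 * async bind, which is what "enables" the worker.
 */
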
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags | I915_VMA_ALLOC;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
		i915_active_set_exclusive(&vma->active, &work->base.dma);
		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj)
			__i915_gem_object_pin_pages(vma->obj);
	} else {
		GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
		if (ret)
			return ret;
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

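/*
 * Typical usage of the iomap (a sketch; requires a GGTT-bound vma and an
 * rpm wakeref held by the caller, as asserted above):
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma);
 *
 * The mapping itself is cached in vma->iomap and only torn down when the
 * vma is unbound, so repeated pin/unpin cycles are cheap.
 */
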
void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		atomic_inc(&obj->bind_count);
		assert_bind_count(obj);
	}
	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		assert_bind_count(obj);
		atomic_dec(&obj->bind_count);
	}
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

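/*
 * "Quick and dirty" pinning: the common case is that the vma is already
 * bound with the requested flags, in which case we only need to bump the
 * pin count stored in the low bits of vma->flags. The cmpxchg loop above
 * is conceptually (ignoring the overflow/error checks):
 *
 *	old = atomic_read(&vma->flags);
 *	if ((old & flags) == flags && (old & I915_VMA_PIN_MASK))
 *		atomic_cmpxchg(&vma->flags, old, old + 1);
 *
 * Only if the vma is unpinned-but-bound do we fall back to vm->mutex to
 * serialise against a concurrent i915_vma_unbind().
 */
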
static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

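/*
 * vma->pages_count is split into two fields: the low I915_VMA_PAGES_BIAS
 * bits count callers of vma_get_pages(), while the upper bits count live
 * bindings. Assuming I915_VMA_PAGES_ACTIVE == (BIT(I915_VMA_PAGES_BIAS) | 1),
 * as the atomic_add() in i915_vma_pin() suggests, each binding also holds
 * one low-bits reference of its own; a vma that is bound once and has one
 * extra pages reference would then hold (1 << I915_VMA_PAGES_BIAS) + 2,
 * and vma_unbind_pages() releases the bindings' whole share in one go.
 */
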
int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	unsigned int bound;
	int err;

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(flags & PIN_UPDATE);
	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_pages;
		}
	}

	/* No more allocations allowed once we hold vm->mutex */
	err = mutex_lock_interruptible(&vma->vm->mutex);
	if (err)
		goto err_fence;

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit(&work->base);
err_pages:
	vma_put_pages(vma);
	return err;
}

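/*
 * Note that the success path of i915_vma_pin() deliberately falls through
 * the err_* labels: with err == 0 and the vma now bound, i915_vma_detach()
 * is skipped, the temporary active reference is dropped, and the final
 * vma_put_pages() merely balances the vma_get_pages() above (the binding
 * keeps its own references via I915_VMA_PAGES_ACTIVE). A caller thus sees
 * either a fully bound, pinned vma or an error:
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *	if (err == 0) {
 *		... use vma->node.start ...
 *		i915_vma_unpin(vma);
 *	}
 */
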
void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	spin_lock_irqsave(&gt->closed_lock, flags);
	list_add(&vma->closed_link, &gt->closed_vma);
	spin_unlock_irqrestore(&gt->closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			obj = NULL;
		}

		spin_unlock_irq(&gt->closed_lock);

		if (obj) {
			i915_vma_destroy(vma);
			i915_gem_object_put(obj);
		}

		i915_vm_close(vm);

		/* Restart after dropping lock */
		spin_lock_irq(&gt->closed_lock);
		next = list_first_entry(&gt->closed_vma,
					typeof(*next), closed_link);
	}
	spin_unlock_irq(&gt->closed_lock);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	/* Wait for the vma to be bound before we start! */
	err = i915_request_await_active(rq, &vma->active);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
			i915_active_add_request(&obj->frontbuffer->write, rq);

		dma_resv_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		err = dma_resv_reserve_shared(vma->resv, 1);
		if (unlikely(err))
			return err;

		dma_resv_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 *
	 * XXX Actually waiting under the vm->mutex is a hindrance and
	 * should be pipelined wherever possible. In cases where that is
	 * unavoidable, we should lift the wait to before the mutex.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	GEM_BUG_ON(i915_vma_is_active(vma));
	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_revoke_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	err = mutex_lock_interruptible(&vm->mutex);
	if (err)
		return err;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

	return err;
}

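/*
 * i915_vma_unbind() is the locked wrapper: it takes vm->mutex itself,
 * whereas __i915_vma_unbind() is for callers that already hold it (such
 * as i915_vma_destroy() above). That is,
 *
 *	mutex_lock(&vm->mutex);
 *	err = __i915_vma_unbind(vma);
 *	mutex_unlock(&vm->mutex);
 *
 * is equivalent to a plain i915_vma_unbind(vma), modulo the interruptible
 * lock acquisition.
 */
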
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}