/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
        struct i915_global base;
        struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
        return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
        return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
        unsigned long *entries;
        unsigned int nr_entries;
        char buf[512];

        if (!vma->node.stack) {
                DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
                                 vma->node.start, vma->node.size, reason);
                return;
        }

        nr_entries = stack_depot_fetch(vma->node.stack, &entries);
        stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
        DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
                         vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
        return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
        return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
        i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;
        struct rb_node *rb, **p;

        /* The aliasing_ppgtt should never be used directly! */
        GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

        vma = i915_vma_alloc();
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);

        kref_init(&vma->ref);
        mutex_init(&vma->pages_mutex);
        vma->vm = i915_vm_get(vm);
        vma->ops = &vm->vma_ops;
        vma->obj = obj;
        vma->resv = obj->base.resv;
        vma->size = obj->base.size;
        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

        i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

        /* Declare ourselves safe for use inside shrinkers */
        if (IS_ENABLED(CONFIG_LOCKDEP)) {
                fs_reclaim_acquire(GFP_KERNEL);
                might_lock(&vma->active.mutex);
                fs_reclaim_release(GFP_KERNEL);
        }

        INIT_LIST_HEAD(&vma->closed_link);

        if (view && view->type != I915_GGTT_VIEW_NORMAL) {
                vma->ggtt_view = *view;
                if (view->type == I915_GGTT_VIEW_PARTIAL) {
                        GEM_BUG_ON(range_overflows_t(u64,
                                                     view->partial.offset,
                                                     view->partial.size,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
                        GEM_BUG_ON(vma->size > obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
                } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
                        vma->size = intel_remapped_info_size(&view->remapped);
                        vma->size <<= PAGE_SHIFT;
                }
        }

        if (unlikely(vma->size > vm->total))
                goto err_vma;

        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
                        goto err_vma;

                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
                        goto err_vma;

                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

                vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                i915_gem_object_get_tiling(obj),
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

                __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
        }

        spin_lock(&obj->vma.lock);

        rb = NULL;
        p = &obj->vma.tree.rb_node;
        while (*p) {
                struct i915_vma *pos;
                long cmp;

                rb = *p;
                pos = rb_entry(rb, struct i915_vma, obj_node);

                /*
                 * If the view already exists in the tree, another thread
                 * already created a matching vma, so return the older instance
                 * and dispose of ours.
                 */
                cmp = i915_vma_compare(pos, vm, view);
                if (cmp == 0) {
                        spin_unlock(&obj->vma.lock);
                        i915_vma_free(vma);
                        return pos;
                }

                if (cmp < 0)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma.tree);

        if (i915_vma_is_ggtt(vma))
                /*
                 * We put the GGTT vma at the start of the vma-list, followed
                 * by the ppGTT vma. This allows us to break early when
                 * iterating over only the GGTT vma for an object, see
                 * for_each_ggtt_vma()
                 */
                list_add(&vma->obj_link, &obj->vma.list);
        else
                list_add_tail(&vma->obj_link, &obj->vma.list);

        spin_unlock(&obj->vma.lock);

        return vma;

err_vma:
        i915_vma_free(vma);
        return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct rb_node *rb;

        rb = obj->vma.tree.rb_node;
        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp;

                cmp = i915_vma_compare(vma, vm, view);
                if (cmp == 0)
                        return vma;

                if (cmp < 0)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
                  const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;

        GEM_BUG_ON(view && !i915_is_ggtt(vm));
        GEM_BUG_ON(!atomic_read(&vm->open));

        spin_lock(&obj->vma.lock);
        vma = vma_lookup(obj, vm, view);
        spin_unlock(&obj->vma.lock);

        /* vma_create() will resolve the race if another creates the vma */
        if (unlikely(!vma))
                vma = vma_create(obj, vm, view);

        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
        return vma;
}

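/*
 * Example (illustrative sketch, not code called by the driver): a typical
 * user looks up the singleton vma of an object in an address space, pins it
 * to allocate and bind GTT space, and unpins it once the GPU work has been
 * queued. "obj" and "vm" are assumed to be held by the caller.
 *
 *      vma = i915_vma_instance(obj, vm, NULL);
 *      if (IS_ERR(vma))
 *              return PTR_ERR(vma);
 *
 *      err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *      if (err)
 *              return err;
 *
 *      ... emit GPU commands using vma->node.start ...
 *
 *      i915_vma_unpin(vma);
 */
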
struct i915_vma_work {
        struct dma_fence_work base;
        struct i915_vma *vma;
        struct drm_i915_gem_object *pinned;
        struct i915_sw_dma_fence_cb cb;
        enum i915_cache_level cache_level;
        unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
        struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
        struct i915_vma *vma = vw->vma;
        int err;

        err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
        if (err)
                atomic_or(I915_VMA_ERROR, &vma->flags);

        return err;
}

static void __vma_release(struct dma_fence_work *work)
{
        struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

        if (vw->pinned)
                __i915_gem_object_unpin_pages(vw->pinned);
}

static const struct dma_fence_work_ops bind_ops = {
        .name = "bind",
        .work = __vma_bind,
        .release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
        struct i915_vma_work *vw;

        vw = kzalloc(sizeof(*vw), GFP_KERNEL);
        if (!vw)
                return NULL;

        dma_fence_work_init(&vw->base, &bind_ops);
        vw->base.dma.error = -EAGAIN; /* disable the worker by default */

        return vw;
}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
        int err = 0;

        if (rcu_access_pointer(vma->active.excl.fence)) {
                struct dma_fence *fence;

                rcu_read_lock();
                fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
                rcu_read_unlock();
                if (fence) {
                        err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
                        dma_fence_put(fence);
                }
        }

        return err;
}

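/*
 * Example (illustrative sketch only): a caller that has pinned a vma with an
 * asynchronous bind (see i915_vma_bind() below) and needs the PTEs to be
 * valid before touching the mapping from the CPU can wait on the exclusive
 * bind fence first; this mirrors what i915_ggtt_pin() does internally.
 *
 *      err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *      if (err)
 *              return err;
 *
 *      err = i915_vma_wait_for_bind(vma);
 *      if (err) {
 *              i915_vma_unpin(vma);
 *              return err;
 *      }
 */
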
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
                  enum i915_cache_level cache_level,
                  u32 flags,
                  struct i915_vma_work *work)
{
        u32 bind_flags;
        u32 vma_flags;
        int ret;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->size > vma->node.size);

        if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
                                              vma->node.size,
                                              vma->vm->total)))
                return -ENODEV;

        if (GEM_DEBUG_WARN_ON(!flags))
                return -EINVAL;

        bind_flags = flags;
        bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

        vma_flags = atomic_read(&vma->flags);
        vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
        if (flags & PIN_UPDATE)
                bind_flags |= vma_flags;
        else
                bind_flags &= ~vma_flags;
        if (bind_flags == 0)
                return 0;

        GEM_BUG_ON(!vma->pages);

        trace_i915_vma_bind(vma, bind_flags);
        if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
                struct dma_fence *prev;

                work->vma = vma;
                work->cache_level = cache_level;
                work->flags = bind_flags | I915_VMA_ALLOC;

                /*
                 * Note we only want to chain up to the migration fence on
                 * the pages (not the object itself). As we don't track that,
                 * yet, we have to use the exclusive fence instead.
                 *
                 * Also note that we do not want to track the async vma as
                 * part of the obj->resv->excl_fence as it only affects
                 * execution and not content or object's backing store lifetime.
                 */
                prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
                if (prev) {
                        __i915_sw_fence_await_dma_fence(&work->base.chain,
                                                        prev,
                                                        &work->cb);
                        dma_fence_put(prev);
                }

                work->base.dma.error = 0; /* enable the queue_work() */

                if (vma->obj) {
                        __i915_gem_object_pin_pages(vma->obj);
                        work->pinned = vma->obj;
                }
        } else {
                ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
                if (ret)
                        return ret;
        }

        atomic_or(bind_flags, &vma->flags);
        return 0;
}

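/*
 * Example (illustrative sketch only, based on how i915_vma_pin() drives the
 * async path below): the caller preallocates the worker outside vm->mutex,
 * passes it to i915_vma_bind() under the mutex, and then commits the fence
 * work so the queued bind either runs or is cancelled with its error. The
 * prerequisite node and page setup performed by i915_vma_pin() is elided.
 *
 *      work = i915_vma_work();
 *      if (!work)
 *              return -ENOMEM;
 *
 *      mutex_lock(&vma->vm->mutex);
 *      err = i915_vma_bind(vma, cache_level, PIN_USER, work);
 *      mutex_unlock(&vma->vm->mutex);
 *
 *      dma_fence_work_commit_imm(&work->base);
 */
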
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
        void __iomem *ptr;
        int err;

        if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
                err = -ENODEV;
                goto err;
        }

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

        ptr = READ_ONCE(vma->iomap);
        if (ptr == NULL) {
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL) {
                        err = -ENOMEM;
                        goto err;
                }

                if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
                        io_mapping_unmap(ptr);
                        ptr = vma->iomap;
                }
        }

        __i915_vma_pin(vma);

        err = i915_vma_pin_fence(vma);
        if (err)
                goto err_unpin;

        i915_vma_set_ggtt_write(vma);

        /* NB Access through the GTT requires the device to be awake. */
        return ptr;

err_unpin:
        __i915_vma_unpin(vma);
err:
        return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
        if (i915_vma_unset_ggtt_write(vma))
                intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
        GEM_BUG_ON(vma->iomap == NULL);

        i915_vma_flush_writes(vma);

        i915_vma_unpin_fence(vma);
        i915_vma_unpin(vma);
}

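/*
 * Example (illustrative sketch only): CPU writes through the mappable GGTT
 * aperture bracket the access with pin_iomap/unpin_iomap. The vma must
 * already be pinned in the mappable region (e.g. PIN_GLOBAL | PIN_MAPPABLE);
 * "data" and "len" are assumptions standing in for the caller's payload.
 *
 *      ptr = i915_vma_pin_iomap(vma);
 *      if (IS_ERR(ptr))
 *              return PTR_ERR(ptr);
 *
 *      memcpy_toio(ptr, data, len);
 *
 *      i915_vma_unpin_iomap(vma);
 */
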
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;

        vma = fetch_and_zero(p_vma);
        if (!vma)
                return;

        obj = vma->obj;
        GEM_BUG_ON(!obj);

        i915_vma_unpin(vma);
        i915_vma_close(vma);

        if (flags & I915_VMA_RELEASE_MAP)
                i915_gem_object_unpin_map(obj);

        i915_gem_object_put(obj);
}

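/*
 * Example (illustrative sketch only): a holder of a pinned vma whose object
 * reference it owns can drop everything in one call. The object reference is
 * consumed (see the i915_gem_object_put() above), and I915_VMA_RELEASE_MAP
 * additionally drops a kept kernel mapping. "state" is a hypothetical
 * container used only for illustration.
 *
 *      i915_vma_unpin_and_release(&state->vma, I915_VMA_RELEASE_MAP);
 */
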
bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags)
{
        if (!drm_mm_node_allocated(&vma->node))
                return false;

        if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
                return true;

        if (vma->node.size < size)
                return true;

        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;

        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        if (flags & PIN_OFFSET_FIXED &&
            vma->node.start != (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}

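/*
 * Example (illustrative sketch only): callers needing a particular placement
 * typically test an existing binding with i915_vma_misplaced() and rebind
 * the vma when it no longer satisfies the requested constraints.
 *
 *      if (i915_vma_misplaced(vma, size, alignment, flags)) {
 *              err = i915_vma_unbind(vma);
 *              if (err)
 *                      return err;
 *      }
 *
 *      return i915_vma_pin(vma, size, alignment, flags);
 */
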
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
        bool mappable, fenceable;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);

        fenceable = (vma->node.size >= vma->fence_size &&
                     IS_ALIGNED(vma->node.start, vma->fence_alignment));

        mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

        if (mappable && fenceable)
                set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
        else
                clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
        struct drm_mm_node *node = &vma->node;
        struct drm_mm_node *other;

        /*
         * On some machines we have to be careful when putting differing types
         * of snoopable memory together to avoid the prefetcher crossing memory
         * domains and dying. During vm initialisation, we decide whether or not
         * these constraints apply and set the drm_mm.color_adjust
         * appropriately.
         */
        if (!i915_vm_has_cache_coloring(vma->vm))
                return true;

        /* Only valid to be called on an already inserted vma */
        GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(list_empty(&node->node_list));

        other = list_prev_entry(node, node_list);
        if (i915_node_color_differs(other, color) &&
            !drm_mm_hole_follows(other))
                return false;

        other = list_next_entry(node, node_list);
        if (i915_node_color_differs(other, color) &&
            !drm_mm_hole_follows(node))
                return false;

        return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
        /*
         * Combine the assertion that the object is bound and that we have
         * pinned its pages. But we should never have bound the object
         * more than we have pinned its pages. (For complete accuracy, we
         * assume that no one else is pinning the pages, but as a rough
         * assertion that we will not run into problems later, this will do!)
         */
        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        unsigned long color;
        u64 start, end;
        int ret;

        GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

        size = max(size, vma->size);
        alignment = max(alignment, vma->display_alignment);
        if (flags & PIN_MAPPABLE) {
                size = max_t(typeof(size), size, vma->fence_size);
                alignment = max_t(typeof(alignment),
                                  alignment, vma->fence_alignment);
        }

        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(!is_power_of_2(alignment));

        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
        if (size > end) {
                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
                          size, flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -ENOSPC;
        }

        color = 0;
        if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
                color = vma->obj->cache_level;

        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;
                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end))
                        return -EINVAL;

                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                                           size, offset, color,
                                           flags);
                if (ret)
                        return ret;
        } else {
                /*
                 * We only support huge gtt pages through the 48b PPGTT,
                 * however we also don't want to force any alignment for
                 * objects which need to be tightly packed into the low 32bits.
                 *
                 * Note that we assume that GGTT are limited to 4GiB for the
                 * foreseeable future. See also i915_ggtt_offset().
                 */
                if (upper_32_bits(end - 1) &&
                    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        /*
                         * We can't mix 64K and 4K PTEs in the same page-table
                         * (2M block), and so to avoid the ugliness and
                         * complexity of coloring we opt for just aligning 64K
                         * objects to 2M.
                         */
                        u64 page_alignment =
                                rounddown_pow_of_two(vma->page_sizes.sg |
                                                     I915_GTT_PAGE_SIZE_2M);

                        /*
                         * Check we don't expand for the limited Global GTT
                         * (mappable aperture is even more precious!). This
                         * also checks that we exclude the aliasing-ppgtt.
                         */
                        GEM_BUG_ON(i915_vma_is_ggtt(vma));

                        alignment = max(alignment, page_alignment);

                        if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
                                size = round_up(size, I915_GTT_PAGE_SIZE_2M);
                }

                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, color,
                                          start, end, flags);
                if (ret)
                        return ret;

                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

        if (vma->obj) {
                struct drm_i915_gem_object *obj = vma->obj;

                atomic_inc(&obj->bind_count);
                assert_bind_count(obj);
        }
        list_add_tail(&vma->vm_link, &vma->vm->bound_list);

        return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

        /*
         * And finally now the object is completely decoupled from this
         * vma, we can drop its hold on the backing storage and allow
         * it to be reaped by the shrinker.
         */
        list_del(&vma->vm_link);
        if (vma->obj) {
                struct drm_i915_gem_object *obj = vma->obj;

                assert_bind_count(obj);
                atomic_dec(&obj->bind_count);
        }
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
        unsigned int bound;
        bool pinned = true;

        bound = atomic_read(&vma->flags);
        do {
                if (unlikely(flags & ~bound))
                        return false;

                if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
                        return false;

                if (!(bound & I915_VMA_PIN_MASK))
                        goto unpinned;

                GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
        } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

        return true;

unpinned:
        /*
         * If pin_count==0, but we are bound, check under the lock to avoid
         * racing with a concurrent i915_vma_unbind().
         */
        mutex_lock(&vma->vm->mutex);
        do {
                if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
                        pinned = false;
                        break;
                }

                if (unlikely(flags & ~bound)) {
                        pinned = false;
                        break;
                }
        } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
        mutex_unlock(&vma->vm->mutex);

        return pinned;
}

static int vma_get_pages(struct i915_vma *vma)
{
        int err = 0;

        if (atomic_add_unless(&vma->pages_count, 1, 0))
                return 0;

        /* Allocations ahoy! */
        if (mutex_lock_interruptible(&vma->pages_mutex))
                return -EINTR;

        if (!atomic_read(&vma->pages_count)) {
                if (vma->obj) {
                        err = i915_gem_object_pin_pages(vma->obj);
                        if (err)
                                goto unlock;
                }

                err = vma->ops->set_pages(vma);
                if (err) {
                        if (vma->obj)
                                i915_gem_object_unpin_pages(vma->obj);
                        goto unlock;
                }
        }
        atomic_inc(&vma->pages_count);

unlock:
        mutex_unlock(&vma->pages_mutex);

        return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
        /* We allocate under vma_get_pages, so beware the shrinker */
        mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
        GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
        if (atomic_sub_return(count, &vma->pages_count) == 0) {
                vma->ops->clear_pages(vma);
                GEM_BUG_ON(vma->pages);
                if (vma->obj)
                        i915_gem_object_unpin_pages(vma->obj);
        }
        mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
        if (atomic_add_unless(&vma->pages_count, -1, 1))
                return;

        __vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
        unsigned int count;

        lockdep_assert_held(&vma->vm->mutex);

        /* The upper portion of pages_count is the number of bindings */
        count = atomic_read(&vma->pages_count);
        count >>= I915_VMA_PAGES_BIAS;
        GEM_BUG_ON(!count);

        __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        struct i915_vma_work *work = NULL;
        intel_wakeref_t wakeref = 0;
        unsigned int bound;
        int err;

        BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
        BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

        GEM_BUG_ON(flags & PIN_UPDATE);
        GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

        /* First try and grab the pin without rebinding the vma */
        if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
                return 0;

        err = vma_get_pages(vma);
        if (err)
                return err;

        if (flags & vma->vm->bind_async_flags) {
                work = i915_vma_work();
                if (!work) {
                        err = -ENOMEM;
                        goto err_pages;
                }
        }

        if (flags & PIN_GLOBAL)
                wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

        /* No more allocations allowed once we hold vm->mutex */
        err = mutex_lock_interruptible(&vma->vm->mutex);
        if (err)
                goto err_fence;

        if (unlikely(i915_vma_is_closed(vma))) {
                err = -ENOENT;
                goto err_unlock;
        }

        bound = atomic_read(&vma->flags);
        if (unlikely(bound & I915_VMA_ERROR)) {
                err = -ENOMEM;
                goto err_unlock;
        }

        if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
                err = -EAGAIN; /* pins are meant to be fairly temporary */
                goto err_unlock;
        }

        if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
                __i915_vma_pin(vma);
                goto err_unlock;
        }

        err = i915_active_acquire(&vma->active);
        if (err)
                goto err_unlock;

        if (!(bound & I915_VMA_BIND_MASK)) {
                err = i915_vma_insert(vma, size, alignment, flags);
                if (err)
                        goto err_active;

                if (i915_is_ggtt(vma->vm))
                        __i915_vma_set_map_and_fenceable(vma);
        }

        GEM_BUG_ON(!vma->pages);
        err = i915_vma_bind(vma,
                            vma->obj ? vma->obj->cache_level : 0,
                            flags, work);
        if (err)
                goto err_remove;

        /* There should only be at most 2 active bindings (user, global) */
        GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
        atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
        list_move_tail(&vma->vm_link, &vma->vm->bound_list);

        __i915_vma_pin(vma);
        GEM_BUG_ON(!i915_vma_is_pinned(vma));
        GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
        if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
                i915_vma_detach(vma);
                drm_mm_remove_node(&vma->node);
        }
err_active:
        i915_active_release(&vma->active);
err_unlock:
        mutex_unlock(&vma->vm->mutex);
err_fence:
        if (work)
                dma_fence_work_commit_imm(&work->base);
        if (wakeref)
                intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
        vma_put_pages(vma);
        return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, gt, id)
                intel_engine_flush_barriers(engine);

        intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
{
        struct i915_address_space *vm = vma->vm;
        int err;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));

        do {
                err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
                if (err != -ENOSPC) {
                        if (!err) {
                                err = i915_vma_wait_for_bind(vma);
                                if (err)
                                        i915_vma_unpin(vma);
                        }
                        return err;
                }

                /* Unlike i915_vma_pin, we don't take no for an answer! */
                flush_idle_contexts(vm->gt);
                if (mutex_lock_interruptible(&vm->mutex) == 0) {
                        i915_gem_evict_vm(vm);
                        mutex_unlock(&vm->mutex);
                }
        } while (1);
}

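/*
 * Example (illustrative sketch only): kernel-internal objects that must live
 * in the global GTT (ringbuffers, status pages and the like) are typically
 * mapped with i915_ggtt_pin(), which retries with eviction until space is
 * found. "obj" and "ggtt" are assumed to be owned by the caller, and the
 * placement flag shown is just one possible choice.
 *
 *      vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *      if (IS_ERR(vma))
 *              return PTR_ERR(vma);
 *
 *      err = i915_ggtt_pin(vma, 0, PIN_HIGH);
 *      if (err)
 *              return err;
 */
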
void i915_vma_close(struct i915_vma *vma)
{
        struct intel_gt *gt = vma->vm->gt;
        unsigned long flags;

        GEM_BUG_ON(i915_vma_is_closed(vma));

        /*
         * We defer actually closing, unbinding and destroying the VMA until
         * the next idle point, or if the object is freed in the meantime. By
         * postponing the unbind, we allow for it to be resurrected by the
         * client, avoiding the work required to rebind the VMA. This is
         * advantageous for DRI, where the client/server pass objects
         * between themselves, temporarily opening a local VMA to the
         * object, and then closing it again. The same object is then reused
         * on the next frame (or two, depending on the depth of the swap queue)
         * causing us to rebind the VMA once more. This ends up being a lot
         * of wasted work for the steady state.
         */
        spin_lock_irqsave(&gt->closed_lock, flags);
        list_add(&vma->closed_link, &gt->closed_vma);
        spin_unlock_irqrestore(&gt->closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
        struct intel_gt *gt = vma->vm->gt;

        spin_lock_irq(&gt->closed_lock);
        list_del_init(&vma->closed_link);
        spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
        if (i915_vma_is_closed(vma))
                __i915_vma_remove_closed(vma);
}

void i915_vma_release(struct kref *ref)
{
        struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

        if (drm_mm_node_allocated(&vma->node)) {
                mutex_lock(&vma->vm->mutex);
                atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
                WARN_ON(__i915_vma_unbind(vma));
                mutex_unlock(&vma->vm->mutex);
                GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
        }
        GEM_BUG_ON(i915_vma_is_active(vma));

        if (vma->obj) {
                struct drm_i915_gem_object *obj = vma->obj;

                spin_lock(&obj->vma.lock);
                list_del(&vma->obj_link);
                rb_erase(&vma->obj_node, &obj->vma.tree);
                spin_unlock(&obj->vma.lock);
        }

        __i915_vma_remove_closed(vma);
        i915_vm_put(vma->vm);

        i915_active_fini(&vma->active);
        i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
        struct i915_vma *vma, *next;
        LIST_HEAD(closed);

        spin_lock_irq(&gt->closed_lock);
        list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
                struct drm_i915_gem_object *obj = vma->obj;
                struct i915_address_space *vm = vma->vm;

                /* XXX All to avoid keeping a reference on i915_vma itself */

                if (!kref_get_unless_zero(&obj->base.refcount))
                        continue;

                if (!i915_vm_tryopen(vm)) {
                        i915_gem_object_put(obj);
                        continue;
                }

                list_move(&vma->closed_link, &closed);
        }
        spin_unlock_irq(&gt->closed_lock);

        /* As the GT is held idle, no vma can be reopened as we destroy them */
        list_for_each_entry_safe(vma, next, &closed, closed_link) {
                struct drm_i915_gem_object *obj = vma->obj;
                struct i915_address_space *vm = vma->vm;

                INIT_LIST_HEAD(&vma->closed_link);
                __i915_vma_put(vma);

                i915_gem_object_put(obj);
                i915_vm_close(vm);
        }
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (vma->iomap == NULL)
                return;

        io_mapping_unmap(vma->iomap);
        vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
        struct drm_vma_offset_node *node;
        u64 vma_offset;

        if (!i915_vma_has_userfault(vma))
                return;

        GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
        GEM_BUG_ON(!vma->obj->userfault_count);

        node = &vma->mmo->vma_node;
        vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
        unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
                            drm_vma_node_offset_addr(node) + vma_offset,
                            vma->size,
                            1);

        i915_vma_unset_userfault(vma);
        if (!--vma->obj->userfault_count)
                list_del(&vma->obj->userfault_link);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
        int err;

        GEM_BUG_ON(!i915_vma_is_pinned(vma));

        /* Wait for the vma to be bound before we start! */
        err = i915_request_await_active(rq, &vma->active, 0);
        if (err)
                return err;

        return i915_active_add_request(&vma->active, rq);
}

int i915_vma_move_to_active(struct i915_vma *vma,
                            struct i915_request *rq,
                            unsigned int flags)
{
        struct drm_i915_gem_object *obj = vma->obj;
        int err;

        assert_object_held(obj);

        err = __i915_vma_move_to_active(vma, rq);
        if (unlikely(err))
                return err;

        if (flags & EXEC_OBJECT_WRITE) {
                struct intel_frontbuffer *front;

                front = __intel_frontbuffer_get(obj);
                if (unlikely(front)) {
                        if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
                                i915_active_add_request(&front->write, rq);
                        intel_frontbuffer_put(front);
                }

                dma_resv_add_excl_fence(vma->resv, &rq->fence);
                obj->write_domain = I915_GEM_DOMAIN_RENDER;
                obj->read_domains = 0;
        } else {
                err = dma_resv_reserve_shared(vma->resv, 1);
                if (unlikely(err))
                        return err;

                dma_resv_add_shared_fence(vma->resv, &rq->fence);
                obj->write_domain = 0;
        }
        obj->read_domains |= I915_GEM_GPU_DOMAINS;
        obj->mm.dirty = true;

        GEM_BUG_ON(!i915_vma_is_active(vma));
        return 0;
}

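/*
 * Example (illustrative sketch only): during request construction, each vma
 * that the request will touch is pinned, marked active against the request,
 * and then unpinned once the active tracking and reservation fences hold the
 * binding alive. The object lock (assert_object_held() above) must already
 * be taken by the caller; "rq" is the request under construction.
 *
 *      err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *      if (err)
 *              return err;
 *
 *      err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *      i915_vma_unpin(vma);
 */
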
int __i915_vma_unbind(struct i915_vma *vma)
{
        int ret;

        lockdep_assert_held(&vma->vm->mutex);

        /*
         * First wait upon any activity as retiring the request may
         * have side-effects such as unpinning or even unbinding this vma.
         *
         * XXX Actually waiting under the vm->mutex is a hindrance and
         * should be pipelined wherever possible. In cases where that is
         * unavoidable, we should lift the wait to before the mutex.
         */
        ret = i915_vma_sync(vma);
        if (ret)
                return ret;

        if (i915_vma_is_pinned(vma)) {
                vma_print_allocator(vma, "is pinned");
                return -EAGAIN;
        }

        /*
         * After confirming that no one else is pinning this vma, wait for
         * any laggards who may have crept in during the wait (through
         * a residual pin skipping the vm->mutex) to complete.
         */
        ret = i915_vma_sync(vma);
        if (ret)
                return ret;

        if (!drm_mm_node_allocated(&vma->node))
                return 0;

        GEM_BUG_ON(i915_vma_is_pinned(vma));
        GEM_BUG_ON(i915_vma_is_active(vma));

        if (i915_vma_is_map_and_fenceable(vma)) {
                /*
                 * Check that we have flushed all writes through the GGTT
                 * before the unbind; otherwise, due to the non-strict nature
                 * of those indirect writes, they may end up referencing the
                 * GGTT PTE after the unbind.
                 *
                 * Note that we may be concurrently poking at the GGTT_WRITE
                 * bit from set-domain, as we mark all GGTT vma associated
                 * with an object. We know this is for another vma, as we
                 * are currently unbinding this one -- so if this vma will be
                 * reused, it will be refaulted and have its dirty bit set
                 * before the next write.
                 */
                i915_vma_flush_writes(vma);

                /* release the fence reg _after_ flushing */
                ret = i915_vma_revoke_fence(vma);
                if (ret)
                        return ret;

                /* Force a pagefault for domain tracking on next user access */
                i915_vma_revoke_mmap(vma);

                __i915_vma_iounmap(vma);
                clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
        }
        GEM_BUG_ON(vma->fence);
        GEM_BUG_ON(i915_vma_has_userfault(vma));

        if (likely(atomic_read(&vma->vm->open))) {
                trace_i915_vma_unbind(vma);
                vma->ops->unbind_vma(vma);
        }
        atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
                   &vma->flags);

        i915_vma_detach(vma);
        vma_unbind_pages(vma);

        drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
        return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
        struct i915_address_space *vm = vma->vm;
        intel_wakeref_t wakeref = 0;
        int err;

        if (!drm_mm_node_allocated(&vma->node))
                return 0;

        if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
                /* XXX not always required: nop_clear_range */
                wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

        /* Optimistic wait before taking the mutex */
        err = i915_vma_sync(vma);
        if (err)
                goto out_rpm;

        err = mutex_lock_interruptible(&vm->mutex);
        if (err)
                goto out_rpm;

        err = __i915_vma_unbind(vma);
        mutex_unlock(&vm->mutex);

out_rpm:
        if (wakeref)
                intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
        return err;
}

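/*
 * Example (illustrative sketch only): i915_vma_unbind() is the convenience
 * wrapper for callers that do not yet hold vm->mutex; code that already
 * holds the mutex (for instance while walking vm->bound_list) calls
 * __i915_vma_unbind() directly, just as i915_vma_unbind() itself does above.
 *
 *      mutex_lock(&vm->mutex);
 *      err = __i915_vma_unbind(vma);
 *      mutex_unlock(&vm->mutex);
 */
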
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
        i915_gem_object_make_unshrinkable(vma->obj);
        return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
        i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
        i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
        kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
        kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
        .shrink = i915_global_vma_shrink,
        .exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
        global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
        if (!global.slab_vmas)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}