/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vm_put(vm);
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
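
/*
 * Example usage (a sketch; obj and ggtt are placeholders, not names from
 * this file): callers typically look up the singleton vma and then pin it
 * before use,
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *
 * Repeated calls with the same (obj, vm, view) triple return the same vma:
 * the spinlocked lookup keeps the fast path cheap, while vma_create()
 * arbitrates racing creators via the rbtree insertion.
 */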

struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;

	vma->ops->bind_vma(vw->vm, &vw->stash,
			   vma, vw->cache_level, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned) {
		__i915_gem_object_unpin_pages(vw->pinned);
		i915_gem_object_put(vw->pinned);
	}

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
			dma_fence_put(fence);
		}
	}

	return err;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = i915_gem_object_get(vma->obj);
		}
	} else {
		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}
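
/*
 * For illustration only (a sketch, not a caller in this file): the pin path
 * preallocates a worker so that a bind which needs page-table allocations
 * can run asynchronously behind a dma-fence:
 *
 *	work = i915_vma_work();
 *	if (!work)
 *		return -ENOMEM;
 *
 *	err = i915_vma_bind(vma, obj->cache_level, PIN_USER, work);
 *
 * For a vm without bind_async_flags, or when work == NULL, the synchronous
 * branch above calls ops->bind_vma() directly instead.
 */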

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (!i915_gem_object_is_lmem(vma->obj)) {
		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
			err = -ENODEV;
			goto err;
		}
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj))
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		else
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						vma->node.start,
						vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
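
/*
 * Typical usage of the iomap pair (a sketch; value and offset are
 * placeholders):
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	writel(value, map + offset);
 *	i915_vma_unpin_iomap(vma);
 *
 * The pairing matters: i915_vma_pin_iomap() takes both a pin and a fence
 * reference, and i915_vma_unpin_iomap() flushes outstanding GGTT writes
 * before dropping them.
 */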

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
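
/*
 * For example (hypothetical flags): a caller that requires a fixed
 * placement may pin with
 *
 *	i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_OFFSET_FIXED | offset);
 *
 * and helpers such as i915_gem_object_ggtt_pin_ww() consult
 * i915_vma_misplaced() on an already-bound vma to decide whether it must
 * first be unbound and then rebound at the requested address.
 */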

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}
741
Chris Wilson31c7eff2017-02-27 12:26:54 +0000742static void
Chris Wilsondde01d92019-10-30 19:21:49 +0000743i915_vma_detach(struct i915_vma *vma)
Chris Wilson31c7eff2017-02-27 12:26:54 +0000744{
Chris Wilson31c7eff2017-02-27 12:26:54 +0000745 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
Chris Wilson4dd2fbb2019-09-11 10:02:43 +0100746 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
Chris Wilson31c7eff2017-02-27 12:26:54 +0000747
Chris Wilson520ea7c2018-06-07 16:40:45 +0100748 /*
Chris Wilsondde01d92019-10-30 19:21:49 +0000749 * And finally now the object is completely decoupled from this
750 * vma, we can drop its hold on the backing storage and allow
751 * it to be reaped by the shrinker.
Chris Wilson31c7eff2017-02-27 12:26:54 +0000752 */
Chris Wilsondde01d92019-10-30 19:21:49 +0000753 list_del(&vma->vm_link);
Chris Wilson31c7eff2017-02-27 12:26:54 +0000754}
755
Chris Wilson28507482019-10-04 14:39:58 +0100756static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
Joonas Lahtinenb42fe9c2016-11-11 12:43:54 +0200757{
Chris Wilson28507482019-10-04 14:39:58 +0100758 unsigned int bound;
759 bool pinned = true;
Joonas Lahtinenb42fe9c2016-11-11 12:43:54 +0200760
Chris Wilson28507482019-10-04 14:39:58 +0100761 bound = atomic_read(&vma->flags);
762 do {
763 if (unlikely(flags & ~bound))
764 return false;
Joonas Lahtinenb42fe9c2016-11-11 12:43:54 +0200765
Chris Wilson28507482019-10-04 14:39:58 +0100766 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
767 return false;
768
769 if (!(bound & I915_VMA_PIN_MASK))
770 goto unpinned;
771
772 GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
773 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
774
775 return true;
776
777unpinned:
778 /*
779 * If pin_count==0, but we are bound, check under the lock to avoid
780 * racing with a concurrent i915_vma_unbind().
781 */
782 mutex_lock(&vma->vm->mutex);
783 do {
784 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
785 pinned = false;
786 break;
787 }
788
789 if (unlikely(flags & ~bound)) {
790 pinned = false;
791 break;
792 }
793 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
794 mutex_unlock(&vma->vm->mutex);
795
796 return pinned;
797}
798
799static int vma_get_pages(struct i915_vma *vma)
800{
801 int err = 0;
Thomas Hellström0f4308d2021-06-01 09:46:40 +0200802 bool pinned_pages = false;
Chris Wilson28507482019-10-04 14:39:58 +0100803
804 if (atomic_add_unless(&vma->pages_count, 1, 0))
805 return 0;
806
Thomas Hellström0f4308d2021-06-01 09:46:40 +0200807 if (vma->obj) {
808 err = i915_gem_object_pin_pages(vma->obj);
809 if (err)
810 return err;
811 pinned_pages = true;
812 }
813
Chris Wilson28507482019-10-04 14:39:58 +0100814 /* Allocations ahoy! */
Thomas Hellström0f4308d2021-06-01 09:46:40 +0200815 if (mutex_lock_interruptible(&vma->pages_mutex)) {
816 err = -EINTR;
817 goto unpin;
818 }
Chris Wilson28507482019-10-04 14:39:58 +0100819
820 if (!atomic_read(&vma->pages_count)) {
Chris Wilson28507482019-10-04 14:39:58 +0100821 err = vma->ops->set_pages(vma);
Thomas Hellström0f4308d2021-06-01 09:46:40 +0200822 if (err)
Chris Wilson28507482019-10-04 14:39:58 +0100823 goto unlock;
Thomas Hellström0f4308d2021-06-01 09:46:40 +0200824 pinned_pages = false;
Chris Wilson28507482019-10-04 14:39:58 +0100825 }
826 atomic_inc(&vma->pages_count);
827
828unlock:
829 mutex_unlock(&vma->pages_mutex);
Thomas Hellström0f4308d2021-06-01 09:46:40 +0200830unpin:
831 if (pinned_pages)
832 __i915_gem_object_unpin_pages(vma->obj);
Chris Wilson28507482019-10-04 14:39:58 +0100833
834 return err;
835}
836
837static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
838{
839 /* We allocate under vma_get_pages, so beware the shrinker */
840 mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
841 GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
842 if (atomic_sub_return(count, &vma->pages_count) == 0) {
843 vma->ops->clear_pages(vma);
844 GEM_BUG_ON(vma->pages);
845 if (vma->obj)
846 i915_gem_object_unpin_pages(vma->obj);
847 }
848 mutex_unlock(&vma->pages_mutex);
849}
850
851static void vma_put_pages(struct i915_vma *vma)
852{
853 if (atomic_add_unless(&vma->pages_count, -1, 1))
854 return;
855
856 __vma_put_pages(vma, 1);
857}
858
859static void vma_unbind_pages(struct i915_vma *vma)
860{
861 unsigned int count;
862
863 lockdep_assert_held(&vma->vm->mutex);
864
865 /* The upper portion of pages_count is the number of bindings */
866 count = atomic_read(&vma->pages_count);
867 count >>= I915_VMA_PAGES_BIAS;
868 GEM_BUG_ON(!count);
869
870 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
871}
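
/*
 * A worked example of the packed counter (assuming, per its use in
 * i915_vma_pin_ww() below, that I915_VMA_PAGES_ACTIVE encodes one binding
 * in the upper portion plus one page reference in the lower): a vma bound
 * once while a single caller also holds vma_get_pages() reads as
 * (1 << I915_VMA_PAGES_BIAS) + 2. vma_unbind_pages() then strips every
 * binding, and its paired page reference, in one __vma_put_pages() call,
 * leaving only the transient user's count.
 */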

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

#ifdef CONFIG_PROVE_LOCKING
	if (debug_locks && !WARN_ON(!ww) && vma->resv)
		assert_vma_held(vma);
#endif

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	if (flags & vma->vm->bind_async_flags) {
		/* lock VM */
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = i915_vm_get(vma->vm);

		/* Allocate enough page directories to cover the PTEs used */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them via the mmu_notifier
	 * callbacks - but they are allowed to be part of the user ppGTT
	 * which can never be mapped. As such we try to give the distinct
	 * users of the same mutex, distinct lockclasses [equivalent to how
	 * we keep i915_ggtt and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_fence;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
	vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

#ifdef CONFIG_LOCKDEP
	WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv));
#endif

	do {
		if (ww)
			err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
		else
			err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}
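
/*
 * Example caller pattern (a sketch; obj, vma and ww are placeholders):
 * i915_ggtt_pin() is normally invoked under a ww transaction so that
 * -EDEADLK from the object lock can be handled by backing off and
 * retrying:
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 *retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err)
 *		err = i915_ggtt_pin(vma, &ww, 0, PIN_HIGH);
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */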
1069
Chris Wilson50689772020-04-22 20:05:58 +01001070static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
Chris Wilson3365e222018-05-03 20:51:14 +01001071{
Chris Wilson3365e222018-05-03 20:51:14 +01001072 /*
1073 * We defer actually closing, unbinding and destroying the VMA until
1074 * the next idle point, or if the object is freed in the meantime. By
1075 * postponing the unbind, we allow for it to be resurrected by the
1076 * client, avoiding the work required to rebind the VMA. This is
1077 * advantageous for DRI, where the client/server pass objects
1078 * between themselves, temporarily opening a local VMA to the
1079 * object, and then closing it again. The same object is then reused
1080 * on the next frame (or two, depending on the depth of the swap queue)
1081 * causing us to rebind the VMA once more. This ends up being a lot
1082 * of wasted work for the steady state.
1083 */
Chris Wilson50689772020-04-22 20:05:58 +01001084 GEM_BUG_ON(i915_vma_is_closed(vma));
Chris Wilson71e51ca2019-10-21 19:32:35 +01001085 list_add(&vma->closed_link, &gt->closed_vma);
Chris Wilson50689772020-04-22 20:05:58 +01001086}
1087
1088void i915_vma_close(struct i915_vma *vma)
1089{
1090 struct intel_gt *gt = vma->vm->gt;
1091 unsigned long flags;
1092
1093 if (i915_vma_is_ggtt(vma))
1094 return;
1095
1096 GEM_BUG_ON(!atomic_read(&vma->open_count));
1097 if (atomic_dec_and_lock_irqsave(&vma->open_count,
1098 &gt->closed_lock,
1099 flags)) {
1100 __vma_close(vma, gt);
1101 spin_unlock_irqrestore(&gt->closed_lock, flags);
1102 }
Chris Wilson155ab882019-06-06 12:23:20 +01001103}
1104
1105static void __i915_vma_remove_closed(struct i915_vma *vma)
1106{
Chris Wilson71e51ca2019-10-21 19:32:35 +01001107 struct intel_gt *gt = vma->vm->gt;
Chris Wilson155ab882019-06-06 12:23:20 +01001108
Chris Wilson71e51ca2019-10-21 19:32:35 +01001109 spin_lock_irq(&gt->closed_lock);
Chris Wilson155ab882019-06-06 12:23:20 +01001110 list_del_init(&vma->closed_link);
Chris Wilson71e51ca2019-10-21 19:32:35 +01001111 spin_unlock_irq(&gt->closed_lock);
Chris Wilson3365e222018-05-03 20:51:14 +01001112}
1113
1114void i915_vma_reopen(struct i915_vma *vma)
1115{
Chris Wilson28507482019-10-04 14:39:58 +01001116 if (i915_vma_is_closed(vma))
1117 __i915_vma_remove_closed(vma);
Chris Wilson3365e222018-05-03 20:51:14 +01001118}
1119
Chris Wilson76f97642019-12-22 21:02:55 +00001120void i915_vma_release(struct kref *ref)
Joonas Lahtinenb42fe9c2016-11-11 12:43:54 +02001121{
Chris Wilson76f97642019-12-22 21:02:55 +00001122 struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1123
Chris Wilson28507482019-10-04 14:39:58 +01001124 if (drm_mm_node_allocated(&vma->node)) {
1125 mutex_lock(&vma->vm->mutex);
1126 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1127 WARN_ON(__i915_vma_unbind(vma));
1128 mutex_unlock(&vma->vm->mutex);
1129 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1130 }
1131 GEM_BUG_ON(i915_vma_is_active(vma));
Joonas Lahtinenb42fe9c2016-11-11 12:43:54 +02001132
Chris Wilson528cbd12019-01-28 10:23:54 +00001133 if (vma->obj) {
1134 struct drm_i915_gem_object *obj = vma->obj;
1135
1136 spin_lock(&obj->vma.lock);
1137 list_del(&vma->obj_link);
Chris Wilson51dc2762020-06-11 19:04:21 +01001138 if (!RB_EMPTY_NODE(&vma->obj_node))
1139 rb_erase(&vma->obj_node, &obj->vma.tree);
Chris Wilson528cbd12019-01-28 10:23:54 +00001140 spin_unlock(&obj->vma.lock);
1141 }
Chris Wilson010e3e62017-12-06 12:49:13 +00001142
Chris Wilson155ab882019-06-06 12:23:20 +01001143 __i915_vma_remove_closed(vma);
Chris Wilson28507482019-10-04 14:39:58 +01001144 i915_vm_put(vma->vm);
Chris Wilson3365e222018-05-03 20:51:14 +01001145
Chris Wilson28507482019-10-04 14:39:58 +01001146 i915_active_fini(&vma->active);
1147 i915_vma_free(vma);
Chris Wilson3365e222018-05-03 20:51:14 +01001148}
1149
Chris Wilson71e51ca2019-10-21 19:32:35 +01001150void i915_vma_parked(struct intel_gt *gt)
Chris Wilson3365e222018-05-03 20:51:14 +01001151{
1152 struct i915_vma *vma, *next;
Chris Wilson3447c4c2020-03-23 09:28:35 +00001153 LIST_HEAD(closed);
Chris Wilson3365e222018-05-03 20:51:14 +01001154
Chris Wilson71e51ca2019-10-21 19:32:35 +01001155 spin_lock_irq(&gt->closed_lock);
1156 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
Chris Wilson28507482019-10-04 14:39:58 +01001157 struct drm_i915_gem_object *obj = vma->obj;
1158 struct i915_address_space *vm = vma->vm;
1159
1160 /* XXX All to avoid keeping a reference on i915_vma itself */
1161
1162 if (!kref_get_unless_zero(&obj->base.refcount))
1163 continue;
1164
Chris Wilson3447c4c2020-03-23 09:28:35 +00001165 if (!i915_vm_tryopen(vm)) {
Chris Wilson28507482019-10-04 14:39:58 +01001166 i915_gem_object_put(obj);
Chris Wilson3447c4c2020-03-23 09:28:35 +00001167 continue;
Chris Wilson28507482019-10-04 14:39:58 +01001168 }
1169
Chris Wilson3447c4c2020-03-23 09:28:35 +00001170 list_move(&vma->closed_link, &closed);
Chris Wilson155ab882019-06-06 12:23:20 +01001171 }
Chris Wilson71e51ca2019-10-21 19:32:35 +01001172 spin_unlock_irq(&gt->closed_lock);
Chris Wilson3447c4c2020-03-23 09:28:35 +00001173
1174 /* As the GT is held idle, no vma can be reopened as we destroy them */
1175 list_for_each_entry_safe(vma, next, &closed, closed_link) {
1176 struct drm_i915_gem_object *obj = vma->obj;
1177 struct i915_address_space *vm = vma->vm;
1178
1179 INIT_LIST_HEAD(&vma->closed_link);
1180 __i915_vma_put(vma);
1181
1182 i915_gem_object_put(obj);
1183 i915_vm_close(vm);
1184 }
Joonas Lahtinenb42fe9c2016-11-11 12:43:54 +02001185}
1186
1187static void __i915_vma_iounmap(struct i915_vma *vma)
1188{
1189 GEM_BUG_ON(i915_vma_is_pinned(vma));
1190
1191 if (vma->iomap == NULL)
1192 return;
1193
1194 io_mapping_unmap(vma->iomap);
1195 vma->iomap = NULL;
1196}
1197
Chris Wilsona65adaf2017-10-09 09:43:57 +01001198void i915_vma_revoke_mmap(struct i915_vma *vma)
1199{
Abdiel Janulguecc662122019-12-04 12:00:32 +00001200 struct drm_vma_offset_node *node;
Chris Wilsona65adaf2017-10-09 09:43:57 +01001201 u64 vma_offset;
1202
Chris Wilsona65adaf2017-10-09 09:43:57 +01001203 if (!i915_vma_has_userfault(vma))
1204 return;
1205
1206 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1207 GEM_BUG_ON(!vma->obj->userfault_count);
1208
Abdiel Janulguecc662122019-12-04 12:00:32 +00001209 node = &vma->mmo->vma_node;
Chris Wilsona65adaf2017-10-09 09:43:57 +01001210 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1211 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1212 drm_vma_node_offset_addr(node) + vma_offset,
1213 vma->size,
1214 1);
1215
1216 i915_vma_unset_userfault(vma);
1217 if (!--vma->obj->userfault_count)
1218 list_del(&vma->obj->userfault_link);
1219}
1220
Chris Wilsonaf5c6fc2020-07-31 09:50:15 +01001221static int
1222__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1223{
1224 return __i915_request_await_exclusive(rq, &vma->active);
1225}
1226
Chris Wilson28507482019-10-04 14:39:58 +01001227int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1228{
1229 int err;
1230
1231 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1232
1233 /* Wait for the vma to be bound before we start! */
Chris Wilsonaf5c6fc2020-07-31 09:50:15 +01001234 err = __i915_request_await_bind(rq, vma);
Chris Wilson28507482019-10-04 14:39:58 +01001235 if (err)
1236 return err;
1237
1238 return i915_active_add_request(&vma->active, rq);
1239}
1240
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001241int i915_vma_move_to_active(struct i915_vma *vma,
1242 struct i915_request *rq,
1243 unsigned int flags)
1244{
1245 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilsona93615f2019-06-21 19:37:59 +01001246 int err;
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001247
Chris Wilson6951e582019-05-28 10:29:51 +01001248 assert_object_held(obj);
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001249
Chris Wilson28507482019-10-04 14:39:58 +01001250 err = __i915_vma_move_to_active(vma, rq);
Chris Wilsona93615f2019-06-21 19:37:59 +01001251 if (unlikely(err))
1252 return err;
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001253
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001254 if (flags & EXEC_OBJECT_WRITE) {
Chris Wilsonda421042019-12-18 10:40:43 +00001255 struct intel_frontbuffer *front;
1256
1257 front = __intel_frontbuffer_get(obj);
1258 if (unlikely(front)) {
1259 if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1260 i915_active_add_request(&front->write, rq);
1261 intel_frontbuffer_put(front);
1262 }
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001263
Rodrigo Vivi829e8de2019-08-21 22:47:35 -07001264 dma_resv_add_excl_fence(vma->resv, &rq->fence);
Chris Wilsoncd2a4ea2019-07-30 21:58:05 +01001265 obj->write_domain = I915_GEM_DOMAIN_RENDER;
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001266 obj->read_domains = 0;
Chris Wilsoncd2a4ea2019-07-30 21:58:05 +01001267 } else {
Maarten Lankhorstbfaae472021-03-23 16:49:59 +01001268 if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
1269 err = dma_resv_reserve_shared(vma->resv, 1);
1270 if (unlikely(err))
1271 return err;
1272 }
Chris Wilsoncd2a4ea2019-07-30 21:58:05 +01001273
Rodrigo Vivi829e8de2019-08-21 22:47:35 -07001274 dma_resv_add_shared_fence(vma->resv, &rq->fence);
Chris Wilsoncd2a4ea2019-07-30 21:58:05 +01001275 obj->write_domain = 0;
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001276 }
Chris Wilson63baf4f2020-04-01 22:01:02 +01001277
1278 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1279 i915_active_add_request(&vma->fence->active, rq);
1280
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001281 obj->read_domains |= I915_GEM_GPU_DOMAINS;
Chris Wilsona93615f2019-06-21 19:37:59 +01001282 obj->mm.dirty = true;
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001283
Chris Wilsona93615f2019-06-21 19:37:59 +01001284 GEM_BUG_ON(!i915_vma_is_active(vma));
Chris Wilsone6bb1d72018-07-06 11:39:45 +01001285 return 0;
1286}

void __i915_vma_evict(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma->vm, vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);
}

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}