/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}

static void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vm_put(vm);
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

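/*
 * Typical usage (illustrative sketch only, not a definitive recipe; obj and
 * ggtt below are placeholders and error handling is abbreviated): a caller
 * pairs i915_vma_instance() with i915_vma_pin() to obtain a bound mapping,
 * and drops the pin once the access is complete.
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	... use vma->node.start ...
 *
 *	i915_vma_unpin(vma);
 */
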
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;

	vma->ops->bind_vma(vw->vm, &vw->stash,
			   vma, vw->cache_level, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned) {
		__i915_gem_object_unpin_pages(vw->pinned);
		i915_gem_object_put(vw->pinned);
	}

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	int err = 0;

	if (i915_active_has_exclusive(&vma->active)) {
		struct dma_fence *fence =
			i915_active_fence_get(&vma->active.excl);

		if (!fence)
			return 0;

		if (dma_fence_is_signaled(fence))
			err = fence->error;
		else
			err = -EBUSY;

		dma_fence_put(fence);
	}

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		__i915_gem_object_pin_pages(vma->obj);
		work->pinned = i915_gem_object_get(vma->obj);
	} else {
		if (vma->obj) {
			int ret;

			ret = i915_gem_object_wait_moving_fence(vma->obj, true);
			if (ret)
				return ret;
		}
		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

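/*
 * Illustrative sketch of how an asynchronous bind is driven (assumption drawn
 * from the in-tree caller i915_vma_pin_ww() below, which also populates the
 * PT stash; not a standalone recipe): the worker is preallocated, handed to
 * i915_vma_bind(), and then committed so the queued PTE setup can run.
 *
 *	struct i915_vma_work *work = i915_vma_work();
 *	if (!work)
 *		return -ENOMEM;
 *	work->vm = i915_vm_get(vma->vm);
 *	err = i915_vma_bind(vma, cache_level, bind_flags, work);
 *	dma_fence_work_commit_imm(&work->base);
 */
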
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (!i915_gem_object_is_lmem(vma->obj)) {
		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
			err = -ENODEV;
			goto err;
		}
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj))
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		else
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						vma->node.start,
						vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

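/*
 * The iomap helpers above are used in a pin/access/unpin bracket. A minimal
 * sketch (illustrative only; offset and value are placeholders):
 *
 *	void __iomem *map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	writel(value, map + offset);
 *	i915_vma_unpin_iomap(vma);
 */
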
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;
	bool pinned_pages = true;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	err = i915_gem_object_pin_pages(vma->obj);
	if (err)
		return err;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex)) {
		err = -EINTR;
		goto unpin;
	}

	if (!atomic_read(&vma->pages_count)) {
		err = vma->ops->set_pages(vma);
		if (err)
			goto unlock;
		pinned_pages = false;
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);
unpin:
	if (pinned_pages)
		__i915_gem_object_unpin_pages(vma->obj);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);

		i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	struct dma_fence *moving = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

#ifdef CONFIG_PROVE_LOCKING
	if (debug_locks && !WARN_ON(!ww))
		assert_vma_held(vma);
#endif

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
	if (flags & vma->vm->bind_async_flags || moving) {
		/* lock VM */
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = i915_vm_get(vma->vm);

		dma_fence_work_chain(&work->base, moving);

		/* Allocate enough page directories to cover the PTEs we will use */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them the mmu_notifier callbacks -
	 * but they are allowed to be part of the user ppGTT which can never
	 * be mapped. As such we try to give the distinct users of the same
	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
	 * and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_fence;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj->cache_level,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
	if (moving)
		dma_fence_put(moving);
	vma_put_pages(vma);

	return err;
}
1051
Chris Wilsonccd20942019-12-05 11:37:25 +00001052static void flush_idle_contexts(struct intel_gt *gt)
1053{
1054 struct intel_engine_cs *engine;
1055 enum intel_engine_id id;
1056
1057 for_each_engine(engine, gt, id)
1058 intel_engine_flush_barriers(engine);
1059
1060 intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1061}
1062
Maarten Lankhorst47b08692020-08-19 16:08:54 +02001063int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1064 u32 align, unsigned int flags)
Chris Wilsonccd20942019-12-05 11:37:25 +00001065{
1066 struct i915_address_space *vm = vma->vm;
1067 int err;
1068
1069 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1070
Maarten Lankhorst1eef0de12021-03-23 16:49:54 +01001071#ifdef CONFIG_LOCKDEP
Maarten Lankhorst95c3d272021-11-17 14:20:23 +00001072 WARN_ON(!ww && dma_resv_held(vma->obj->base.resv));
Maarten Lankhorst1eef0de12021-03-23 16:49:54 +01001073#endif
1074
Chris Wilsonccd20942019-12-05 11:37:25 +00001075 do {
Maarten Lankhorst1eef0de12021-03-23 16:49:54 +01001076 if (ww)
1077 err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1078 else
1079 err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
Chris Wilsone3793462020-01-30 18:17:10 +00001080 if (err != -ENOSPC) {
1081 if (!err) {
1082 err = i915_vma_wait_for_bind(vma);
1083 if (err)
1084 i915_vma_unpin(vma);
1085 }
Chris Wilsonccd20942019-12-05 11:37:25 +00001086 return err;
Chris Wilsone3793462020-01-30 18:17:10 +00001087 }
Chris Wilsonccd20942019-12-05 11:37:25 +00001088
1089 /* Unlike i915_vma_pin, we don't take no for an answer! */
1090 flush_idle_contexts(vm->gt);
1091 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1092 i915_gem_evict_vm(vm);
1093 mutex_unlock(&vm->mutex);
1094 }
1095 } while (1);
1096}
1097
Chris Wilson50689772020-04-22 20:05:58 +01001098static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
Chris Wilson3365e222018-05-03 20:51:14 +01001099{
Chris Wilson3365e222018-05-03 20:51:14 +01001100 /*
1101 * We defer actually closing, unbinding and destroying the VMA until
1102 * the next idle point, or if the object is freed in the meantime. By
1103 * postponing the unbind, we allow for it to be resurrected by the
1104 * client, avoiding the work required to rebind the VMA. This is
1105 * advantageous for DRI, where the client/server pass objects
1106 * between themselves, temporarily opening a local VMA to the
1107 * object, and then closing it again. The same object is then reused
1108 * on the next frame (or two, depending on the depth of the swap queue)
1109 * causing us to rebind the VMA once more. This ends up being a lot
1110 * of wasted work for the steady state.
1111 */
Chris Wilson50689772020-04-22 20:05:58 +01001112 GEM_BUG_ON(i915_vma_is_closed(vma));
Chris Wilson71e51ca2019-10-21 19:32:35 +01001113 list_add(&vma->closed_link, &gt->closed_vma);
Chris Wilson50689772020-04-22 20:05:58 +01001114}
1115
1116void i915_vma_close(struct i915_vma *vma)
1117{
1118 struct intel_gt *gt = vma->vm->gt;
1119 unsigned long flags;
1120
1121 if (i915_vma_is_ggtt(vma))
1122 return;
1123
1124 GEM_BUG_ON(!atomic_read(&vma->open_count));
1125 if (atomic_dec_and_lock_irqsave(&vma->open_count,
1126 &gt->closed_lock,
1127 flags)) {
1128 __vma_close(vma, gt);
1129 spin_unlock_irqrestore(&gt->closed_lock, flags);
1130 }
Chris Wilson155ab882019-06-06 12:23:20 +01001131}
1132
1133static void __i915_vma_remove_closed(struct i915_vma *vma)
1134{
Chris Wilson71e51ca2019-10-21 19:32:35 +01001135 struct intel_gt *gt = vma->vm->gt;
Chris Wilson155ab882019-06-06 12:23:20 +01001136
Chris Wilson71e51ca2019-10-21 19:32:35 +01001137 spin_lock_irq(&gt->closed_lock);
Chris Wilson155ab882019-06-06 12:23:20 +01001138 list_del_init(&vma->closed_link);
Chris Wilson71e51ca2019-10-21 19:32:35 +01001139 spin_unlock_irq(&gt->closed_lock);
Chris Wilson3365e222018-05-03 20:51:14 +01001140}
1141
1142void i915_vma_reopen(struct i915_vma *vma)
1143{
Chris Wilson28507482019-10-04 14:39:58 +01001144 if (i915_vma_is_closed(vma))
1145 __i915_vma_remove_closed(vma);
Chris Wilson3365e222018-05-03 20:51:14 +01001146}
1147
Chris Wilson76f97642019-12-22 21:02:55 +00001148void i915_vma_release(struct kref *ref)
Joonas Lahtinenb42fe9c2016-11-11 12:43:54 +02001149{
Chris Wilson76f97642019-12-22 21:02:55 +00001150 struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
Maarten Lankhorste6e1a302021-11-17 14:20:22 +00001151 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilson76f97642019-12-22 21:02:55 +00001152
Chris Wilson28507482019-10-04 14:39:58 +01001153 if (drm_mm_node_allocated(&vma->node)) {
1154 mutex_lock(&vma->vm->mutex);
1155 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1156 WARN_ON(__i915_vma_unbind(vma));
1157 mutex_unlock(&vma->vm->mutex);
1158 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1159 }
1160 GEM_BUG_ON(i915_vma_is_active(vma));
Joonas Lahtinenb42fe9c2016-11-11 12:43:54 +02001161
Maarten Lankhorste6e1a302021-11-17 14:20:22 +00001162 spin_lock(&obj->vma.lock);
1163 list_del(&vma->obj_link);
1164 if (!RB_EMPTY_NODE(&vma->obj_node))
1165 rb_erase(&vma->obj_node, &obj->vma.tree);
1166 spin_unlock(&obj->vma.lock);
Chris Wilson010e3e62017-12-06 12:49:13 +00001167
Chris Wilson155ab882019-06-06 12:23:20 +01001168 __i915_vma_remove_closed(vma);
Chris Wilson28507482019-10-04 14:39:58 +01001169 i915_vm_put(vma->vm);
Chris Wilson3365e222018-05-03 20:51:14 +01001170
Chris Wilson28507482019-10-04 14:39:58 +01001171 i915_active_fini(&vma->active);
1172 i915_vma_free(vma);
Chris Wilson3365e222018-05-03 20:51:14 +01001173}
1174
Chris Wilson71e51ca2019-10-21 19:32:35 +01001175void i915_vma_parked(struct intel_gt *gt)
Chris Wilson3365e222018-05-03 20:51:14 +01001176{
1177 struct i915_vma *vma, *next;
Chris Wilson3447c4c2020-03-23 09:28:35 +00001178 LIST_HEAD(closed);
Chris Wilson3365e222018-05-03 20:51:14 +01001179
Chris Wilson71e51ca2019-10-21 19:32:35 +01001180 spin_lock_irq(&gt->closed_lock);
1181 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
Chris Wilson28507482019-10-04 14:39:58 +01001182 struct drm_i915_gem_object *obj = vma->obj;
1183 struct i915_address_space *vm = vma->vm;
1184
1185 /* XXX All to avoid keeping a reference on i915_vma itself */
1186
1187 if (!kref_get_unless_zero(&obj->base.refcount))
1188 continue;
1189
Chris Wilson3447c4c2020-03-23 09:28:35 +00001190 if (!i915_vm_tryopen(vm)) {
Chris Wilson28507482019-10-04 14:39:58 +01001191 i915_gem_object_put(obj);
Chris Wilson3447c4c2020-03-23 09:28:35 +00001192 continue;
Chris Wilson28507482019-10-04 14:39:58 +01001193 }
1194
Chris Wilson3447c4c2020-03-23 09:28:35 +00001195 list_move(&vma->closed_link, &closed);
Chris Wilson155ab882019-06-06 12:23:20 +01001196 }
Chris Wilson71e51ca2019-10-21 19:32:35 +01001197 spin_unlock_irq(&gt->closed_lock);
Chris Wilson3447c4c2020-03-23 09:28:35 +00001198
1199 /* As the GT is held idle, no vma can be reopened as we destroy them */
1200 list_for_each_entry_safe(vma, next, &closed, closed_link) {
1201 struct drm_i915_gem_object *obj = vma->obj;
1202 struct i915_address_space *vm = vma->vm;
1203
1204 INIT_LIST_HEAD(&vma->closed_link);
1205 __i915_vma_put(vma);
1206
1207 i915_gem_object_put(obj);
1208 i915_vm_close(vm);
1209 }
Joonas Lahtinenb42fe9c2016-11-11 12:43:54 +02001210}
1211
1212static void __i915_vma_iounmap(struct i915_vma *vma)
1213{
1214 GEM_BUG_ON(i915_vma_is_pinned(vma));
1215
1216 if (vma->iomap == NULL)
1217 return;
1218
1219 io_mapping_unmap(vma->iomap);
1220 vma->iomap = NULL;
1221}
1222
Chris Wilsona65adaf2017-10-09 09:43:57 +01001223void i915_vma_revoke_mmap(struct i915_vma *vma)
1224{
Abdiel Janulguecc662122019-12-04 12:00:32 +00001225 struct drm_vma_offset_node *node;
Chris Wilsona65adaf2017-10-09 09:43:57 +01001226 u64 vma_offset;
1227
Chris Wilsona65adaf2017-10-09 09:43:57 +01001228 if (!i915_vma_has_userfault(vma))
1229 return;
1230
1231 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1232 GEM_BUG_ON(!vma->obj->userfault_count);
1233
Abdiel Janulguecc662122019-12-04 12:00:32 +00001234 node = &vma->mmo->vma_node;
Chris Wilsona65adaf2017-10-09 09:43:57 +01001235 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1236 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1237 drm_vma_node_offset_addr(node) + vma_offset,
1238 vma->size,
1239 1);
1240
1241 i915_vma_unset_userfault(vma);
1242 if (!--vma->obj->userfault_count)
1243 list_del(&vma->obj->userfault_link);
1244}
1245
Chris Wilsonaf5c6fc2020-07-31 09:50:15 +01001246static int
1247__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1248{
1249 return __i915_request_await_exclusive(rq, &vma->active);
1250}
1251
Chris Wilson28507482019-10-04 14:39:58 +01001252int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1253{
1254 int err;
1255
1256 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1257
1258 /* Wait for the vma to be bound before we start! */
Chris Wilsonaf5c6fc2020-07-31 09:50:15 +01001259 err = __i915_request_await_bind(rq, vma);
Chris Wilson28507482019-10-04 14:39:58 +01001260 if (err)
1261 return err;
1262
1263 return i915_active_add_request(&vma->active, rq);
1264}
1265
int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		if (fence) {
			dma_resv_add_excl_fence(vma->obj->base.resv, fence);
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
		}
	} else {
		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
			err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
			if (unlikely(err))
				return err;
		}

		if (fence) {
			dma_resv_add_shared_fence(vma->obj->base.resv, fence);
			obj->write_domain = 0;
		}
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

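/*
 * Tear down the vma's binding: revoke any userspace mmaps and the fence
 * register for GGTT vmas, clear the PTEs (unless the address space has
 * already been closed), and drop the vma's hold on its backing pages.
 */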
void __i915_vma_evict(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma->vm, vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);
}

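/*
 * Unbind the vma with vm->mutex already held: waits for the vma to
 * become idle and returns -EAGAIN if it is still pinned.
 */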
int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

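/*
 * Unlocked wrapper around __i915_vma_unbind(): waits for the vma to
 * idle, grabs a runtime-pm wakeref when a global GTT binding must be
 * torn down, and takes vm->mutex around the actual unbind.
 */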
int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

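/* Thin wrappers adjusting the shrinker visibility of the vma's backing object. */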
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

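/* Module init/exit for the slab cache backing i915_vma allocations. */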
void i915_vma_module_exit(void)
{
	kmem_cache_destroy(slab_vmas);
}

int __init i915_vma_module_init(void)
{
	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!slab_vmas)
		return -ENOMEM;

	return 0;
}