/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

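/*
 * Illustrative sketch, not part of the driver: a typical caller looks up the
 * singleton vma for an object in an address space and then pins it before
 * using its GPU address. The obj/vm names below stand in for whatever the
 * caller already holds.
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *	if (err)
 *		return err;
 *
 *	... use vma->node.start as the GPU virtual address ...
 *
 *	i915_vma_unpin(vma);
 */
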
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;
	int err;

	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
	if (err)
		atomic_or(I915_VMA_ERROR, &vma->flags);

	return err;
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
		__i915_gem_object_unpin_pages(vw->pinned);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

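/*
 * Sketch of the intended usage, mirroring i915_vma_pin() below: the worker
 * is preallocated before taking vm->mutex, handed to i915_vma_bind() when an
 * asynchronous bind is required, and then unconditionally committed so that
 * an unused worker (still carrying the -EAGAIN sentinel set above) is simply
 * released rather than queued.
 *
 *	work = i915_vma_work();
 *	if (!work)
 *		return -ENOMEM;
 *
 *	... take vm->mutex, reserve the node ...
 *
 *	err = i915_vma_bind(vma, cache_level, bind_flags, work);
 *
 *	... drop vm->mutex ...
 *
 *	dma_fence_work_commit(&work->base);
 */
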
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags | I915_VMA_ALLOC;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
		i915_active_set_exclusive(&vma->active, &work->base.dma);
		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = vma->obj;
		}
	} else {
		GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
		if (ret)
			return ret;
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

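/*
 * Rough usage sketch: i915_vma_pin_iomap() pairs with i915_vma_unpin_iomap()
 * below, and the vma must already hold a GGTT binding (for example via
 * i915_vma_pin(vma, 0, 0, PIN_GLOBAL)) before the CPU mapping is requested.
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	... write through ptr with writel()/memcpy_toio() ...
 *
 *	i915_vma_unpin_iomap(vma);
 */
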
void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		atomic_inc(&obj->bind_count);
		assert_bind_count(obj);
	}
	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		assert_bind_count(obj);
		atomic_dec(&obj->bind_count);
	}
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	unsigned int bound;
	int err;

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(flags & PIN_UPDATE);
	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_pages;
		}
	}

	/* No more allocations allowed once we hold vm->mutex */
	err = mutex_lock_interruptible(&vma->vm->mutex);
	if (err)
		goto err_fence;

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit(&work->base);
err_pages:
	vma_put_pages(vma);
	return err;
}

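/*
 * A minimal pinning sketch, assuming the caller already owns a reference to
 * the vma: every successful i915_vma_pin() must be balanced by an
 * i915_vma_unpin() once the GPU address is no longer required, otherwise the
 * binding can never be evicted.
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	offset = i915_ggtt_offset(vma);
 *	... program the hardware with offset ...
 *
 *	i915_vma_unpin(vma);
 */
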
static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	do {
		err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
		if (err != -ENOSPC)
			return err;

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	spin_lock_irqsave(&gt->closed_lock, flags);
	list_add(&vma->closed_link, &gt->closed_vma);
	spin_unlock_irqrestore(&gt->closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (i915_vm_tryopen(vm)) {
			list_del_init(&vma->closed_link);
		} else {
			i915_gem_object_put(obj);
			obj = NULL;
		}

		spin_unlock_irq(&gt->closed_lock);

		if (obj) {
			i915_vma_destroy(vma);
			i915_gem_object_put(obj);
		}

		i915_vm_close(vm);

		/* Restart after dropping lock */
		spin_lock_irq(&gt->closed_lock);
		next = list_first_entry(&gt->closed_vma,
					typeof(*next), closed_link);
	}
	spin_unlock_irq(&gt->closed_lock);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	/* Wait for the vma to be bound before we start! */
	err = i915_request_await_active(rq, &vma->active);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		dma_resv_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		err = dma_resv_reserve_shared(vma->resv, 1);
		if (unlikely(err))
			return err;

		dma_resv_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

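/*
 * Sketch of the request-building sequence this function expects (the rq and
 * obj here are whatever the caller is constructing): the object's
 * reservation lock is held, the vma is already pinned, and the vma is marked
 * active before the request is submitted so that the binding cannot be
 * released while the GPU may still be using it.
 *
 *	i915_gem_object_lock(obj);
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	i915_gem_object_unlock(obj);
 */
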
int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 *
	 * XXX Actually waiting under the vm->mutex is a hindrance and
	 * should be pipelined wherever possible. In cases where that is
	 * unavoidable, we should lift the wait to before the mutex.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	GEM_BUG_ON(i915_vma_is_active(vma));
	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_revoke_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	err = mutex_lock_interruptible(&vm->mutex);
	if (err)
		return err;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

	return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}