/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}

static void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vm_put(vm);
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

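/*
 * Example usage: a minimal sketch, not taken from an in-tree caller; "obj"
 * and "vm" are assumed to be an object and an address space the caller
 * already holds references on:
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * Repeated lookups with the same obj/vm/view tuple return the same vma,
 * so callers need not cache the pointer themselves.
 */
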
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;

	vma->ops->bind_vma(vw->vm, &vw->stash,
			   vma, vw->cache_level, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned) {
		__i915_gem_object_unpin_pages(vw->pinned);
		i915_gem_object_put(vw->pinned);
	}

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;

	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		__i915_gem_object_pin_pages(vma->obj);
		work->pinned = i915_gem_object_get(vma->obj);
	} else {
		if (vma->obj) {
			int ret;

			ret = i915_gem_object_wait_moving_fence(vma->obj, true);
			if (ret)
				return ret;
		}
		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

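/*
 * Example usage: the asynchronous binding flow as a minimal sketch; real
 * callers (the vma pinning path) also preallocate the page-table stash,
 * take a wakeref and handle errors, all elided here. "vma" and
 * "cache_level" are assumed to exist:
 *
 *	struct i915_vma_work *work;
 *	int err;
 *
 *	work = i915_vma_work();
 *	if (!work)
 *		return -ENOMEM;
 *
 *	mutex_lock(&vma->vm->mutex);
 *	err = i915_vma_bind(vma, cache_level, I915_VMA_LOCAL_BIND, work);
 *	mutex_unlock(&vma->vm->mutex);
 *
 *	dma_fence_work_commit(&work->base);
 *
 * If i915_vma_bind() never enabled the worker (the synchronous path), its
 * error is left at -EAGAIN and the commit simply cancels the work.
 */
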
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (!i915_gem_object_is_lmem(vma->obj)) {
		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
			err = -ENODEV;
			goto err;
		}
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj))
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		else
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						vma->node.start,
						vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

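/*
 * Example usage: a minimal pin/write/unpin sketch; "vma" is assumed to be
 * a GGTT vma bound with I915_VMA_GLOBAL_BIND, and a runtime pm wakeref is
 * assumed to be held (see the NB above):
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	writel(0xdeadbeef, map);
 *
 *	i915_vma_unpin_iomap(vma);
 *
 * The unpin calls i915_vma_flush_writes() for us, so the GGTT write
 * tracking set up by i915_vma_set_ggtt_write() need not be flushed by hand.
 */
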
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

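/*
 * Example usage: how a caller requests a fixed placement via the pin
 * flags; a sketch only, with a hypothetical "offset". The offset is packed
 * into the upper bits of the flags word and recovered above with
 * "flags & PIN_OFFSET_MASK", so it must be page aligned:
 *
 *	err = i915_vma_pin(vma, 0, 0,
 *			   PIN_GLOBAL | PIN_OFFSET_FIXED | offset);
 *
 * i915_vma_insert() then reserves exactly that node via
 * i915_gem_gtt_reserve() instead of searching for free space.
 */
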
static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only things we need are the DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj,
	    unsigned int offset, unsigned int alignment_pad,
	    unsigned int width, unsigned int height,
	    unsigned int src_stride, unsigned int dst_stride,
	    struct sg_table *st, struct scatterlist *sg)
{
	unsigned int row;

	if (!width || !height)
		return sg;

	if (alignment_pad) {
		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, alignment_pad * 4096, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = alignment_pad * 4096;
		sg = sg_next(sg);
	}

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only things we need are the DMA addresses.
			 */

			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}
| 987 | |
| 988 | static noinline struct sg_table * |
| 989 | intel_remap_pages(struct intel_remapped_info *rem_info, |
| 990 | struct drm_i915_gem_object *obj) |
| 991 | { |
| 992 | unsigned int size = intel_remapped_info_size(rem_info); |
| 993 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
| 994 | struct sg_table *st; |
| 995 | struct scatterlist *sg; |
| 996 | unsigned int gtt_offset = 0; |
| 997 | int ret = -ENOMEM; |
| 998 | int i; |
| 999 | |
| 1000 | /* Allocate target SG list. */ |
| 1001 | st = kmalloc(sizeof(*st), GFP_KERNEL); |
| 1002 | if (!st) |
| 1003 | goto err_st_alloc; |
| 1004 | |
| 1005 | ret = sg_alloc_table(st, size, GFP_KERNEL); |
| 1006 | if (ret) |
| 1007 | goto err_sg_alloc; |
| 1008 | |
| 1009 | st->nents = 0; |
| 1010 | sg = st->sgl; |
| 1011 | |
 | 1012 | for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) { |
| 1013 | unsigned int alignment_pad = 0; |
| 1014 | |
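 | | /* Pad each plane up to the requested GTT alignment boundary. */ |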
| 1015 | if (rem_info->plane_alignment) |
| 1016 | alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset; |
| 1017 | |
| 1018 | sg = remap_pages(obj, |
| 1019 | rem_info->plane[i].offset, alignment_pad, |
| 1020 | rem_info->plane[i].width, rem_info->plane[i].height, |
| 1021 | rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride, |
| 1022 | st, sg); |
| 1023 | |
| 1024 | gtt_offset += alignment_pad + |
| 1025 | rem_info->plane[i].dst_stride * rem_info->plane[i].height; |
| 1026 | } |
| 1027 | |
| 1028 | i915_sg_trim(st); |
| 1029 | |
| 1030 | return st; |
| 1031 | |
| 1032 | err_sg_alloc: |
| 1033 | kfree(st); |
| 1034 | err_st_alloc: |
| 1035 | |
| 1036 | drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n", |
| 1037 | obj->base.size, rem_info->plane[0].width, |
| 1038 | rem_info->plane[0].height, size); |
| 1039 | |
| 1040 | return ERR_PTR(ret); |
| 1041 | } |
| 1042 | |
| 1043 | static noinline struct sg_table * |
| 1044 | intel_partial_pages(const struct i915_ggtt_view *view, |
| 1045 | struct drm_i915_gem_object *obj) |
| 1046 | { |
| 1047 | struct sg_table *st; |
| 1048 | struct scatterlist *sg, *iter; |
| 1049 | unsigned int count = view->partial.size; |
| 1050 | unsigned int offset; |
| 1051 | int ret = -ENOMEM; |
| 1052 | |
| 1053 | st = kmalloc(sizeof(*st), GFP_KERNEL); |
| 1054 | if (!st) |
| 1055 | goto err_st_alloc; |
| 1056 | |
| 1057 | ret = sg_alloc_table(st, count, GFP_KERNEL); |
| 1058 | if (ret) |
| 1059 | goto err_sg_alloc; |
| 1060 | |
| 1061 | iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); |
| 1062 | GEM_BUG_ON(!iter); |
| 1063 | |
| 1064 | sg = st->sgl; |
| 1065 | st->nents = 0; |
| 1066 | do { |
| 1067 | unsigned int len; |
| 1068 | |
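 | | /* |
 | | * Copy as much of the current source entry as we need, starting |
 | | * at page 'offset' within it and capped by the pages remaining |
 | | * in the partial view. |
 | | */ |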
| 1069 | len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), |
| 1070 | count << PAGE_SHIFT); |
| 1071 | sg_set_page(sg, NULL, len, 0); |
| 1072 | sg_dma_address(sg) = |
| 1073 | sg_dma_address(iter) + (offset << PAGE_SHIFT); |
| 1074 | sg_dma_len(sg) = len; |
| 1075 | |
| 1076 | st->nents++; |
| 1077 | count -= len >> PAGE_SHIFT; |
| 1078 | if (count == 0) { |
| 1079 | sg_mark_end(sg); |
| 1080 | i915_sg_trim(st); /* Drop any unused tail entries. */ |
| 1081 | |
| 1082 | return st; |
| 1083 | } |
| 1084 | |
| 1085 | sg = __sg_next(sg); |
| 1086 | iter = __sg_next(iter); |
| 1087 | offset = 0; |
| 1088 | } while (1); |
| 1089 | |
| 1090 | err_sg_alloc: |
| 1091 | kfree(st); |
| 1092 | err_st_alloc: |
| 1093 | return ERR_PTR(ret); |
| 1094 | } |
| 1095 | |
| 1096 | static int |
| 1097 | __i915_vma_get_pages(struct i915_vma *vma) |
| 1098 | { |
| 1099 | struct sg_table *pages; |
| 1100 | int ret; |
| 1101 | |
| 1102 | /* |
| 1103 | * The vma->pages are only valid within the lifespan of the borrowed |
 | 1104 | * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, the |
 | 1105 | * vma->pages must be rebuilt as well. A simple rule is that vma->pages |
 | 1106 | * must only be accessed while the obj->mm.pages are pinned. |
| 1107 | */ |
| 1108 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); |
| 1109 | |
| 1110 | switch (vma->ggtt_view.type) { |
| 1111 | default: |
| 1112 | GEM_BUG_ON(vma->ggtt_view.type); |
| 1113 | fallthrough; |
| 1114 | case I915_GGTT_VIEW_NORMAL: |
| 1115 | pages = vma->obj->mm.pages; |
| 1116 | break; |
| 1117 | |
| 1118 | case I915_GGTT_VIEW_ROTATED: |
| 1119 | pages = |
| 1120 | intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); |
| 1121 | break; |
| 1122 | |
| 1123 | case I915_GGTT_VIEW_REMAPPED: |
| 1124 | pages = |
| 1125 | intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); |
| 1126 | break; |
| 1127 | |
| 1128 | case I915_GGTT_VIEW_PARTIAL: |
| 1129 | pages = intel_partial_pages(&vma->ggtt_view, vma->obj); |
| 1130 | break; |
| 1131 | } |
| 1132 | |
| 1133 | ret = 0; |
| 1134 | if (IS_ERR(pages)) { |
| 1135 | ret = PTR_ERR(pages); |
| 1136 | pages = NULL; |
| 1137 | drm_err(&vma->vm->i915->drm, |
| 1138 | "Failed to get pages for VMA view type %u (%d)!\n", |
| 1139 | vma->ggtt_view.type, ret); |
| 1140 | } |
| 1141 | |
| 1142 | vma->pages = pages; |
| 1143 | |
| 1144 | return ret; |
| 1145 | } |
| 1146 | |
| 1147 | I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma) |
| 1148 | { |
| 1149 | int err; |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1150 | |
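 | | /* Fast path: take another reference only if pages_count is already non-zero. */ |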
| 1151 | if (atomic_add_unless(&vma->pages_count, 1, 0)) |
| 1152 | return 0; |
| 1153 | |
Maarten Lankhorst | e6e1a30 | 2021-11-17 14:20:22 +0000 | [diff] [blame] | 1154 | err = i915_gem_object_pin_pages(vma->obj); |
| 1155 | if (err) |
| 1156 | return err; |
Thomas Hellström | 0f4308d | 2021-06-01 09:46:40 +0200 | [diff] [blame] | 1157 | |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1158 | err = __i915_vma_get_pages(vma); |
| 1159 | if (err) |
| 1160 | goto err_unpin; |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1161 | |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1162 | vma->page_sizes = vma->obj->mm.page_sizes; |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1163 | atomic_inc(&vma->pages_count); |
| 1164 | |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1165 | return 0; |
| 1166 | |
| 1167 | err_unpin: |
| 1168 | __i915_gem_object_unpin_pages(vma->obj); |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1169 | |
| 1170 | return err; |
| 1171 | } |
| 1172 | |
| 1173 | static void __vma_put_pages(struct i915_vma *vma, unsigned int count) |
| 1174 | { |
 | 1175 | /* We allocate under i915_vma_get_pages(), so beware the shrinker */ |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1176 | struct sg_table *pages = READ_ONCE(vma->pages); |
| 1177 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1178 | GEM_BUG_ON(atomic_read(&vma->pages_count) < count); |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1179 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1180 | if (atomic_sub_return(count, &vma->pages_count) == 0) { |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1181 | /* |
| 1182 | * The atomic_sub_return is a read barrier for the READ_ONCE of |
| 1183 | * vma->pages above. |
| 1184 | * |
| 1185 | * READ_ONCE is safe because this is either called from the same |
| 1186 | * function (i915_vma_pin_ww), or guarded by vma->vm->mutex. |
| 1187 | * |
| 1188 | * TODO: We're leaving vma->pages dangling, until vma->obj->resv |
| 1189 | * lock is required. |
| 1190 | */ |
| 1191 | if (pages != vma->obj->mm.pages) { |
| 1192 | sg_free_table(pages); |
| 1193 | kfree(pages); |
| 1194 | } |
Maarten Lankhorst | e6e1a30 | 2021-11-17 14:20:22 +0000 | [diff] [blame] | 1195 | |
| 1196 | i915_gem_object_unpin_pages(vma->obj); |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1197 | } |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1198 | } |
| 1199 | |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1200 | I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma) |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1201 | { |
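 | | /* Fast path: drop a reference unless we would be the last holder. */ |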
| 1202 | if (atomic_add_unless(&vma->pages_count, -1, 1)) |
| 1203 | return; |
| 1204 | |
| 1205 | __vma_put_pages(vma, 1); |
| 1206 | } |
| 1207 | |
| 1208 | static void vma_unbind_pages(struct i915_vma *vma) |
| 1209 | { |
| 1210 | unsigned int count; |
| 1211 | |
| 1212 | lockdep_assert_held(&vma->vm->mutex); |
| 1213 | |
| 1214 | /* The upper portion of pages_count is the number of bindings */ |
| 1215 | count = atomic_read(&vma->pages_count); |
| 1216 | count >>= I915_VMA_PAGES_BIAS; |
| 1217 | GEM_BUG_ON(!count); |
| 1218 | |
| 1219 | __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS); |
| 1220 | } |
| 1221 | |
Maarten Lankhorst | 47b0869 | 2020-08-19 16:08:54 +0200 | [diff] [blame] | 1222 | int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, |
| 1223 | u64 size, u64 alignment, u64 flags) |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1224 | { |
| 1225 | struct i915_vma_work *work = NULL; |
Maarten Lankhorst | f6c466b | 2021-11-22 22:45:49 +0100 | [diff] [blame] | 1226 | struct dma_fence *moving = NULL; |
Chris Wilson | c0e6034 | 2020-01-10 14:44:18 +0000 | [diff] [blame] | 1227 | intel_wakeref_t wakeref = 0; |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1228 | unsigned int bound; |
| 1229 | int err; |
| 1230 | |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1231 | assert_vma_held(vma); |
| 1232 | GEM_BUG_ON(!ww); |
Maarten Lankhorst | 47b0869 | 2020-08-19 16:08:54 +0200 | [diff] [blame] | 1233 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1234 | BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); |
| 1235 | BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); |
| 1236 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1237 | GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL))); |
| 1238 | |
| 1239 | /* First try and grab the pin without rebinding the vma */ |
| 1240 | if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) |
| 1241 | return 0; |
| 1242 | |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1243 | err = i915_vma_get_pages(vma); |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1244 | if (err) |
| 1245 | return err; |
| 1246 | |
Chris Wilson | 8935192 | 2020-07-29 17:42:18 +0100 | [diff] [blame] | 1247 | if (flags & PIN_GLOBAL) |
| 1248 | wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); |
| 1249 | |
Maarten Lankhorst | f6c466b | 2021-11-22 22:45:49 +0100 | [diff] [blame] | 1250 | moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL; |
| 1251 | if (flags & vma->vm->bind_async_flags || moving) { |
Maarten Lankhorst | 26ad4f8 | 2021-03-23 16:50:29 +0100 | [diff] [blame] | 1252 | /* lock VM */ |
| 1253 | err = i915_vm_lock_objects(vma->vm, ww); |
| 1254 | if (err) |
| 1255 | goto err_rpm; |
| 1256 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1257 | work = i915_vma_work(); |
| 1258 | if (!work) { |
| 1259 | err = -ENOMEM; |
Chris Wilson | 8935192 | 2020-07-29 17:42:18 +0100 | [diff] [blame] | 1260 | goto err_rpm; |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1261 | } |
Chris Wilson | cd0452a | 2020-07-29 17:42:17 +0100 | [diff] [blame] | 1262 | |
| 1263 | work->vm = i915_vm_get(vma->vm); |
| 1264 | |
Maarten Lankhorst | f6c466b | 2021-11-22 22:45:49 +0100 | [diff] [blame] | 1265 | dma_fence_work_chain(&work->base, moving); |
| 1266 | |
Chris Wilson | cd0452a | 2020-07-29 17:42:17 +0100 | [diff] [blame] | 1267 | /* Allocate enough page directories to cover the used PTEs */ |
Chris Wilson | 8935192 | 2020-07-29 17:42:18 +0100 | [diff] [blame] | 1268 | if (vma->vm->allocate_va_range) { |
Matthew Auld | cef8ce5 | 2020-09-21 17:08:44 +0100 | [diff] [blame] | 1269 | err = i915_vm_alloc_pt_stash(vma->vm, |
| 1270 | &work->stash, |
| 1271 | vma->size); |
| 1272 | if (err) |
| 1273 | goto err_fence; |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1274 | |
Matthew Auld | 529b9ec | 2021-04-27 09:54:13 +0100 | [diff] [blame] | 1275 | err = i915_vm_map_pt_stash(vma->vm, &work->stash); |
Chris Wilson | 8935192 | 2020-07-29 17:42:18 +0100 | [diff] [blame] | 1276 | if (err) |
| 1277 | goto err_fence; |
| 1278 | } |
| 1279 | } |
Chris Wilson | c0e6034 | 2020-01-10 14:44:18 +0000 | [diff] [blame] | 1280 | |
Chris Wilson | d002491 | 2020-03-26 14:27:27 +0000 | [diff] [blame] | 1281 | /* |
| 1282 | * Differentiate between user/kernel vma inside the aliasing-ppgtt. |
| 1283 | * |
| 1284 | * We conflate the Global GTT with the user's vma when using the |
| 1285 | * aliasing-ppgtt, but it is still vitally important to try and |
 | 1286 | * keep the use cases distinct. For example, userptr objects are |
 | 1287 | * not allowed inside the Global GTT as that will cause lock |
 | 1288 | * inversions when we have to evict them via the mmu_notifier |
 | 1289 | * callbacks - but they are allowed to be part of the user ppGTT, |
 | 1290 | * which can never be mapped. As such we try to give the distinct |
 | 1291 | * users of the same mutex distinct lockclasses [equivalent to how |
 | 1292 | * we keep i915_ggtt and i915_ppgtt separate]. |
| 1293 | * |
| 1294 | * NB this may cause us to mask real lock inversions -- while the |
| 1295 | * code is safe today, lockdep may not be able to spot future |
| 1296 | * transgressions. |
| 1297 | */ |
| 1298 | err = mutex_lock_interruptible_nested(&vma->vm->mutex, |
| 1299 | !(flags & PIN_GLOBAL)); |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1300 | if (err) |
| 1301 | goto err_fence; |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1302 | |
Chris Wilson | d002491 | 2020-03-26 14:27:27 +0000 | [diff] [blame] | 1303 | /* No more allocations allowed now we hold vm->mutex */ |
| 1304 | |
Chris Wilson | 00de702 | 2020-02-21 12:19:40 +0000 | [diff] [blame] | 1305 | if (unlikely(i915_vma_is_closed(vma))) { |
| 1306 | err = -ENOENT; |
| 1307 | goto err_unlock; |
| 1308 | } |
| 1309 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1310 | bound = atomic_read(&vma->flags); |
| 1311 | if (unlikely(bound & I915_VMA_ERROR)) { |
| 1312 | err = -ENOMEM; |
| 1313 | goto err_unlock; |
| 1314 | } |
| 1315 | |
| 1316 | if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) { |
| 1317 | err = -EAGAIN; /* pins are meant to be fairly temporary */ |
| 1318 | goto err_unlock; |
| 1319 | } |
| 1320 | |
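 | | /* Already bound in every way requested? Then a pin is all we need. */ |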
| 1321 | if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) { |
| 1322 | __i915_vma_pin(vma); |
| 1323 | goto err_unlock; |
| 1324 | } |
| 1325 | |
| 1326 | err = i915_active_acquire(&vma->active); |
| 1327 | if (err) |
| 1328 | goto err_unlock; |
| 1329 | |
| 1330 | if (!(bound & I915_VMA_BIND_MASK)) { |
| 1331 | err = i915_vma_insert(vma, size, alignment, flags); |
| 1332 | if (err) |
| 1333 | goto err_active; |
| 1334 | |
| 1335 | if (i915_is_ggtt(vma->vm)) |
| 1336 | __i915_vma_set_map_and_fenceable(vma); |
| 1337 | } |
| 1338 | |
| 1339 | GEM_BUG_ON(!vma->pages); |
| 1340 | err = i915_vma_bind(vma, |
Maarten Lankhorst | e6e1a30 | 2021-11-17 14:20:22 +0000 | [diff] [blame] | 1341 | vma->obj->cache_level, |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1342 | flags, work); |
| 1343 | if (err) |
Chris Wilson | 31c7eff | 2017-02-27 12:26:54 +0000 | [diff] [blame] | 1344 | goto err_remove; |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1345 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1346 | /* There should only be at most 2 active bindings (user, global) */ |
| 1347 | GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound); |
| 1348 | atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count); |
| 1349 | list_move_tail(&vma->vm_link, &vma->vm->bound_list); |
Chris Wilson | d36caee | 2017-11-05 12:45:50 +0000 | [diff] [blame] | 1350 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1351 | __i915_vma_pin(vma); |
| 1352 | GEM_BUG_ON(!i915_vma_is_pinned(vma)); |
| 1353 | GEM_BUG_ON(!i915_vma_is_bound(vma, flags)); |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1354 | GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1355 | |
Chris Wilson | 31c7eff | 2017-02-27 12:26:54 +0000 | [diff] [blame] | 1356 | err_remove: |
Chris Wilson | dde01d9 | 2019-10-30 19:21:49 +0000 | [diff] [blame] | 1357 | if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) { |
| 1358 | i915_vma_detach(vma); |
| 1359 | drm_mm_remove_node(&vma->node); |
| 1360 | } |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1361 | err_active: |
| 1362 | i915_active_release(&vma->active); |
| 1363 | err_unlock: |
| 1364 | mutex_unlock(&vma->vm->mutex); |
| 1365 | err_fence: |
| 1366 | if (work) |
Chris Wilson | 92581f9 | 2020-03-25 12:02:27 +0000 | [diff] [blame] | 1367 | dma_fence_work_commit_imm(&work->base); |
Chris Wilson | 8935192 | 2020-07-29 17:42:18 +0100 | [diff] [blame] | 1368 | err_rpm: |
Chris Wilson | c0e6034 | 2020-01-10 14:44:18 +0000 | [diff] [blame] | 1369 | if (wakeref) |
| 1370 | intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1371 | |
Maarten Lankhorst | f6c466b | 2021-11-22 22:45:49 +0100 | [diff] [blame] | 1372 | dma_fence_put(moving); /* NULL-safe */ |
Maarten Lankhorst | f6c466b | 2021-11-22 22:45:49 +0100 | [diff] [blame] | 1374 | |
Maarten Lankhorst | 0b4d1f0 | 2021-12-16 15:27:35 +0100 | [diff] [blame] | 1375 | i915_vma_put_pages(vma); |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1376 | return err; |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1377 | } |
| 1378 | |
Chris Wilson | ccd2094 | 2019-12-05 11:37:25 +0000 | [diff] [blame] | 1379 | static void flush_idle_contexts(struct intel_gt *gt) |
| 1380 | { |
| 1381 | struct intel_engine_cs *engine; |
| 1382 | enum intel_engine_id id; |
| 1383 | |
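 | | /* |
 | | * Flush each engine's barrier work so that idle contexts can be |
 | | * retired, then wait for the GT to idle, releasing any GGTT pins |
 | | * still held by those contexts. |
 | | */ |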
| 1384 | for_each_engine(engine, gt, id) |
| 1385 | intel_engine_flush_barriers(engine); |
| 1386 | |
| 1387 | intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); |
| 1388 | } |
| 1389 | |
Maarten Lankhorst | 2abb619 | 2021-12-16 15:27:36 +0100 | [diff] [blame] | 1390 | static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, |
| 1391 | u32 align, unsigned int flags) |
Chris Wilson | ccd2094 | 2019-12-05 11:37:25 +0000 | [diff] [blame] | 1392 | { |
| 1393 | struct i915_address_space *vm = vma->vm; |
| 1394 | int err; |
| 1395 | |
Chris Wilson | ccd2094 | 2019-12-05 11:37:25 +0000 | [diff] [blame] | 1396 | do { |
Maarten Lankhorst | 2abb619 | 2021-12-16 15:27:36 +0100 | [diff] [blame] | 1397 | err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL); |
| 1398 | |
Chris Wilson | e379346 | 2020-01-30 18:17:10 +0000 | [diff] [blame] | 1399 | if (err != -ENOSPC) { |
| 1400 | if (!err) { |
| 1401 | err = i915_vma_wait_for_bind(vma); |
| 1402 | if (err) |
| 1403 | i915_vma_unpin(vma); |
| 1404 | } |
Chris Wilson | ccd2094 | 2019-12-05 11:37:25 +0000 | [diff] [blame] | 1405 | return err; |
Chris Wilson | e379346 | 2020-01-30 18:17:10 +0000 | [diff] [blame] | 1406 | } |
Chris Wilson | ccd2094 | 2019-12-05 11:37:25 +0000 | [diff] [blame] | 1407 | |
| 1408 | /* Unlike i915_vma_pin, we don't take no for an answer! */ |
| 1409 | flush_idle_contexts(vm->gt); |
| 1410 | if (mutex_lock_interruptible(&vm->mutex) == 0) { |
| 1411 | i915_gem_evict_vm(vm); |
| 1412 | mutex_unlock(&vm->mutex); |
| 1413 | } |
| 1414 | } while (1); |
| 1415 | } |
| 1416 | |
Maarten Lankhorst | 2abb619 | 2021-12-16 15:27:36 +0100 | [diff] [blame] | 1417 | int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, |
| 1418 | u32 align, unsigned int flags) |
| 1419 | { |
| 1420 | struct i915_gem_ww_ctx _ww; |
| 1421 | int err; |
| 1422 | |
| 1423 | GEM_BUG_ON(!i915_vma_is_ggtt(vma)); |
| 1424 | |
| 1425 | if (ww) |
| 1426 | return __i915_ggtt_pin(vma, ww, align, flags); |
| 1427 | |
| 1428 | #ifdef CONFIG_LOCKDEP |
| 1429 | WARN_ON(dma_resv_held(vma->obj->base.resv)); |
| 1430 | #endif |
| 1431 | |
| 1432 | for_i915_gem_ww(&_ww, err, true) { |
| 1433 | err = i915_gem_object_lock(vma->obj, &_ww); |
| 1434 | if (!err) |
| 1435 | err = __i915_ggtt_pin(vma, &_ww, align, flags); |
| 1436 | } |
| 1437 | |
| 1438 | return err; |
| 1439 | } |
| 1440 | |
Chris Wilson | 5068977 | 2020-04-22 20:05:58 +0100 | [diff] [blame] | 1441 | static void __vma_close(struct i915_vma *vma, struct intel_gt *gt) |
Chris Wilson | 3365e22 | 2018-05-03 20:51:14 +0100 | [diff] [blame] | 1442 | { |
Chris Wilson | 3365e22 | 2018-05-03 20:51:14 +0100 | [diff] [blame] | 1443 | /* |
| 1444 | * We defer actually closing, unbinding and destroying the VMA until |
| 1445 | * the next idle point, or if the object is freed in the meantime. By |
| 1446 | * postponing the unbind, we allow for it to be resurrected by the |
| 1447 | * client, avoiding the work required to rebind the VMA. This is |
| 1448 | * advantageous for DRI, where the client/server pass objects |
| 1449 | * between themselves, temporarily opening a local VMA to the |
| 1450 | * object, and then closing it again. The same object is then reused |
| 1451 | * on the next frame (or two, depending on the depth of the swap queue) |
| 1452 | * causing us to rebind the VMA once more. This ends up being a lot |
| 1453 | * of wasted work for the steady state. |
| 1454 | */ |
Chris Wilson | 5068977 | 2020-04-22 20:05:58 +0100 | [diff] [blame] | 1455 | GEM_BUG_ON(i915_vma_is_closed(vma)); |
Chris Wilson | 71e51ca | 2019-10-21 19:32:35 +0100 | [diff] [blame] | 1456 | list_add(&vma->closed_link, >->closed_vma); |
Chris Wilson | 5068977 | 2020-04-22 20:05:58 +0100 | [diff] [blame] | 1457 | } |
| 1458 | |
| 1459 | void i915_vma_close(struct i915_vma *vma) |
| 1460 | { |
| 1461 | struct intel_gt *gt = vma->vm->gt; |
| 1462 | unsigned long flags; |
| 1463 | |
| 1464 | if (i915_vma_is_ggtt(vma)) |
| 1465 | return; |
| 1466 | |
| 1467 | GEM_BUG_ON(!atomic_read(&vma->open_count)); |
| 1468 | if (atomic_dec_and_lock_irqsave(&vma->open_count, |
| 1469 | >->closed_lock, |
| 1470 | flags)) { |
| 1471 | __vma_close(vma, gt); |
| 1472 | spin_unlock_irqrestore(>->closed_lock, flags); |
| 1473 | } |
Chris Wilson | 155ab88 | 2019-06-06 12:23:20 +0100 | [diff] [blame] | 1474 | } |
| 1475 | |
| 1476 | static void __i915_vma_remove_closed(struct i915_vma *vma) |
| 1477 | { |
Chris Wilson | 71e51ca | 2019-10-21 19:32:35 +0100 | [diff] [blame] | 1478 | struct intel_gt *gt = vma->vm->gt; |
Chris Wilson | 155ab88 | 2019-06-06 12:23:20 +0100 | [diff] [blame] | 1479 | |
Chris Wilson | 71e51ca | 2019-10-21 19:32:35 +0100 | [diff] [blame] | 1480 | spin_lock_irq(>->closed_lock); |
Chris Wilson | 155ab88 | 2019-06-06 12:23:20 +0100 | [diff] [blame] | 1481 | list_del_init(&vma->closed_link); |
Chris Wilson | 71e51ca | 2019-10-21 19:32:35 +0100 | [diff] [blame] | 1482 | spin_unlock_irq(>->closed_lock); |
Chris Wilson | 3365e22 | 2018-05-03 20:51:14 +0100 | [diff] [blame] | 1483 | } |
| 1484 | |
| 1485 | void i915_vma_reopen(struct i915_vma *vma) |
| 1486 | { |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1487 | if (i915_vma_is_closed(vma)) |
| 1488 | __i915_vma_remove_closed(vma); |
Chris Wilson | 3365e22 | 2018-05-03 20:51:14 +0100 | [diff] [blame] | 1489 | } |
| 1490 | |
Chris Wilson | 76f9764 | 2019-12-22 21:02:55 +0000 | [diff] [blame] | 1491 | void i915_vma_release(struct kref *ref) |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1492 | { |
Chris Wilson | 76f9764 | 2019-12-22 21:02:55 +0000 | [diff] [blame] | 1493 | struct i915_vma *vma = container_of(ref, typeof(*vma), ref); |
Maarten Lankhorst | e6e1a30 | 2021-11-17 14:20:22 +0000 | [diff] [blame] | 1494 | struct drm_i915_gem_object *obj = vma->obj; |
Chris Wilson | 76f9764 | 2019-12-22 21:02:55 +0000 | [diff] [blame] | 1495 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1496 | if (drm_mm_node_allocated(&vma->node)) { |
| 1497 | mutex_lock(&vma->vm->mutex); |
| 1498 | atomic_and(~I915_VMA_PIN_MASK, &vma->flags); |
| 1499 | WARN_ON(__i915_vma_unbind(vma)); |
| 1500 | mutex_unlock(&vma->vm->mutex); |
| 1501 | GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); |
| 1502 | } |
| 1503 | GEM_BUG_ON(i915_vma_is_active(vma)); |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1504 | |
Maarten Lankhorst | e6e1a30 | 2021-11-17 14:20:22 +0000 | [diff] [blame] | 1505 | spin_lock(&obj->vma.lock); |
| 1506 | list_del(&vma->obj_link); |
| 1507 | if (!RB_EMPTY_NODE(&vma->obj_node)) |
| 1508 | rb_erase(&vma->obj_node, &obj->vma.tree); |
| 1509 | spin_unlock(&obj->vma.lock); |
Chris Wilson | 010e3e6 | 2017-12-06 12:49:13 +0000 | [diff] [blame] | 1510 | |
Chris Wilson | 155ab88 | 2019-06-06 12:23:20 +0100 | [diff] [blame] | 1511 | __i915_vma_remove_closed(vma); |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1512 | i915_vm_put(vma->vm); |
Chris Wilson | 3365e22 | 2018-05-03 20:51:14 +0100 | [diff] [blame] | 1513 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1514 | i915_active_fini(&vma->active); |
| 1515 | i915_vma_free(vma); |
Chris Wilson | 3365e22 | 2018-05-03 20:51:14 +0100 | [diff] [blame] | 1516 | } |
| 1517 | |
Chris Wilson | 71e51ca | 2019-10-21 19:32:35 +0100 | [diff] [blame] | 1518 | void i915_vma_parked(struct intel_gt *gt) |
Chris Wilson | 3365e22 | 2018-05-03 20:51:14 +0100 | [diff] [blame] | 1519 | { |
| 1520 | struct i915_vma *vma, *next; |
Chris Wilson | 3447c4c | 2020-03-23 09:28:35 +0000 | [diff] [blame] | 1521 | LIST_HEAD(closed); |
Chris Wilson | 3365e22 | 2018-05-03 20:51:14 +0100 | [diff] [blame] | 1522 | |
Chris Wilson | 71e51ca | 2019-10-21 19:32:35 +0100 | [diff] [blame] | 1523 | spin_lock_irq(>->closed_lock); |
| 1524 | list_for_each_entry_safe(vma, next, >->closed_vma, closed_link) { |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1525 | struct drm_i915_gem_object *obj = vma->obj; |
| 1526 | struct i915_address_space *vm = vma->vm; |
| 1527 | |
| 1528 | /* XXX All to avoid keeping a reference on i915_vma itself */ |
| 1529 | |
| 1530 | if (!kref_get_unless_zero(&obj->base.refcount)) |
| 1531 | continue; |
| 1532 | |
Chris Wilson | 3447c4c | 2020-03-23 09:28:35 +0000 | [diff] [blame] | 1533 | if (!i915_vm_tryopen(vm)) { |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1534 | i915_gem_object_put(obj); |
Chris Wilson | 3447c4c | 2020-03-23 09:28:35 +0000 | [diff] [blame] | 1535 | continue; |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1536 | } |
| 1537 | |
Chris Wilson | 3447c4c | 2020-03-23 09:28:35 +0000 | [diff] [blame] | 1538 | list_move(&vma->closed_link, &closed); |
Chris Wilson | 155ab88 | 2019-06-06 12:23:20 +0100 | [diff] [blame] | 1539 | } |
Chris Wilson | 71e51ca | 2019-10-21 19:32:35 +0100 | [diff] [blame] | 1540 | spin_unlock_irq(>->closed_lock); |
Chris Wilson | 3447c4c | 2020-03-23 09:28:35 +0000 | [diff] [blame] | 1541 | |
| 1542 | /* As the GT is held idle, no vma can be reopened as we destroy them */ |
| 1543 | list_for_each_entry_safe(vma, next, &closed, closed_link) { |
| 1544 | struct drm_i915_gem_object *obj = vma->obj; |
| 1545 | struct i915_address_space *vm = vma->vm; |
| 1546 | |
| 1547 | INIT_LIST_HEAD(&vma->closed_link); |
| 1548 | __i915_vma_put(vma); |
| 1549 | |
| 1550 | i915_gem_object_put(obj); |
| 1551 | i915_vm_close(vm); |
| 1552 | } |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1553 | } |
| 1554 | |
| 1555 | static void __i915_vma_iounmap(struct i915_vma *vma) |
| 1556 | { |
| 1557 | GEM_BUG_ON(i915_vma_is_pinned(vma)); |
| 1558 | |
 | 1559 | if (!vma->iomap) |
| 1560 | return; |
| 1561 | |
| 1562 | io_mapping_unmap(vma->iomap); |
| 1563 | vma->iomap = NULL; |
| 1564 | } |
| 1565 | |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 1566 | void i915_vma_revoke_mmap(struct i915_vma *vma) |
| 1567 | { |
Abdiel Janulgue | cc66212 | 2019-12-04 12:00:32 +0000 | [diff] [blame] | 1568 | struct drm_vma_offset_node *node; |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 1569 | u64 vma_offset; |
| 1570 | |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 1571 | if (!i915_vma_has_userfault(vma)) |
| 1572 | return; |
| 1573 | |
| 1574 | GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); |
| 1575 | GEM_BUG_ON(!vma->obj->userfault_count); |
| 1576 | |
Abdiel Janulgue | cc66212 | 2019-12-04 12:00:32 +0000 | [diff] [blame] | 1577 | node = &vma->mmo->vma_node; |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 1578 | vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; |
| 1579 | unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, |
| 1580 | drm_vma_node_offset_addr(node) + vma_offset, |
| 1581 | vma->size, |
| 1582 | 1); |
| 1583 | |
| 1584 | i915_vma_unset_userfault(vma); |
| 1585 | if (!--vma->obj->userfault_count) |
| 1586 | list_del(&vma->obj->userfault_link); |
| 1587 | } |
| 1588 | |
Chris Wilson | af5c6fc | 2020-07-31 09:50:15 +0100 | [diff] [blame] | 1589 | static int |
| 1590 | __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma) |
| 1591 | { |
| 1592 | return __i915_request_await_exclusive(rq, &vma->active); |
| 1593 | } |
| 1594 | |
Maarten Lankhorst | ad5c99e | 2021-12-16 15:27:33 +0100 | [diff] [blame] | 1595 | static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1596 | { |
| 1597 | int err; |
| 1598 | |
| 1599 | GEM_BUG_ON(!i915_vma_is_pinned(vma)); |
| 1600 | |
| 1601 | /* Wait for the vma to be bound before we start! */ |
Chris Wilson | af5c6fc | 2020-07-31 09:50:15 +0100 | [diff] [blame] | 1602 | err = __i915_request_await_bind(rq, vma); |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1603 | if (err) |
| 1604 | return err; |
| 1605 | |
| 1606 | return i915_active_add_request(&vma->active, rq); |
| 1607 | } |
| 1608 | |
Matthew Brost | 544460c | 2021-10-14 10:20:00 -0700 | [diff] [blame] | 1609 | int _i915_vma_move_to_active(struct i915_vma *vma, |
| 1610 | struct i915_request *rq, |
| 1611 | struct dma_fence *fence, |
| 1612 | unsigned int flags) |
Chris Wilson | e6bb1d7 | 2018-07-06 11:39:45 +0100 | [diff] [blame] | 1613 | { |
| 1614 | struct drm_i915_gem_object *obj = vma->obj; |
Chris Wilson | a93615f | 2019-06-21 19:37:59 +0100 | [diff] [blame] | 1615 | int err; |
Chris Wilson | e6bb1d7 | 2018-07-06 11:39:45 +0100 | [diff] [blame] | 1616 | |
Chris Wilson | 6951e58 | 2019-05-28 10:29:51 +0100 | [diff] [blame] | 1617 | assert_object_held(obj); |
Chris Wilson | e6bb1d7 | 2018-07-06 11:39:45 +0100 | [diff] [blame] | 1618 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1619 | err = __i915_vma_move_to_active(vma, rq); |
Chris Wilson | a93615f | 2019-06-21 19:37:59 +0100 | [diff] [blame] | 1620 | if (unlikely(err)) |
| 1621 | return err; |
Chris Wilson | e6bb1d7 | 2018-07-06 11:39:45 +0100 | [diff] [blame] | 1622 | |
Chris Wilson | e6bb1d7 | 2018-07-06 11:39:45 +0100 | [diff] [blame] | 1623 | if (flags & EXEC_OBJECT_WRITE) { |
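 | | /* |
 | | * A GPU write invalidates frontbuffer tracking so the display |
 | | * side (e.g. FBC, PSR) is notified of the pending change. |
 | | */ |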
Chris Wilson | da42104 | 2019-12-18 10:40:43 +0000 | [diff] [blame] | 1624 | struct intel_frontbuffer *front; |
| 1625 | |
| 1626 | front = __intel_frontbuffer_get(obj); |
| 1627 | if (unlikely(front)) { |
| 1628 | if (intel_frontbuffer_invalidate(front, ORIGIN_CS)) |
| 1629 | i915_active_add_request(&front->write, rq); |
| 1630 | intel_frontbuffer_put(front); |
| 1631 | } |
Chris Wilson | e6bb1d7 | 2018-07-06 11:39:45 +0100 | [diff] [blame] | 1632 | |
Matthew Brost | 544460c | 2021-10-14 10:20:00 -0700 | [diff] [blame] | 1633 | if (fence) { |
Maarten Lankhorst | 95c3d27 | 2021-11-17 14:20:23 +0000 | [diff] [blame] | 1634 | dma_resv_add_excl_fence(vma->obj->base.resv, fence); |
Matthew Brost | 544460c | 2021-10-14 10:20:00 -0700 | [diff] [blame] | 1635 | obj->write_domain = I915_GEM_DOMAIN_RENDER; |
| 1636 | obj->read_domains = 0; |
| 1637 | } |
Chris Wilson | cd2a4ea | 2019-07-30 21:58:05 +0100 | [diff] [blame] | 1638 | } else { |
Maarten Lankhorst | bfaae47 | 2021-03-23 16:49:59 +0100 | [diff] [blame] | 1639 | if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { |
Maarten Lankhorst | 95c3d27 | 2021-11-17 14:20:23 +0000 | [diff] [blame] | 1640 | err = dma_resv_reserve_shared(vma->obj->base.resv, 1); |
Maarten Lankhorst | bfaae47 | 2021-03-23 16:49:59 +0100 | [diff] [blame] | 1641 | if (unlikely(err)) |
| 1642 | return err; |
| 1643 | } |
Chris Wilson | cd2a4ea | 2019-07-30 21:58:05 +0100 | [diff] [blame] | 1644 | |
Matthew Brost | 544460c | 2021-10-14 10:20:00 -0700 | [diff] [blame] | 1645 | if (fence) { |
Maarten Lankhorst | 95c3d27 | 2021-11-17 14:20:23 +0000 | [diff] [blame] | 1646 | dma_resv_add_shared_fence(vma->obj->base.resv, fence); |
Matthew Brost | 544460c | 2021-10-14 10:20:00 -0700 | [diff] [blame] | 1647 | obj->write_domain = 0; |
| 1648 | } |
Chris Wilson | e6bb1d7 | 2018-07-06 11:39:45 +0100 | [diff] [blame] | 1649 | } |
Chris Wilson | 63baf4f | 2020-04-01 22:01:02 +0100 | [diff] [blame] | 1650 | |
| 1651 | if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) |
| 1652 | i915_active_add_request(&vma->fence->active, rq); |
| 1653 | |
Chris Wilson | e6bb1d7 | 2018-07-06 11:39:45 +0100 | [diff] [blame] | 1654 | obj->read_domains |= I915_GEM_GPU_DOMAINS; |
Chris Wilson | a93615f | 2019-06-21 19:37:59 +0100 | [diff] [blame] | 1655 | obj->mm.dirty = true; |
Chris Wilson | e6bb1d7 | 2018-07-06 11:39:45 +0100 | [diff] [blame] | 1656 | |
Chris Wilson | a93615f | 2019-06-21 19:37:59 +0100 | [diff] [blame] | 1657 | GEM_BUG_ON(!i915_vma_is_active(vma)); |
Chris Wilson | e6bb1d7 | 2018-07-06 11:39:45 +0100 | [diff] [blame] | 1658 | return 0; |
| 1659 | } |
| 1660 | |
Chris Wilson | bffa18d | 2020-05-28 09:24:27 +0100 | [diff] [blame] | 1661 | void __i915_vma_evict(struct i915_vma *vma) |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1662 | { |
Chris Wilson | 60e9455 | 2020-01-23 22:44:58 +0000 | [diff] [blame] | 1663 | GEM_BUG_ON(i915_vma_is_pinned(vma)); |
Chris Wilson | 60e9455 | 2020-01-23 22:44:58 +0000 | [diff] [blame] | 1664 | |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1665 | if (i915_vma_is_map_and_fenceable(vma)) { |
Chris Wilson | 9657aaa | 2020-04-03 17:09:51 +0100 | [diff] [blame] | 1666 | /* Force a pagefault for domain tracking on next user access */ |
| 1667 | i915_vma_revoke_mmap(vma); |
| 1668 | |
Chris Wilson | 7125397b | 2017-12-06 12:49:14 +0000 | [diff] [blame] | 1669 | /* |
| 1670 | * Check that we have flushed all writes through the GGTT |
 | 1671 | * before the unbind; otherwise, due to the non-strict nature of |
 | 1672 | * those indirect writes, they may end up referencing the GGTT PTE |
 | 1673 | * after the unbind. |
Chris Wilson | 5424f5d | 2020-01-21 22:24:41 +0000 | [diff] [blame] | 1674 | * |
| 1675 | * Note that we may be concurrently poking at the GGTT_WRITE |
| 1676 | * bit from set-domain, as we mark all GGTT vma associated |
| 1677 | * with an object. We know this is for another vma, as we |
| 1678 | * are currently unbinding this one -- so if this vma will be |
| 1679 | * reused, it will be refaulted and have its dirty bit set |
| 1680 | * before the next write. |
Chris Wilson | 7125397b | 2017-12-06 12:49:14 +0000 | [diff] [blame] | 1681 | */ |
| 1682 | i915_vma_flush_writes(vma); |
Chris Wilson | 7125397b | 2017-12-06 12:49:14 +0000 | [diff] [blame] | 1683 | |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1684 | /* release the fence reg _after_ flushing */ |
Chris Wilson | 0d86ee3 | 2020-04-01 22:01:04 +0100 | [diff] [blame] | 1685 | i915_vma_revoke_fence(vma); |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1686 | |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1687 | __i915_vma_iounmap(vma); |
Chris Wilson | 4dd2fbb | 2019-09-11 10:02:43 +0100 | [diff] [blame] | 1688 | clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1689 | } |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 1690 | GEM_BUG_ON(vma->fence); |
| 1691 | GEM_BUG_ON(i915_vma_has_userfault(vma)); |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1692 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1693 | if (likely(atomic_read(&vma->vm->open))) { |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1694 | trace_i915_vma_unbind(vma); |
Chris Wilson | 12b0725 | 2020-07-03 11:25:19 +0100 | [diff] [blame] | 1695 | vma->ops->unbind_vma(vma->vm, vma); |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1696 | } |
Chris Wilson | 5424f5d | 2020-01-21 22:24:41 +0000 | [diff] [blame] | 1697 | atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE), |
| 1698 | &vma->flags); |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1699 | |
Chris Wilson | dde01d9 | 2019-10-30 19:21:49 +0000 | [diff] [blame] | 1700 | i915_vma_detach(vma); |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1701 | vma_unbind_pages(vma); |
Chris Wilson | bffa18d | 2020-05-28 09:24:27 +0100 | [diff] [blame] | 1702 | } |
| 1703 | |
| 1704 | int __i915_vma_unbind(struct i915_vma *vma) |
| 1705 | { |
| 1706 | int ret; |
| 1707 | |
| 1708 | lockdep_assert_held(&vma->vm->mutex); |
| 1709 | |
| 1710 | if (!drm_mm_node_allocated(&vma->node)) |
| 1711 | return 0; |
| 1712 | |
| 1713 | if (i915_vma_is_pinned(vma)) { |
| 1714 | vma_print_allocator(vma, "is pinned"); |
| 1715 | return -EAGAIN; |
| 1716 | } |
| 1717 | |
| 1718 | /* |
| 1719 | * After confirming that no one else is pinning this vma, wait for |
| 1720 | * any laggards who may have crept in during the wait (through |
| 1721 | * a residual pin skipping the vm->mutex) to complete. |
| 1722 | */ |
| 1723 | ret = i915_vma_sync(vma); |
| 1724 | if (ret) |
| 1725 | return ret; |
| 1726 | |
| 1727 | GEM_BUG_ON(i915_vma_is_active(vma)); |
| 1728 | __i915_vma_evict(vma); |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1729 | |
Chris Wilson | 76f9764 | 2019-12-22 21:02:55 +0000 | [diff] [blame] | 1730 | drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ |
Joonas Lahtinen | b42fe9c | 2016-11-11 12:43:54 +0200 | [diff] [blame] | 1731 | return 0; |
| 1732 | } |
| 1733 | |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1734 | int i915_vma_unbind(struct i915_vma *vma) |
| 1735 | { |
| 1736 | struct i915_address_space *vm = vma->vm; |
Chris Wilson | c0e6034 | 2020-01-10 14:44:18 +0000 | [diff] [blame] | 1737 | intel_wakeref_t wakeref = 0; |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1738 | int err; |
| 1739 | |
Chris Wilson | d62f416f | 2020-01-23 22:44:59 +0000 | [diff] [blame] | 1740 | /* Optimistic wait before taking the mutex */ |
| 1741 | err = i915_vma_sync(vma); |
| 1742 | if (err) |
Chris Wilson | bffa18d | 2020-05-28 09:24:27 +0100 | [diff] [blame] | 1743 | return err; |
| 1744 | |
| 1745 | if (!drm_mm_node_allocated(&vma->node)) |
| 1746 | return 0; |
Chris Wilson | d62f416f | 2020-01-23 22:44:59 +0000 | [diff] [blame] | 1747 | |
Chris Wilson | 614654a | 2020-04-03 13:01:50 +0100 | [diff] [blame] | 1748 | if (i915_vma_is_pinned(vma)) { |
| 1749 | vma_print_allocator(vma, "is pinned"); |
| 1750 | return -EAGAIN; |
| 1751 | } |
| 1752 | |
| 1753 | if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) |
| 1754 | /* XXX not always required: nop_clear_range */ |
| 1755 | wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); |
| 1756 | |
Chris Wilson | d002491 | 2020-03-26 14:27:27 +0000 | [diff] [blame] | 1757 | err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref); |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1758 | if (err) |
Chris Wilson | d62f416f | 2020-01-23 22:44:59 +0000 | [diff] [blame] | 1759 | goto out_rpm; |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1760 | |
| 1761 | err = __i915_vma_unbind(vma); |
| 1762 | mutex_unlock(&vm->mutex); |
| 1763 | |
Chris Wilson | d62f416f | 2020-01-23 22:44:59 +0000 | [diff] [blame] | 1764 | out_rpm: |
Chris Wilson | c0e6034 | 2020-01-10 14:44:18 +0000 | [diff] [blame] | 1765 | if (wakeref) |
| 1766 | intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); |
Chris Wilson | 2850748 | 2019-10-04 14:39:58 +0100 | [diff] [blame] | 1767 | return err; |
| 1768 | } |
| 1769 | |
Chris Wilson | 1aff190 | 2019-08-02 22:21:36 +0100 | [diff] [blame] | 1770 | struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) |
| 1771 | { |
| 1772 | i915_gem_object_make_unshrinkable(vma->obj); |
| 1773 | return vma; |
| 1774 | } |
| 1775 | |
| 1776 | void i915_vma_make_shrinkable(struct i915_vma *vma) |
| 1777 | { |
| 1778 | i915_gem_object_make_shrinkable(vma->obj); |
| 1779 | } |
| 1780 | |
| 1781 | void i915_vma_make_purgeable(struct i915_vma *vma) |
| 1782 | { |
| 1783 | i915_gem_object_make_purgeable(vma->obj); |
| 1784 | } |
| 1785 | |
Chris Wilson | e3c7a1c | 2017-02-13 17:15:45 +0000 | [diff] [blame] | 1786 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
| 1787 | #include "selftests/i915_vma.c" |
| 1788 | #endif |
Chris Wilson | 13f1bfd | 2019-02-28 10:20:34 +0000 | [diff] [blame] | 1789 | |
Daniel Vetter | 64fc7cc | 2021-07-27 14:10:35 +0200 | [diff] [blame] | 1790 | void i915_vma_module_exit(void) |
Chris Wilson | 103b76ee | 2019-03-05 21:38:30 +0000 | [diff] [blame] | 1791 | { |
Daniel Vetter | 64fc7cc | 2021-07-27 14:10:35 +0200 | [diff] [blame] | 1792 | kmem_cache_destroy(slab_vmas); |
Chris Wilson | 103b76ee | 2019-03-05 21:38:30 +0000 | [diff] [blame] | 1793 | } |
| 1794 | |
Daniel Vetter | 64fc7cc | 2021-07-27 14:10:35 +0200 | [diff] [blame] | 1795 | int __init i915_vma_module_init(void) |
Chris Wilson | 13f1bfd | 2019-02-28 10:20:34 +0000 | [diff] [blame] | 1796 | { |
Daniel Vetter | 64fc7cc | 2021-07-27 14:10:35 +0200 | [diff] [blame] | 1797 | slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); |
| 1798 | if (!slab_vmas) |
Chris Wilson | 13f1bfd | 2019-02-28 10:20:34 +0000 | [diff] [blame] | 1799 | return -ENOMEM; |
| 1800 | |
| 1801 | return 0; |
| 1802 | } |