/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_display.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"

static int
insert_mappable_node(struct i915_ggtt *ggtt,
                     struct drm_mm_node *node, u32 size)
{
        memset(node, 0, sizeof(*node));
        return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
                                           size, 0, I915_COLOR_UNEVICTABLE,
                                           0, ggtt->mappable_end,
                                           DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
        drm_mm_remove_node(node);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        u64 pinned;

        mutex_lock(&ggtt->vm.mutex);

        pinned = ggtt->vm.reserved;
        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;

        mutex_unlock(&ggtt->vm.mutex);

        args->aper_size = ggtt->vm.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}
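
/*
 * Userspace sketch of the ioctl above (illustrative only, assuming an
 * open DRM fd and libdrm's drmIoctl()):
 *
 *	struct drm_i915_gem_get_aperture aper = { 0 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aper) == 0)
 *		printf("GGTT size %llu, available %llu\n",
 *		       (unsigned long long)aper.aper_size,
 *		       (unsigned long long)aper.aper_available_size);
 */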

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        LIST_HEAD(still_in_list);
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        /* Closed vma are removed from the obj->vma.list - but they may
         * still have an active binding on the object. To remove those we
         * must wait for all rendering to the object to complete (as
         * unbinding must do anyway), and retire the requests.
         */
        ret = i915_gem_object_set_to_cpu_domain(obj, false);
        if (ret)
                return ret;

        spin_lock(&obj->vma.lock);
        while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
                                                       struct i915_vma,
                                                       obj_link))) {
                list_move_tail(&vma->obj_link, &still_in_list);
                spin_unlock(&obj->vma.lock);

                ret = i915_vma_unbind(vma);

                spin_lock(&obj->vma.lock);
        }
        list_splice(&still_in_list, &obj->vma.list);
        spin_unlock(&obj->vma.lock);

        return ret;
}
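
/*
 * Note on the loop above: obj->vma.lock is a spinlock, but
 * i915_vma_unbind() may sleep, so each vma is first moved onto the
 * local still_in_list (which guarantees forward progress) and the
 * lock is dropped around the unbind; the survivors are spliced back
 * once the walk completes.
 */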

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
                           unsigned int flags,
                           long timeout)
{
        struct i915_request *rq;

        BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return timeout;

        if (!dma_fence_is_i915(fence))
                return dma_fence_wait_timeout(fence,
                                              flags & I915_WAIT_INTERRUPTIBLE,
                                              timeout);

        rq = to_request(fence);
        if (i915_request_completed(rq))
                goto out;

        timeout = i915_request_wait(rq, flags, timeout);

out:
        if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
                i915_request_retire_upto(rq);

        return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
                                 unsigned int flags,
                                 long timeout)
{
        unsigned int seq = __read_seqcount_begin(&resv->seq);
        struct dma_fence *excl;
        bool prune_fences = false;

        if (flags & I915_WAIT_ALL) {
                struct dma_fence **shared;
                unsigned int count, i;
                int ret;

                ret = reservation_object_get_fences_rcu(resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        timeout = i915_gem_object_wait_fence(shared[i],
                                                             flags, timeout);
                        if (timeout < 0)
                                break;

                        dma_fence_put(shared[i]);
                }

                for (; i < count; i++)
                        dma_fence_put(shared[i]);
                kfree(shared);

                /*
                 * If both shared fences and an exclusive fence exist,
                 * then by construction the shared fences must be later
                 * than the exclusive fence. If we successfully wait for
                 * all the shared fences, we know that the exclusive fence
                 * must also have been signaled. If all the shared fences
                 * are signaled, we can prune the array and recover the
                 * floating references on the fences/requests.
                 */
                prune_fences = count && timeout >= 0;
        } else {
                excl = reservation_object_get_excl_rcu(resv);
        }

        if (excl && timeout >= 0)
                timeout = i915_gem_object_wait_fence(excl, flags, timeout);

        dma_fence_put(excl);

        /*
         * Opportunistically prune the fences iff we know they have *all* been
         * signaled and that the reservation object has not been changed (i.e.
         * no new fences have been added).
         */
        if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
                if (reservation_object_trylock(resv)) {
                        if (!__read_seqcount_retry(&resv->seq, seq))
                                reservation_object_add_excl_fence(resv, NULL);
                        reservation_object_unlock(resv);
                }
        }

        return timeout;
}

static void __fence_set_priority(struct dma_fence *fence,
                                 const struct i915_sched_attr *attr)
{
        struct i915_request *rq;
        struct intel_engine_cs *engine;

        if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
                return;

        rq = to_request(fence);
        engine = rq->engine;

        local_bh_disable();
        rcu_read_lock(); /* RCU serialisation for set-wedged protection */
        if (engine->schedule)
                engine->schedule(rq, attr);
        rcu_read_unlock();
        local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}

static void fence_set_priority(struct dma_fence *fence,
                               const struct i915_sched_attr *attr)
{
        /* Recurse once into a fence-array */
        if (dma_fence_is_array(fence)) {
                struct dma_fence_array *array = to_dma_fence_array(fence);
                int i;

                for (i = 0; i < array->num_fences; i++)
                        __fence_set_priority(array->fences[i], attr);
        } else {
                __fence_set_priority(fence, attr);
        }
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                              unsigned int flags,
                              const struct i915_sched_attr *attr)
{
        struct dma_fence *excl;

        if (flags & I915_WAIT_ALL) {
                struct dma_fence **shared;
                unsigned int count, i;
                int ret;

                ret = reservation_object_get_fences_rcu(obj->resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        fence_set_priority(shared[i], attr);
                        dma_fence_put(shared[i]);
                }

                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(obj->resv);
        }

        if (excl) {
                fence_set_priority(excl, attr);
                dma_fence_put(excl);
        }
        return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
                     unsigned int flags,
                     long timeout)
{
        might_sleep();
        GEM_BUG_ON(timeout < 0);

        timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
        return timeout < 0 ? timeout : 0;
}
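
/*
 * Typical caller of i915_gem_object_wait() (sketch, mirroring the call
 * in i915_gem_pread_ioctl() below): an interruptible wait on
 * outstanding writes, bounded only by the scheduler:
 *
 *	err = i915_gem_object_wait(obj,
 *				   I915_WAIT_INTERRUPTIBLE,
 *				   MAX_SCHEDULE_TIMEOUT);
 */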

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file)
{
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);

        /* We manually control the domain here and pretend that it
         * remains coherent, i.e. in the GTT domain, like shmem_pwrite.
         */
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;

        drm_clflush_virt_range(vaddr, args->size);
        i915_gem_chipset_flush(to_i915(obj->base.dev));

        intel_fb_obj_flush(obj, ORIGIN_CPU);
        return 0;
}

static int
i915_gem_create(struct drm_file *file,
                struct drm_i915_private *dev_priv,
                u64 *size_p,
                u32 *handle_p)
{
        struct drm_i915_gem_object *obj;
        u32 handle;
        u64 size;
        int ret;

        size = round_up(*size_p, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        /* Allocate the new object */
        obj = i915_gem_object_create_shmem(dev_priv, size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        *handle_p = handle;
        *size_p = size;
        return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        int cpp = DIV_ROUND_UP(args->bpp, 8);
        u32 format;

        switch (cpp) {
        case 1:
                format = DRM_FORMAT_C8;
                break;
        case 2:
                format = DRM_FORMAT_RGB565;
                break;
        case 4:
                format = DRM_FORMAT_XRGB8888;
                break;
        default:
                return -EINVAL;
        }

        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * cpp, 64);

        /* align stride to page size so that we can remap */
        if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
                                                    DRM_FORMAT_MOD_LINEAR))
                args->pitch = ALIGN(args->pitch, 4096);

        args->size = args->pitch * args->height;
        return i915_gem_create(file, to_i915(dev),
                               &args->size, &args->handle);
}
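
/*
 * Worked example for i915_gem_dumb_create() (sketch): width = 1920,
 * height = 1080, bpp = 32 gives cpp = 4 and DRM_FORMAT_XRGB8888;
 * pitch = ALIGN(1920 * 4, 64) = 7680 bytes (assuming that stays below
 * the linear stride limit, so no 4096-byte realignment), and
 * size = 7680 * 1080 = 8294400 bytes, which i915_gem_create() rounds
 * up to a page multiple (here it already is one).
 */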

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_create *args = data;

        i915_gem_flush_free_objects(dev_priv);

        return i915_gem_create(file, dev_priv,
                               &args->size, &args->handle);
}
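
/*
 * Userspace sketch of the create ioctl above (illustrative, assuming
 * an open DRM fd and libdrm's drmIoctl(); use_handle() is a
 * hypothetical consumer):
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);
 */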

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
        intel_wakeref_t wakeref;

        /*
         * No actual flushing is required for the GTT write domain for reads
         * from the GTT domain. Writes to it "immediately" go to main memory
         * as far as we know, so there's no chipset flush. It also doesn't
         * land in the GPU render cache.
         *
         * However, we do have to enforce the order so that all writes through
         * the GTT land before any writes to the device, such as updates to
         * the GATT itself.
         *
         * We also have to wait a bit for the writes to land from the GTT.
         * An uncached read (i.e. mmio) seems to be ideal for the round-trip
         * timing. This issue has only been observed when switching quickly
         * between GTT writes and CPU reads from inside the kernel on recent hw,
         * and it appears to only affect discrete GTT blocks (i.e. on LLC
         * system agents we cannot reproduce this behaviour; until Cannonlake,
         * that is!).
         */

        wmb();

        if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
                return;

        i915_gem_chipset_flush(dev_priv);

        with_intel_runtime_pm(dev_priv, wakeref) {
                spin_lock_irq(&dev_priv->uncore.lock);

                POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));

                spin_unlock_irq(&dev_priv->uncore.lock);
        }
}

static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
            bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_to_user(user_data, vaddr + offset, len);

        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args)
{
        char __user *user_data;
        u64 remain;
        unsigned int needs_clflush;
        unsigned int idx, offset;
        int ret;

        ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
        if (ret)
                return ret;

        ret = i915_gem_object_prepare_read(obj, &needs_clflush);
        mutex_unlock(&obj->base.dev->struct_mutex);
        if (ret)
                return ret;

        remain = args->size;
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pread(page, offset, length, user_data,
                                  needs_clflush);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_finish_access(obj);
        return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
              loff_t base, int offset,
              char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_to_user_inatomic(user_data,
                                            (void __force *)vaddr + offset,
                                            length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_to_user(user_data,
                                         (void __force *)vaddr + offset,
                                         length);
                io_mapping_unmap(vaddr);
        }
        return unwritten;
}

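/*
 * Note on gtt_user_read() above: pagefaults are disabled across an
 * atomic mapping, so the __copy_to_user_inatomic() fails if the
 * destination user page is not already faulted in; we then retry
 * through a regular mapping where copy_to_user() may fault and sleep.
 * ggtt_write() below follows the same pattern for the write side.
 */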
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                   const struct drm_i915_gem_pread *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct i915_vma *vma;
        void __user *user_data;
        u64 remain, offset;
        int ret;

        ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
        if (ret)
                return ret;

        wakeref = intel_runtime_pm_get(i915);
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       PIN_MAPPABLE |
                                       PIN_NONFAULT |
                                       PIN_NONBLOCK);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.allocated = false;
                ret = i915_vma_put_fence(vma);
                if (ret) {
                        i915_vma_unpin(vma);
                        vma = ERR_PTR(ret);
                }
        }
        if (IS_ERR(vma)) {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_unlock;
                GEM_BUG_ON(!node.allocated);
        }

        ret = i915_gem_object_set_to_gtt_domain(obj, false);
        if (ret)
                goto out_unpin;

        mutex_unlock(&i915->drm.struct_mutex);

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (node.allocated) {
                        wmb();
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                        wmb();
                } else {
                        page_base += offset & PAGE_MASK;
                }

                if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        mutex_lock(&i915->drm.struct_mutex);
out_unpin:
        if (node.allocated) {
                wmb();
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(&node);
        } else {
                i915_vma_unpin(vma);
        }
out_unlock:
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check source. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto out;

        ret = i915_gem_shmem_pread(obj, args);
        if (ret == -EFAULT || ret == -ENODEV)
                ret = i915_gem_gtt_pread(obj, args);

        i915_gem_object_unpin_pages(obj);
out:
        i915_gem_object_put(obj);
        return ret;
}
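
/*
 * Userspace sketch of the pread ioctl above (illustrative, assuming an
 * open DRM fd, a valid buffer handle and libdrm's drmIoctl()): reading
 * 4 KiB from the start of the object:
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (__u64)(uintptr_t)buf,
 *	};
 *
 *	err = drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */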
| 699 | |
Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 700 | /* This is the fast write path which cannot handle |
| 701 | * page faults in the source data |
Linus Torvalds | 9b7530cc | 2008-10-20 14:16:43 -0700 | [diff] [blame] | 702 | */ |
Linus Torvalds | 9b7530cc | 2008-10-20 14:16:43 -0700 | [diff] [blame] | 703 | |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 704 | static inline bool |
| 705 | ggtt_write(struct io_mapping *mapping, |
| 706 | loff_t base, int offset, |
| 707 | char __user *user_data, int length) |
Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 708 | { |
Ville Syrjälä | afe722b | 2017-09-01 20:12:52 +0300 | [diff] [blame] | 709 | void __iomem *vaddr; |
Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 710 | unsigned long unwritten; |
| 711 | |
Ben Widawsky | 4f0c7cf | 2012-04-16 14:07:47 -0700 | [diff] [blame] | 712 | /* We can use the cpu mem copy function because this is X86. */ |
Ville Syrjälä | afe722b | 2017-09-01 20:12:52 +0300 | [diff] [blame] | 713 | vaddr = io_mapping_map_atomic_wc(mapping, base); |
| 714 | unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset, |
Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 715 | user_data, length); |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 716 | io_mapping_unmap_atomic(vaddr); |
| 717 | if (unwritten) { |
Ville Syrjälä | afe722b | 2017-09-01 20:12:52 +0300 | [diff] [blame] | 718 | vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE); |
| 719 | unwritten = copy_from_user((void __force *)vaddr + offset, |
| 720 | user_data, length); |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 721 | io_mapping_unmap(vaddr); |
| 722 | } |
Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 723 | |
Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 724 | return unwritten; |
| 725 | } |
| 726 | |
Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 727 | /** |
| 728 | * This is the fast pwrite path, where we copy the data directly from the |
| 729 | * user into the GTT, uncached. |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 730 | * @obj: i915 GEM object |
Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 731 | * @args: pwrite arguments structure |
Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 732 | */ |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 733 | static int |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 734 | i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, |
| 735 | const struct drm_i915_gem_pwrite *args) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 736 | { |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 737 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 738 | struct i915_ggtt *ggtt = &i915->ggtt; |
Chris Wilson | 538ef96 | 2019-01-14 14:21:18 +0000 | [diff] [blame] | 739 | intel_wakeref_t wakeref; |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 740 | struct drm_mm_node node; |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 741 | struct i915_vma *vma; |
| 742 | u64 remain, offset; |
| 743 | void __user *user_data; |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 744 | int ret; |
Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 745 | |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 746 | ret = mutex_lock_interruptible(&i915->drm.struct_mutex); |
| 747 | if (ret) |
| 748 | return ret; |
Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 749 | |
Chris Wilson | 8bd81815 | 2017-10-19 07:37:33 +0100 | [diff] [blame] | 750 | if (i915_gem_object_has_struct_page(obj)) { |
| 751 | /* |
| 752 | * Avoid waking the device up if we can fallback, as |
| 753 | * waking/resuming is very slow (worst-case 10-100 ms |
| 754 | * depending on PCI sleeps and our own resume time). |
| 755 | * This easily dwarfs any performance advantage from |
| 756 | * using the cache bypass of indirect GGTT access. |
| 757 | */ |
Chris Wilson | 538ef96 | 2019-01-14 14:21:18 +0000 | [diff] [blame] | 758 | wakeref = intel_runtime_pm_get_if_in_use(i915); |
| 759 | if (!wakeref) { |
Chris Wilson | 8bd81815 | 2017-10-19 07:37:33 +0100 | [diff] [blame] | 760 | ret = -EFAULT; |
| 761 | goto out_unlock; |
| 762 | } |
| 763 | } else { |
| 764 | /* No backing pages, no fallback, we must force GGTT access */ |
Chris Wilson | 538ef96 | 2019-01-14 14:21:18 +0000 | [diff] [blame] | 765 | wakeref = intel_runtime_pm_get(i915); |
Chris Wilson | 8bd81815 | 2017-10-19 07:37:33 +0100 | [diff] [blame] | 766 | } |
| 767 | |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 768 | vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, |
Chris Wilson | a3259ca | 2017-10-09 09:44:00 +0100 | [diff] [blame] | 769 | PIN_MAPPABLE | |
| 770 | PIN_NONFAULT | |
| 771 | PIN_NONBLOCK); |
Chris Wilson | 1803458 | 2016-08-18 17:16:45 +0100 | [diff] [blame] | 772 | if (!IS_ERR(vma)) { |
| 773 | node.start = i915_ggtt_offset(vma); |
| 774 | node.allocated = false; |
Chris Wilson | 49ef529 | 2016-08-18 17:17:00 +0100 | [diff] [blame] | 775 | ret = i915_vma_put_fence(vma); |
Chris Wilson | 1803458 | 2016-08-18 17:16:45 +0100 | [diff] [blame] | 776 | if (ret) { |
| 777 | i915_vma_unpin(vma); |
| 778 | vma = ERR_PTR(ret); |
| 779 | } |
| 780 | } |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 781 | if (IS_ERR(vma)) { |
Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 782 | ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 783 | if (ret) |
Chris Wilson | 8bd81815 | 2017-10-19 07:37:33 +0100 | [diff] [blame] | 784 | goto out_rpm; |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 785 | GEM_BUG_ON(!node.allocated); |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 786 | } |
Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 787 | |
| 788 | ret = i915_gem_object_set_to_gtt_domain(obj, true); |
| 789 | if (ret) |
| 790 | goto out_unpin; |
| 791 | |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 792 | mutex_unlock(&i915->drm.struct_mutex); |
| 793 | |
Chris Wilson | b19482d | 2016-08-18 17:16:43 +0100 | [diff] [blame] | 794 | intel_fb_obj_invalidate(obj, ORIGIN_CPU); |
Paulo Zanoni | 063e4e6 | 2015-02-13 17:23:45 -0200 | [diff] [blame] | 795 | |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 796 | user_data = u64_to_user_ptr(args->data_ptr); |
| 797 | offset = args->offset; |
| 798 | remain = args->size; |
| 799 | while (remain) { |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 800 | /* Operation in this page |
| 801 | * |
Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 802 | * page_base = page offset within aperture |
| 803 | * page_offset = offset within page |
| 804 | * page_length = bytes to copy for this page |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 805 | */ |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 806 | u32 page_base = node.start; |
Chris Wilson | bb6dc8d | 2016-10-28 13:58:39 +0100 | [diff] [blame] | 807 | unsigned int page_offset = offset_in_page(offset); |
| 808 | unsigned int page_length = PAGE_SIZE - page_offset; |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 809 | page_length = remain < page_length ? remain : page_length; |
| 810 | if (node.allocated) { |
| 811 | wmb(); /* flush the write before we modify the GGTT */ |
Chris Wilson | 82ad644 | 2018-06-05 16:37:58 +0100 | [diff] [blame] | 812 | ggtt->vm.insert_page(&ggtt->vm, |
| 813 | i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), |
| 814 | node.start, I915_CACHE_NONE, 0); |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 815 | wmb(); /* flush modifications to the GGTT (insert_page) */ |
| 816 | } else { |
| 817 | page_base += offset & PAGE_MASK; |
| 818 | } |
Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 819 | /* If we get a fault while copying data, then (presumably) our |
Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 820 | * source page isn't available. Return the error and we'll |
| 821 | * retry in the slow path. |
Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 822 | * If the object is non-shmem backed, we retry again with the |
| 823 | * path that handles page fault. |
Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 824 | */ |
Matthew Auld | 73ebd50 | 2017-12-11 15:18:20 +0000 | [diff] [blame] | 825 | if (ggtt_write(&ggtt->iomap, page_base, page_offset, |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 826 | user_data, page_length)) { |
| 827 | ret = -EFAULT; |
| 828 | break; |
Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 829 | } |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 830 | |
Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 831 | remain -= page_length; |
| 832 | user_data += page_length; |
| 833 | offset += page_length; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 834 | } |
Chris Wilson | d59b21e | 2017-02-22 11:40:49 +0000 | [diff] [blame] | 835 | intel_fb_obj_flush(obj, ORIGIN_CPU); |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 836 | |
| 837 | mutex_lock(&i915->drm.struct_mutex); |
Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 838 | out_unpin: |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 839 | if (node.allocated) { |
| 840 | wmb(); |
Chris Wilson | 82ad644 | 2018-06-05 16:37:58 +0100 | [diff] [blame] | 841 | ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 842 | remove_mappable_node(&node); |
| 843 | } else { |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 844 | i915_vma_unpin(vma); |
Ankitprasad Sharma | 4f1959e | 2016-06-10 14:23:01 +0530 | [diff] [blame] | 845 | } |
Chris Wilson | 8bd81815 | 2017-10-19 07:37:33 +0100 | [diff] [blame] | 846 | out_rpm: |
Chris Wilson | 538ef96 | 2019-01-14 14:21:18 +0000 | [diff] [blame] | 847 | intel_runtime_pm_put(i915, wakeref); |
Chris Wilson | 8bd81815 | 2017-10-19 07:37:33 +0100 | [diff] [blame] | 848 | out_unlock: |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 849 | mutex_unlock(&i915->drm.struct_mutex); |
Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 850 | return ret; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 851 | } |
| 852 | |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 853 | /* Per-page copy function for the shmem pwrite fastpath. |
| 854 | * Flushes invalid cachelines before writing to the target if |
| 855 | * needs_clflush_before is set and flushes out any written cachelines after |
| 856 | * writing if needs_clflush is set. |
| 857 | */ |
Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 858 | static int |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 859 | shmem_pwrite(struct page *page, int offset, int len, char __user *user_data, |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 860 | bool needs_clflush_before, |
| 861 | bool needs_clflush_after) |
Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 862 | { |
Chris Wilson | b9d126e | 2019-01-05 12:07:58 +0000 | [diff] [blame] | 863 | char *vaddr; |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 864 | int ret; |
Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 865 | |
Chris Wilson | b9d126e | 2019-01-05 12:07:58 +0000 | [diff] [blame] | 866 | vaddr = kmap(page); |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 867 | |
Chris Wilson | b9d126e | 2019-01-05 12:07:58 +0000 | [diff] [blame] | 868 | if (needs_clflush_before) |
| 869 | drm_clflush_virt_range(vaddr + offset, len); |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 870 | |
Chris Wilson | b9d126e | 2019-01-05 12:07:58 +0000 | [diff] [blame] | 871 | ret = __copy_from_user(vaddr + offset, user_data, len); |
| 872 | if (!ret && needs_clflush_after) |
| 873 | drm_clflush_virt_range(vaddr + offset, len); |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 874 | |
Chris Wilson | b9d126e | 2019-01-05 12:07:58 +0000 | [diff] [blame] | 875 | kunmap(page); |
| 876 | |
| 877 | return ret ? -EFAULT : 0; |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 878 | } |
| 879 | |
| 880 | static int |
| 881 | i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, |
| 882 | const struct drm_i915_gem_pwrite *args) |
| 883 | { |
| 884 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
| 885 | void __user *user_data; |
| 886 | u64 remain; |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 887 | unsigned int partial_cacheline_write; |
| 888 | unsigned int needs_clflush; |
| 889 | unsigned int offset, idx; |
| 890 | int ret; |
| 891 | |
| 892 | ret = mutex_lock_interruptible(&i915->drm.struct_mutex); |
Chris Wilson | 43394c7 | 2016-08-18 17:16:47 +0100 | [diff] [blame] | 893 | if (ret) |
| 894 | return ret; |
Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 895 | |
Chris Wilson | f0e4a06 | 2019-05-28 10:29:48 +0100 | [diff] [blame] | 896 | ret = i915_gem_object_prepare_write(obj, &needs_clflush); |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 897 | mutex_unlock(&i915->drm.struct_mutex); |
| 898 | if (ret) |
| 899 | return ret; |
| 900 | |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 901 | /* If we don't overwrite a cacheline completely we need to be |
| 902 | * careful to have up-to-date data by first clflushing. Don't |
|  903 |  * overcomplicate things and flush the entire page. |
| 904 | */ |
| 905 | partial_cacheline_write = 0; |
| 906 | if (needs_clflush & CLFLUSH_BEFORE) |
| 907 | partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1; |
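|      | 	/* |
|      | 	 * x86_clflush_size is a power of two, so subtracting 1 yields a |
|      | 	 * low-bit mask; (offset | length) & mask below is non-zero iff |
|      | 	 * either end of a copy is not cacheline aligned. For example, |
|      | 	 * with 64-byte cachelines, offset = 68 gives 68 & 63 = 4 and so |
|      | 	 * forces the clflush before the write. |
|      | 	 */ |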
| 908 | |
Chris Wilson | 43394c7 | 2016-08-18 17:16:47 +0100 | [diff] [blame] | 909 | user_data = u64_to_user_ptr(args->data_ptr); |
Chris Wilson | 43394c7 | 2016-08-18 17:16:47 +0100 | [diff] [blame] | 910 | remain = args->size; |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 911 | offset = offset_in_page(args->offset); |
| 912 | for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { |
| 913 | struct page *page = i915_gem_object_get_page(obj, idx); |
Chris Wilson | a5e856a5 | 2018-10-12 15:02:28 +0100 | [diff] [blame] | 914 | unsigned int length = min_t(u64, remain, PAGE_SIZE - offset); |
Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 915 | |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 916 | ret = shmem_pwrite(page, offset, length, user_data, |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 917 | (offset | length) & partial_cacheline_write, |
| 918 | needs_clflush & CLFLUSH_AFTER); |
| 919 | if (ret) |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 920 | break; |
| 921 | |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 922 | remain -= length; |
| 923 | user_data += length; |
| 924 | offset = 0; |
Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 925 | } |
| 926 | |
Chris Wilson | d59b21e | 2017-02-22 11:40:49 +0000 | [diff] [blame] | 927 | intel_fb_obj_flush(obj, ORIGIN_CPU); |
Chris Wilson | f0e4a06 | 2019-05-28 10:29:48 +0100 | [diff] [blame] | 928 | i915_gem_object_finish_access(obj); |
Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 929 | return ret; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 930 | } |
| 931 | |
| 932 | /** |
|  933 |  * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle. |
Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 934 | * @dev: drm device |
| 935 | * @data: ioctl data blob |
| 936 | * @file: drm file |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 937 | * |
| 938 | * On error, the contents of the buffer that were to be modified are undefined. |
| 939 | */ |
| 940 | int |
| 941 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 942 | struct drm_file *file) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 943 | { |
| 944 | struct drm_i915_gem_pwrite *args = data; |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 945 | struct drm_i915_gem_object *obj; |
Chris Wilson | 51311d0 | 2010-11-17 09:10:42 +0000 | [diff] [blame] | 946 | int ret; |
| 947 | |
| 948 | if (args->size == 0) |
| 949 | return 0; |
| 950 | |
Linus Torvalds | 96d4f26 | 2019-01-03 18:57:57 -0800 | [diff] [blame] | 951 | if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size)) |
Chris Wilson | 51311d0 | 2010-11-17 09:10:42 +0000 | [diff] [blame] | 952 | return -EFAULT; |
| 953 | |
Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 954 | obj = i915_gem_object_lookup(file, args->handle); |
Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 955 | if (!obj) |
| 956 | return -ENOENT; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 957 | |
Chris Wilson | 7dcd249 | 2010-09-26 20:21:44 +0100 | [diff] [blame] | 958 | /* Bounds check destination. */ |
Matthew Auld | 966d5bf | 2016-12-13 20:32:22 +0000 | [diff] [blame] | 959 | if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { |
Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 960 | ret = -EINVAL; |
Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 961 | goto err; |
Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 962 | } |
| 963 | |
Chris Wilson | f8c1cce | 2018-07-12 19:53:14 +0100 | [diff] [blame] | 964 | /* Writes not allowed into this read-only object */ |
| 965 | if (i915_gem_object_is_readonly(obj)) { |
| 966 | ret = -EINVAL; |
| 967 | goto err; |
| 968 | } |
| 969 | |
Chris Wilson | db53a30 | 2011-02-03 11:57:46 +0000 | [diff] [blame] | 970 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); |
| 971 | |
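|      | 	/* |
|      | 	 * Backends may provide a dedicated pwrite; -ENODEV (no ->pwrite, |
|      | 	 * or the backend declining the request) means "fall through to |
|      | 	 * the generic paths below". |
|      | 	 */ |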
Chris Wilson | 7c55e2c | 2017-03-07 12:03:38 +0000 | [diff] [blame] | 972 | ret = -ENODEV; |
| 973 | if (obj->ops->pwrite) |
| 974 | ret = obj->ops->pwrite(obj, args); |
| 975 | if (ret != -ENODEV) |
| 976 | goto err; |
| 977 | |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 978 | ret = i915_gem_object_wait(obj, |
| 979 | I915_WAIT_INTERRUPTIBLE | |
| 980 | I915_WAIT_ALL, |
Chris Wilson | 62eb3c2 | 2019-02-13 09:25:04 +0000 | [diff] [blame] | 981 | MAX_SCHEDULE_TIMEOUT); |
Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 982 | if (ret) |
| 983 | goto err; |
| 984 | |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 985 | ret = i915_gem_object_pin_pages(obj); |
Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 986 | if (ret) |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 987 | goto err; |
Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 988 | |
Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 989 | ret = -EFAULT; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 990 | /* We can only do the GTT pwrite on untiled buffers, as otherwise |
| 991 | * it would end up going through the fenced access, and we'll get |
| 992 | * different detiling behavior between reading and writing. |
| 993 | * pread/pwrite currently are reading and writing from the CPU |
| 994 | * perspective, requiring manual detiling by the client. |
| 995 | */ |
Chris Wilson | 6eae005 | 2016-06-20 15:05:52 +0100 | [diff] [blame] | 996 | if (!i915_gem_object_has_struct_page(obj) || |
Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 997 | cpu_write_needs_clflush(obj)) |
Daniel Vetter | 935aaa6 | 2012-03-25 19:47:35 +0200 | [diff] [blame] | 998 | /* Note that the gtt paths might fail with non-page-backed user |
| 999 | * pointers (e.g. gtt mappings when moving data between |
Chris Wilson | 9c870d0 | 2016-10-24 13:42:15 +0100 | [diff] [blame] | 1000 | 	 * textures). Fall back to the shmem path in that case. |
| 1001 | */ |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1002 | ret = i915_gem_gtt_pwrite_fast(obj, args); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1003 | |
Chris Wilson | d1054ee | 2016-07-16 18:42:36 +0100 | [diff] [blame] | 1004 | if (ret == -EFAULT || ret == -ENOSPC) { |
Chris Wilson | 6a2c423 | 2014-11-04 04:51:40 -0800 | [diff] [blame] | 1005 | if (obj->phys_handle) |
| 1006 | ret = i915_gem_phys_pwrite(obj, args, file); |
Ankitprasad Sharma | b50a537 | 2016-06-10 14:23:03 +0530 | [diff] [blame] | 1007 | else |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1008 | ret = i915_gem_shmem_pwrite(obj, args); |
Chris Wilson | 6a2c423 | 2014-11-04 04:51:40 -0800 | [diff] [blame] | 1009 | } |
Daniel Vetter | 5c0480f | 2011-12-14 13:57:30 +0100 | [diff] [blame] | 1010 | |
Chris Wilson | fe11562 | 2016-10-28 13:58:40 +0100 | [diff] [blame] | 1011 | i915_gem_object_unpin_pages(obj); |
Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1012 | err: |
Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1013 | i915_gem_object_put(obj); |
Chris Wilson | 258a5ed | 2016-08-05 10:14:16 +0100 | [diff] [blame] | 1014 | return ret; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1015 | } |
| 1016 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1017 | /** |
| 1018 |  * i915_gem_sw_finish_ioctl - Called when user space has finished writing to this buffer |
Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 1019 | * @dev: drm device |
| 1020 | * @data: ioctl data blob |
| 1021 | * @file: drm file |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1022 | */ |
| 1023 | int |
| 1024 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1025 | struct drm_file *file) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1026 | { |
| 1027 | struct drm_i915_gem_sw_finish *args = data; |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1028 | struct drm_i915_gem_object *obj; |
Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1029 | |
Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1030 | obj = i915_gem_object_lookup(file, args->handle); |
Chris Wilson | c21724c | 2016-08-05 10:14:19 +0100 | [diff] [blame] | 1031 | if (!obj) |
| 1032 | return -ENOENT; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1033 | |
Tina Zhang | a03f395 | 2017-11-14 10:25:13 +0000 | [diff] [blame] | 1034 | /* |
| 1035 | * Proxy objects are barred from CPU access, so there is no |
| 1036 | * need to ban sw_finish as it is a nop. |
| 1037 | */ |
| 1038 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1039 | /* Pinned buffers may be scanout, so flush the cache */ |
Chris Wilson | 5a97bcc | 2017-02-22 11:40:46 +0000 | [diff] [blame] | 1040 | i915_gem_object_flush_if_display(obj); |
Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1041 | i915_gem_object_put(obj); |
Chris Wilson | 5a97bcc | 2017-02-22 11:40:46 +0000 | [diff] [blame] | 1042 | |
| 1043 | return 0; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1044 | } |
| 1045 | |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 1046 | void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) |
Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 1047 | { |
Chris Wilson | 3594a3e | 2016-10-24 13:42:16 +0100 | [diff] [blame] | 1048 | struct drm_i915_gem_object *obj, *on; |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 1049 | int i; |
Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 1050 | |
Chris Wilson | 3594a3e | 2016-10-24 13:42:16 +0100 | [diff] [blame] | 1051 | /* |
| 1052 | * Only called during RPM suspend. All users of the userfault_list |
| 1053 | 	 * must hold an RPM wakeref to ensure that this cannot |
| 1054 | 	 * run concurrently with them (and they rely on the |
| 1055 | 	 * struct_mutex for protection amongst themselves). |
| 1056 | */ |
| 1057 | |
| 1058 | list_for_each_entry_safe(obj, on, |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 1059 | &dev_priv->mm.userfault_list, userfault_link) |
| 1060 | __i915_gem_object_release_mmap(obj); |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 1061 | |
| 1062 | 	/* The fences will be lost when the device powers down. If any are |
| 1063 | * in use by hardware (i.e. they are pinned), we should not be powering |
| 1064 | * down! All other fences will be reacquired by the user upon waking. |
| 1065 | */ |
| 1066 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
| 1067 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
| 1068 | |
Chris Wilson | e0ec3ec | 2017-02-03 12:57:17 +0000 | [diff] [blame] | 1069 | /* Ideally we want to assert that the fence register is not |
| 1070 | * live at this point (i.e. that no piece of code will be |
| 1071 | 	 * trying to write through fence + GTT, as that not only violates |
| 1072 | 	 * our tracking of activity and associated locking/barriers, |
| 1073 | 	 * but is also illegal given that the hw is powered down). |
| 1074 | * |
| 1075 | * Previously we used reg->pin_count as a "liveness" indicator. |
| 1076 | * That is not sufficient, and we need a more fine-grained |
| 1077 | * tool if we want to have a sanity check here. |
| 1078 | */ |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 1079 | |
| 1080 | if (!reg->vma) |
| 1081 | continue; |
| 1082 | |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 1083 | GEM_BUG_ON(i915_vma_has_userfault(reg->vma)); |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 1084 | reg->dirty = true; |
| 1085 | } |
Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 1086 | } |
| 1087 | |
Tvrtko Ursulin | f8e5786 | 2018-09-26 09:03:53 +0100 | [diff] [blame] | 1088 | bool i915_sg_trim(struct sg_table *orig_st) |
Tvrtko Ursulin | 0c40ce1 | 2016-11-09 15:13:43 +0000 | [diff] [blame] | 1089 | { |
| 1090 | struct sg_table new_st; |
| 1091 | struct scatterlist *sg, *new_sg; |
| 1092 | unsigned int i; |
| 1093 | |
| 1094 | if (orig_st->nents == orig_st->orig_nents) |
Chris Wilson | 935a2f7 | 2017-02-13 17:15:13 +0000 | [diff] [blame] | 1095 | return false; |
Tvrtko Ursulin | 0c40ce1 | 2016-11-09 15:13:43 +0000 | [diff] [blame] | 1096 | |
Chris Wilson | 8bfc478f | 2016-12-23 14:57:58 +0000 | [diff] [blame] | 1097 | if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) |
Chris Wilson | 935a2f7 | 2017-02-13 17:15:13 +0000 | [diff] [blame] | 1098 | return false; |
Tvrtko Ursulin | 0c40ce1 | 2016-11-09 15:13:43 +0000 | [diff] [blame] | 1099 | |
| 1100 | new_sg = new_st.sgl; |
| 1101 | for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { |
| 1102 | sg_set_page(new_sg, sg_page(sg), sg->length, 0); |
Matthew Auld | c6d22ab | 2018-09-20 15:27:06 +0100 | [diff] [blame] | 1103 | sg_dma_address(new_sg) = sg_dma_address(sg); |
| 1104 | sg_dma_len(new_sg) = sg_dma_len(sg); |
| 1105 | |
Tvrtko Ursulin | 0c40ce1 | 2016-11-09 15:13:43 +0000 | [diff] [blame] | 1106 | new_sg = sg_next(new_sg); |
| 1107 | } |
Chris Wilson | c2dc6cc | 2016-12-19 12:43:46 +0000 | [diff] [blame] | 1108 | GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ |
Tvrtko Ursulin | 0c40ce1 | 2016-11-09 15:13:43 +0000 | [diff] [blame] | 1109 | |
| 1110 | sg_free_table(orig_st); |
| 1111 | |
| 1112 | *orig_st = new_st; |
Chris Wilson | 935a2f7 | 2017-02-13 17:15:13 +0000 | [diff] [blame] | 1113 | return true; |
Tvrtko Ursulin | 0c40ce1 | 2016-11-09 15:13:43 +0000 | [diff] [blame] | 1114 | } |
| 1115 | |
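|      | /* |
|      |  * Map the ioctl's timeout_ns onto a scheduler timeout: negative waits |
|      |  * indefinitely, zero polls without sleeping, and a positive value is |
|      |  * converted (rounding up) to jiffies. |
|      |  */ |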
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1116 | static unsigned long to_wait_timeout(s64 timeout_ns) |
| 1117 | { |
| 1118 | if (timeout_ns < 0) |
| 1119 | return MAX_SCHEDULE_TIMEOUT; |
| 1120 | |
| 1121 | if (timeout_ns == 0) |
| 1122 | return 0; |
| 1123 | |
| 1124 | return nsecs_to_jiffies_timeout(timeout_ns); |
| 1125 | } |
| 1126 | |
Ben Widawsky | 5816d64 | 2012-04-11 11:18:19 -0700 | [diff] [blame] | 1127 | /** |
Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 1128 | * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT |
Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 1129 | * @dev: drm device pointer |
| 1130 | * @data: ioctl data blob |
| 1131 | * @file: drm file pointer |
Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 1132 | * |
| 1133 | * Returns 0 if successful, else an error is returned with the remaining time in |
| 1134 | * the timeout parameter. |
| 1135 | * -ETIME: object is still busy after timeout |
| 1136 | * -ERESTARTSYS: signal interrupted the wait |
| 1137 |  * -ENOENT: object doesn't exist |
| 1138 | * Also possible, but rare: |
Chris Wilson | b805014 | 2017-08-11 11:57:31 +0100 | [diff] [blame] | 1139 | * -EAGAIN: incomplete, restart syscall |
Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 1140 | * -ENOMEM: damn |
| 1141 | * -ENODEV: Internal IRQ fail |
| 1142 | * -E?: The add request failed |
| 1143 | * |
| 1144 | * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any |
| 1145 | * non-zero timeout parameter the wait ioctl will wait for the given number of |
| 1146 | * nanoseconds on an object becoming unbusy. Since the wait itself does so |
| 1147 | * without holding struct_mutex the object may become re-busied before this |
| 1148 |  * function completes. A similar but shorter race condition exists in the busy |
| 1149 |  * ioctl. |
| 1150 | */ |
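|      | /* |
|      |  * Userspace sketch (hypothetical fd and handle, using libdrm's |
|      |  * drmIoctl wrapper): |
|      |  * |
|      |  *	struct drm_i915_gem_wait wait = { |
|      |  *		.bo_handle = handle, |
|      |  *		.timeout_ns = 1000000,	wait up to 1ms |
|      |  *	}; |
|      |  *	err = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait); |
|      |  * |
|      |  * On ETIME the object is still busy and wait.timeout_ns has been |
|      |  * updated with the time remaining. |
|      |  */ |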
| 1151 | int |
| 1152 | i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) |
| 1153 | { |
| 1154 | struct drm_i915_gem_wait *args = data; |
| 1155 | struct drm_i915_gem_object *obj; |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1156 | ktime_t start; |
| 1157 | long ret; |
Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 1158 | |
Daniel Vetter | 11b5d51 | 2014-09-29 15:31:26 +0200 | [diff] [blame] | 1159 | if (args->flags != 0) |
| 1160 | return -EINVAL; |
| 1161 | |
Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1162 | obj = i915_gem_object_lookup(file, args->bo_handle); |
Chris Wilson | 033d549 | 2016-08-05 10:14:17 +0100 | [diff] [blame] | 1163 | if (!obj) |
Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 1164 | return -ENOENT; |
Chris Wilson | 033d549 | 2016-08-05 10:14:17 +0100 | [diff] [blame] | 1165 | |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1166 | start = ktime_get(); |
| 1167 | |
| 1168 | ret = i915_gem_object_wait(obj, |
Chris Wilson | e9eaf82 | 2018-10-01 15:47:55 +0100 | [diff] [blame] | 1169 | I915_WAIT_INTERRUPTIBLE | |
| 1170 | I915_WAIT_PRIORITY | |
| 1171 | I915_WAIT_ALL, |
Chris Wilson | 62eb3c2 | 2019-02-13 09:25:04 +0000 | [diff] [blame] | 1172 | to_wait_timeout(args->timeout_ns)); |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1173 | |
| 1174 | if (args->timeout_ns > 0) { |
| 1175 | args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); |
| 1176 | if (args->timeout_ns < 0) |
| 1177 | args->timeout_ns = 0; |
Chris Wilson | c1d2061 | 2017-02-16 12:54:41 +0000 | [diff] [blame] | 1178 | |
| 1179 | /* |
| 1180 | * Apparently ktime isn't accurate enough and occasionally has a |
| 1181 | * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch |
| 1182 | * things up to make the test happy. We allow up to 1 jiffy. |
| 1183 | * |
| 1184 | * This is a regression from the timespec->ktime conversion. |
| 1185 | */ |
| 1186 | if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) |
| 1187 | args->timeout_ns = 0; |
Chris Wilson | b805014 | 2017-08-11 11:57:31 +0100 | [diff] [blame] | 1188 | |
| 1189 | 		/* Asked to wait beyond the jiffy/scheduler precision? */ |
| 1190 | if (ret == -ETIME && args->timeout_ns) |
| 1191 | ret = -EAGAIN; |
Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 1192 | } |
| 1193 | |
Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 1194 | i915_gem_object_put(obj); |
John Harrison | ff86588 | 2014-11-24 18:49:28 +0000 | [diff] [blame] | 1195 | return ret; |
Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 1196 | } |
| 1197 | |
Chris Wilson | 25112b6 | 2017-03-30 15:50:39 +0100 | [diff] [blame] | 1198 | static int wait_for_engines(struct drm_i915_private *i915) |
| 1199 | { |
Chris Wilson | ee42c00 | 2017-12-11 19:41:34 +0000 | [diff] [blame] | 1200 | if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) { |
Chris Wilson | 59e4b19 | 2017-12-11 19:41:35 +0000 | [diff] [blame] | 1201 | dev_err(i915->drm.dev, |
| 1202 | "Failed to idle engines, declaring wedged!\n"); |
Chris Wilson | 629820f | 2018-03-09 10:11:14 +0000 | [diff] [blame] | 1203 | GEM_TRACE_DUMP(); |
Chris Wilson | cad9946 | 2017-08-26 12:09:33 +0100 | [diff] [blame] | 1204 | i915_gem_set_wedged(i915); |
| 1205 | return -EIO; |
Chris Wilson | 25112b6 | 2017-03-30 15:50:39 +0100 | [diff] [blame] | 1206 | } |
| 1207 | |
| 1208 | return 0; |
| 1209 | } |
| 1210 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1211 | static long |
| 1212 | wait_for_timelines(struct drm_i915_private *i915, |
| 1213 | unsigned int flags, long timeout) |
| 1214 | { |
| 1215 | struct i915_gt_timelines *gt = &i915->gt.timelines; |
| 1216 | struct i915_timeline *tl; |
| 1217 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1218 | 	mutex_lock(&gt->mutex); |
Chris Wilson | 9407d3b | 2019-01-28 18:18:12 +0000 | [diff] [blame] | 1219 | 	list_for_each_entry(tl, &gt->active_list, link) { |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1220 | struct i915_request *rq; |
| 1221 | |
Chris Wilson | 21950ee | 2019-02-05 13:00:05 +0000 | [diff] [blame] | 1222 | rq = i915_active_request_get_unlocked(&tl->last_request); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1223 | if (!rq) |
| 1224 | continue; |
| 1225 | |
| 1226 | 		mutex_unlock(&gt->mutex); |
| 1227 | |
| 1228 | /* |
| 1229 | * "Race-to-idle". |
| 1230 | * |
| 1231 | 		 * Switching to the kernel context is often used as a synchronous |
| 1232 | * step prior to idling, e.g. in suspend for flushing all |
| 1233 | * current operations to memory before sleeping. These we |
| 1234 | * want to complete as quickly as possible to avoid prolonged |
| 1235 | * stalls, so allow the gpu to boost to maximum clocks. |
| 1236 | */ |
| 1237 | if (flags & I915_WAIT_FOR_IDLE_BOOST) |
Chris Wilson | 62eb3c2 | 2019-02-13 09:25:04 +0000 | [diff] [blame] | 1238 | gen6_rps_boost(rq); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1239 | |
| 1240 | timeout = i915_request_wait(rq, flags, timeout); |
| 1241 | i915_request_put(rq); |
| 1242 | if (timeout < 0) |
| 1243 | return timeout; |
| 1244 | |
| 1245 | /* restart after reacquiring the lock */ |
| 1246 | 		mutex_lock(&gt->mutex); |
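|      | 		/* |
|      | 		 * The next line points tl at the list head itself, so the |
|      | 		 * list_for_each_entry() iterator resumes from the first |
|      | 		 * timeline still on the active list. |
|      | 		 */ |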
Chris Wilson | 9407d3b | 2019-01-28 18:18:12 +0000 | [diff] [blame] | 1247 | 		tl = list_entry(&gt->active_list, typeof(*tl), link); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1248 | } |
| 1249 | 	mutex_unlock(&gt->mutex); |
| 1250 | |
| 1251 | return timeout; |
| 1252 | } |
| 1253 | |
Chris Wilson | ec625fb | 2018-07-09 13:20:42 +0100 | [diff] [blame] | 1254 | int i915_gem_wait_for_idle(struct drm_i915_private *i915, |
| 1255 | unsigned int flags, long timeout) |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 1256 | { |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 1257 | GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n", |
Chris Wilson | ec625fb | 2018-07-09 13:20:42 +0100 | [diff] [blame] | 1258 | flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 1259 | timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "", |
| 1260 | yesno(i915->gt.awake)); |
Chris Wilson | 09a4c02 | 2018-05-24 09:11:35 +0100 | [diff] [blame] | 1261 | |
Chris Wilson | 863e9fd | 2017-05-30 13:13:32 +0100 | [diff] [blame] | 1262 | /* If the device is asleep, we have no requests outstanding */ |
| 1263 | if (!READ_ONCE(i915->gt.awake)) |
| 1264 | return 0; |
| 1265 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1266 | timeout = wait_for_timelines(i915, flags, timeout); |
| 1267 | if (timeout < 0) |
| 1268 | return timeout; |
| 1269 | |
Chris Wilson | 9caa34a | 2016-11-11 14:58:08 +0000 | [diff] [blame] | 1270 | if (flags & I915_WAIT_LOCKED) { |
Chris Wilson | a89d1f9 | 2018-05-02 17:38:39 +0100 | [diff] [blame] | 1271 | int err; |
Chris Wilson | 9caa34a | 2016-11-11 14:58:08 +0000 | [diff] [blame] | 1272 | |
| 1273 | lockdep_assert_held(&i915->drm.struct_mutex); |
| 1274 | |
Chris Wilson | a61b47f | 2018-06-27 12:53:34 +0100 | [diff] [blame] | 1275 | err = wait_for_engines(i915); |
| 1276 | if (err) |
| 1277 | return err; |
| 1278 | |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1279 | i915_retire_requests(i915); |
Chris Wilson | a89d1f9 | 2018-05-02 17:38:39 +0100 | [diff] [blame] | 1280 | } |
Chris Wilson | a61b47f | 2018-06-27 12:53:34 +0100 | [diff] [blame] | 1281 | |
| 1282 | return 0; |
Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 1283 | } |
| 1284 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1285 | /* Throttle our rendering by waiting until the ring has completed our requests |
| 1286 | * emitted over 20 msec ago. |
| 1287 | * |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1288 | * Note that if we were to use the current jiffies each time around the loop, |
| 1289 | * we wouldn't escape the function with any frames outstanding if the time to |
| 1290 | * render a frame was over 20ms. |
| 1291 | * |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1292 | * This should get us reasonable parallelism between CPU and GPU but also |
| 1293 | * relatively low latency when blocking on a particular request to finish. |
| 1294 | */ |
| 1295 | static int |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1296 | i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1297 | { |
Chris Wilson | fac5e23 | 2016-07-04 11:34:36 +0100 | [diff] [blame] | 1298 | struct drm_i915_private *dev_priv = to_i915(dev); |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1299 | struct drm_i915_file_private *file_priv = file->driver_priv; |
Chris Wilson | d0bc54f | 2015-05-21 21:01:48 +0100 | [diff] [blame] | 1300 | unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1301 | struct i915_request *request, *target = NULL; |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1302 | long ret; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1303 | |
Chris Wilson | f4457ae | 2016-04-13 17:35:08 +0100 | [diff] [blame] | 1304 | /* ABI: return -EIO if already wedged */ |
Chris Wilson | c41166f | 2019-02-20 14:56:37 +0000 | [diff] [blame] | 1305 | ret = i915_terminally_wedged(dev_priv); |
| 1306 | if (ret) |
| 1307 | return ret; |
Chris Wilson | e110e8d | 2011-01-26 15:39:14 +0000 | [diff] [blame] | 1308 | |
Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1309 | spin_lock(&file_priv->mm.lock); |
Chris Wilson | c8659ef | 2017-03-02 12:25:25 +0000 | [diff] [blame] | 1310 | list_for_each_entry(request, &file_priv->mm.request_list, client_link) { |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1311 | if (time_after_eq(request->emitted_jiffies, recent_enough)) |
| 1312 | break; |
| 1313 | |
Chris Wilson | c8659ef | 2017-03-02 12:25:25 +0000 | [diff] [blame] | 1314 | if (target) { |
| 1315 | list_del(&target->client_link); |
| 1316 | target->file_priv = NULL; |
| 1317 | } |
John Harrison | fcfa423c | 2015-05-29 17:44:12 +0100 | [diff] [blame] | 1318 | |
John Harrison | 54fb241 | 2014-11-24 18:49:27 +0000 | [diff] [blame] | 1319 | target = request; |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1320 | } |
John Harrison | ff86588 | 2014-11-24 18:49:28 +0000 | [diff] [blame] | 1321 | if (target) |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1322 | i915_request_get(target); |
Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1323 | spin_unlock(&file_priv->mm.lock); |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1324 | |
John Harrison | 54fb241 | 2014-11-24 18:49:27 +0000 | [diff] [blame] | 1325 | if (target == NULL) |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1326 | return 0; |
| 1327 | |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1328 | ret = i915_request_wait(target, |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1329 | I915_WAIT_INTERRUPTIBLE, |
| 1330 | MAX_SCHEDULE_TIMEOUT); |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1331 | i915_request_put(target); |
John Harrison | ff86588 | 2014-11-24 18:49:28 +0000 | [diff] [blame] | 1332 | |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1333 | return ret < 0 ? ret : 0; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1334 | } |
| 1335 | |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1336 | struct i915_vma * |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 1337 | i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, |
| 1338 | const struct i915_ggtt_view *view, |
Chris Wilson | 91b2db6 | 2016-08-04 16:32:23 +0100 | [diff] [blame] | 1339 | u64 size, |
Chris Wilson | 2ffffd0 | 2016-08-04 16:32:22 +0100 | [diff] [blame] | 1340 | u64 alignment, |
| 1341 | u64 flags) |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 1342 | { |
Chris Wilson | ad16d2e | 2016-10-13 09:55:04 +0100 | [diff] [blame] | 1343 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
Chris Wilson | 82ad644 | 2018-06-05 16:37:58 +0100 | [diff] [blame] | 1344 | struct i915_address_space *vm = &dev_priv->ggtt.vm; |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1345 | struct i915_vma *vma; |
| 1346 | int ret; |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 1347 | |
Chris Wilson | 4c7d62c | 2016-10-28 13:58:32 +0100 | [diff] [blame] | 1348 | lockdep_assert_held(&obj->base.dev->struct_mutex); |
| 1349 | |
Chris Wilson | ac87a6fd | 2018-02-20 13:42:05 +0000 | [diff] [blame] | 1350 | if (flags & PIN_MAPPABLE && |
| 1351 | (!view || view->type == I915_GGTT_VIEW_NORMAL)) { |
Chris Wilson | 43ae70d9 | 2017-10-09 09:44:01 +0100 | [diff] [blame] | 1352 | /* If the required space is larger than the available |
| 1353 | 		 * aperture, we will not be able to find a slot for the |
| 1354 | * object and unbinding the object now will be in |
| 1355 | * vain. Worse, doing so may cause us to ping-pong |
| 1356 | * the object in and out of the Global GTT and |
| 1357 | * waste a lot of cycles under the mutex. |
| 1358 | */ |
| 1359 | if (obj->base.size > dev_priv->ggtt.mappable_end) |
| 1360 | return ERR_PTR(-E2BIG); |
| 1361 | |
| 1362 | /* If NONBLOCK is set the caller is optimistically |
| 1363 | * trying to cache the full object within the mappable |
| 1364 | * aperture, and *must* have a fallback in place for |
| 1365 | * situations where we cannot bind the object. We |
| 1366 | * can be a little more lax here and use the fallback |
| 1367 | * more often to avoid costly migrations of ourselves |
| 1368 | * and other objects within the aperture. |
| 1369 | * |
| 1370 | * Half-the-aperture is used as a simple heuristic. |
| 1371 | 		 * More interesting would be to search for a free |
| 1372 | * block prior to making the commitment to unbind. |
| 1373 | * That caters for the self-harm case, and with a |
| 1374 | * little more heuristics (e.g. NOFAULT, NOEVICT) |
| 1375 | * we could try to minimise harm to others. |
| 1376 | */ |
| 1377 | if (flags & PIN_NONBLOCK && |
| 1378 | obj->base.size > dev_priv->ggtt.mappable_end / 2) |
| 1379 | return ERR_PTR(-ENOSPC); |
| 1380 | } |
| 1381 | |
Chris Wilson | 718659a | 2017-01-16 15:21:28 +0000 | [diff] [blame] | 1382 | vma = i915_vma_instance(obj, vm, view); |
Chengguang Xu | 772b540 | 2019-02-21 10:08:19 +0800 | [diff] [blame] | 1383 | if (IS_ERR(vma)) |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1384 | return vma; |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1385 | |
| 1386 | if (i915_vma_misplaced(vma, size, alignment, flags)) { |
Chris Wilson | 43ae70d9 | 2017-10-09 09:44:01 +0100 | [diff] [blame] | 1387 | if (flags & PIN_NONBLOCK) { |
| 1388 | if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) |
| 1389 | return ERR_PTR(-ENOSPC); |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1390 | |
Chris Wilson | 43ae70d9 | 2017-10-09 09:44:01 +0100 | [diff] [blame] | 1391 | if (flags & PIN_MAPPABLE && |
Chris Wilson | 944397f | 2017-01-09 16:16:11 +0000 | [diff] [blame] | 1392 | vma->fence_size > dev_priv->ggtt.mappable_end / 2) |
Chris Wilson | ad16d2e | 2016-10-13 09:55:04 +0100 | [diff] [blame] | 1393 | return ERR_PTR(-ENOSPC); |
| 1394 | } |
| 1395 | |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1396 | WARN(i915_vma_is_pinned(vma), |
| 1397 | "bo is already pinned in ggtt with incorrect alignment:" |
Chris Wilson | 05a20d0 | 2016-08-18 17:16:55 +0100 | [diff] [blame] | 1398 | " offset=%08x, req.alignment=%llx," |
| 1399 | " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", |
| 1400 | i915_ggtt_offset(vma), alignment, |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1401 | !!(flags & PIN_MAPPABLE), |
Chris Wilson | 05a20d0 | 2016-08-18 17:16:55 +0100 | [diff] [blame] | 1402 | i915_vma_is_map_and_fenceable(vma)); |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1403 | ret = i915_vma_unbind(vma); |
| 1404 | if (ret) |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1405 | return ERR_PTR(ret); |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1406 | } |
| 1407 | |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1408 | ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); |
| 1409 | if (ret) |
| 1410 | return ERR_PTR(ret); |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 1411 | |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1412 | return vma; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1413 | } |
| 1414 | |
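|      | /* |
|      |  * The busy ioctl result is encoded as: the low 16 bits hold the active |
|      |  * writer's engine class + 1 (0 when idle), while each of the high 16 |
|      |  * bits flags a reading engine class, e.g. a render-class (0) reader |
|      |  * sets bit 16. |
|      |  */ |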
Chris Wilson | 6960d9c | 2019-04-04 11:19:14 +0100 | [diff] [blame] | 1415 | static __always_inline u32 __busy_read_flag(u8 id) |
Chris Wilson | 3fdc13c | 2016-08-05 10:14:18 +0100 | [diff] [blame] | 1416 | { |
Chris Wilson | 6960d9c | 2019-04-04 11:19:14 +0100 | [diff] [blame] | 1417 | if (id == (u8)I915_ENGINE_CLASS_INVALID) |
| 1418 | return 0xffff0000u; |
Chris Wilson | c8b5024 | 2019-03-05 16:26:43 +0000 | [diff] [blame] | 1419 | |
| 1420 | GEM_BUG_ON(id >= 16); |
Chris Wilson | 6960d9c | 2019-04-04 11:19:14 +0100 | [diff] [blame] | 1421 | return 0x10000u << id; |
Chris Wilson | 3fdc13c | 2016-08-05 10:14:18 +0100 | [diff] [blame] | 1422 | } |
| 1423 | |
Chris Wilson | 6960d9c | 2019-04-04 11:19:14 +0100 | [diff] [blame] | 1424 | static __always_inline u32 __busy_write_id(u8 id) |
Chris Wilson | 3fdc13c | 2016-08-05 10:14:18 +0100 | [diff] [blame] | 1425 | { |
Chris Wilson | c8b5024 | 2019-03-05 16:26:43 +0000 | [diff] [blame] | 1426 | /* |
| 1427 | * The uABI guarantees an active writer is also amongst the read |
Chris Wilson | 70cb472 | 2016-08-09 18:08:25 +0100 | [diff] [blame] | 1428 | * engines. This would be true if we accessed the activity tracking |
| 1429 | * under the lock, but as we perform the lookup of the object and |
| 1430 |  * its activity locklessly we cannot guarantee that the last_write |
| 1431 | * being active implies that we have set the same engine flag from |
| 1432 | * last_read - hence we always set both read and write busy for |
| 1433 | * last_write. |
| 1434 | */ |
Chris Wilson | 6960d9c | 2019-04-04 11:19:14 +0100 | [diff] [blame] | 1435 | if (id == (u8)I915_ENGINE_CLASS_INVALID) |
| 1436 | return 0xffffffffu; |
Chris Wilson | c8b5024 | 2019-03-05 16:26:43 +0000 | [diff] [blame] | 1437 | |
| 1438 | return (id + 1) | __busy_read_flag(id); |
Chris Wilson | 3fdc13c | 2016-08-05 10:14:18 +0100 | [diff] [blame] | 1439 | } |
| 1440 | |
Chris Wilson | edf6b76 | 2016-08-09 09:23:33 +0100 | [diff] [blame] | 1441 | static __always_inline unsigned int |
Chris Wilson | 6960d9c | 2019-04-04 11:19:14 +0100 | [diff] [blame] | 1442 | __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) |
Chris Wilson | 3fdc13c | 2016-08-05 10:14:18 +0100 | [diff] [blame] | 1443 | { |
Chris Wilson | c8b5024 | 2019-03-05 16:26:43 +0000 | [diff] [blame] | 1444 | const struct i915_request *rq; |
Chris Wilson | 1255501 | 2016-08-16 09:50:40 +0100 | [diff] [blame] | 1445 | |
Chris Wilson | c8b5024 | 2019-03-05 16:26:43 +0000 | [diff] [blame] | 1446 | /* |
| 1447 | * We have to check the current hw status of the fence as the uABI |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1448 | * guarantees forward progress. We could rely on the idle worker |
| 1449 | * to eventually flush us, but to minimise latency just ask the |
| 1450 | * hardware. |
| 1451 | * |
| 1452 | * Note we only report on the status of native fences. |
| 1453 | */ |
| 1454 | if (!dma_fence_is_i915(fence)) |
Chris Wilson | 1255501 | 2016-08-16 09:50:40 +0100 | [diff] [blame] | 1455 | return 0; |
| 1456 | |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1457 | /* opencode to_request() in order to avoid const warnings */ |
Chris Wilson | c8b5024 | 2019-03-05 16:26:43 +0000 | [diff] [blame] | 1458 | rq = container_of(fence, const struct i915_request, fence); |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1459 | if (i915_request_completed(rq)) |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1460 | return 0; |
| 1461 | |
Chris Wilson | 6960d9c | 2019-04-04 11:19:14 +0100 | [diff] [blame] | 1462 | /* Beware type-expansion follies! */ |
| 1463 | BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class)); |
Chris Wilson | c8b5024 | 2019-03-05 16:26:43 +0000 | [diff] [blame] | 1464 | return flag(rq->engine->uabi_class); |
Chris Wilson | 3fdc13c | 2016-08-05 10:14:18 +0100 | [diff] [blame] | 1465 | } |
| 1466 | |
Chris Wilson | edf6b76 | 2016-08-09 09:23:33 +0100 | [diff] [blame] | 1467 | static __always_inline unsigned int |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1468 | busy_check_reader(const struct dma_fence *fence) |
Chris Wilson | 3fdc13c | 2016-08-05 10:14:18 +0100 | [diff] [blame] | 1469 | { |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1470 | return __busy_set_if_active(fence, __busy_read_flag); |
Chris Wilson | 3fdc13c | 2016-08-05 10:14:18 +0100 | [diff] [blame] | 1471 | } |
| 1472 | |
Chris Wilson | edf6b76 | 2016-08-09 09:23:33 +0100 | [diff] [blame] | 1473 | static __always_inline unsigned int |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1474 | busy_check_writer(const struct dma_fence *fence) |
Chris Wilson | 3fdc13c | 2016-08-05 10:14:18 +0100 | [diff] [blame] | 1475 | { |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1476 | if (!fence) |
| 1477 | return 0; |
| 1478 | |
| 1479 | return __busy_set_if_active(fence, __busy_write_id); |
Chris Wilson | 3fdc13c | 2016-08-05 10:14:18 +0100 | [diff] [blame] | 1480 | } |
| 1481 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1482 | int |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1483 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1484 | struct drm_file *file) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1485 | { |
| 1486 | struct drm_i915_gem_busy *args = data; |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1487 | struct drm_i915_gem_object *obj; |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1488 | struct reservation_object_list *list; |
| 1489 | unsigned int seq; |
Chris Wilson | fbbd37b | 2016-10-28 13:58:42 +0100 | [diff] [blame] | 1490 | int err; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1491 | |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1492 | err = -ENOENT; |
Chris Wilson | fbbd37b | 2016-10-28 13:58:42 +0100 | [diff] [blame] | 1493 | rcu_read_lock(); |
| 1494 | obj = i915_gem_object_lookup_rcu(file, args->handle); |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1495 | if (!obj) |
Chris Wilson | fbbd37b | 2016-10-28 13:58:42 +0100 | [diff] [blame] | 1496 | goto out; |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1497 | |
Chris Wilson | c8b5024 | 2019-03-05 16:26:43 +0000 | [diff] [blame] | 1498 | /* |
| 1499 | * A discrepancy here is that we do not report the status of |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1500 | * non-i915 fences, i.e. even though we may report the object as idle, |
| 1501 | * a call to set-domain may still stall waiting for foreign rendering. |
| 1502 | * This also means that wait-ioctl may report an object as busy, |
| 1503 | * where busy-ioctl considers it idle. |
| 1504 | * |
| 1505 | * We trade the ability to warn of foreign fences to report on which |
| 1506 | * i915 engines are active for the object. |
| 1507 | * |
| 1508 | * Alternatively, we can trade that extra information on read/write |
| 1509 | * activity with |
| 1510 | * args->busy = |
| 1511 | * !reservation_object_test_signaled_rcu(obj->resv, true); |
| 1512 | * to report the overall busyness. This is what the wait-ioctl does. |
| 1513 | * |
| 1514 | */ |
| 1515 | retry: |
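|      | 	/* |
|      | 	 * Lockless snapshot: sample the reservation's seqcount, read the |
|      | 	 * fences under RCU, and loop back here if the seqcount shows the |
|      | 	 * fences were replaced while we were reading them. |
|      | 	 */ |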
| 1516 | seq = raw_read_seqcount(&obj->resv->seq); |
| 1517 | |
| 1518 | /* Translate the exclusive fence to the READ *and* WRITE engine */ |
| 1519 | args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); |
| 1520 | |
| 1521 | /* Translate shared fences to READ set of engines */ |
| 1522 | list = rcu_dereference(obj->resv->fence); |
| 1523 | if (list) { |
| 1524 | unsigned int shared_count = list->shared_count, i; |
| 1525 | |
| 1526 | for (i = 0; i < shared_count; ++i) { |
| 1527 | struct dma_fence *fence = |
| 1528 | rcu_dereference(list->shared[i]); |
| 1529 | |
| 1530 | args->busy |= busy_check_reader(fence); |
| 1531 | } |
Chris Wilson | fbbd37b | 2016-10-28 13:58:42 +0100 | [diff] [blame] | 1532 | } |
Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 1533 | |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1534 | if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) |
| 1535 | goto retry; |
Chris Wilson | 426960b | 2016-01-15 16:51:46 +0000 | [diff] [blame] | 1536 | |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 1537 | err = 0; |
Chris Wilson | fbbd37b | 2016-10-28 13:58:42 +0100 | [diff] [blame] | 1538 | out: |
| 1539 | rcu_read_unlock(); |
| 1540 | return err; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1541 | } |
| 1542 | |
| 1543 | int |
| 1544 | i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
| 1545 | struct drm_file *file_priv) |
| 1546 | { |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 1547 | return i915_gem_ring_throttle(dev, file_priv); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1548 | } |
| 1549 | |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1550 | int |
| 1551 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
| 1552 | struct drm_file *file_priv) |
| 1553 | { |
Chris Wilson | fac5e23 | 2016-07-04 11:34:36 +0100 | [diff] [blame] | 1554 | struct drm_i915_private *dev_priv = to_i915(dev); |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1555 | struct drm_i915_gem_madvise *args = data; |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1556 | struct drm_i915_gem_object *obj; |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1557 | int err; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1558 | |
| 1559 | switch (args->madv) { |
| 1560 | case I915_MADV_DONTNEED: |
| 1561 | case I915_MADV_WILLNEED: |
| 1562 | break; |
| 1563 | default: |
| 1564 | return -EINVAL; |
| 1565 | } |
| 1566 | |
Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1567 | obj = i915_gem_object_lookup(file_priv, args->handle); |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1568 | if (!obj) |
| 1569 | return -ENOENT; |
| 1570 | |
| 1571 | err = mutex_lock_interruptible(&obj->mm.lock); |
| 1572 | if (err) |
| 1573 | goto out; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1574 | |
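|      | 	/* |
|      | 	 * A hedged note: with QUIRK_PIN_SWIZZLED_PAGES, tiled objects |
|      | 	 * keep their pages pinned to preserve swizzle state, so that |
|      | 	 * extra pin is transferred across the WILLNEED/DONTNEED |
|      | 	 * transition below. |
|      | 	 */ |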
Chris Wilson | f1fa4f4 | 2017-10-13 21:26:13 +0100 | [diff] [blame] | 1575 | if (i915_gem_object_has_pages(obj) && |
Chris Wilson | 3e510a8 | 2016-08-05 10:14:23 +0100 | [diff] [blame] | 1576 | i915_gem_object_is_tiled(obj) && |
Daniel Vetter | 656bfa3 | 2014-11-20 09:26:30 +0100 | [diff] [blame] | 1577 | dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { |
Chris Wilson | bc0629a | 2016-11-01 10:03:17 +0000 | [diff] [blame] | 1578 | if (obj->mm.madv == I915_MADV_WILLNEED) { |
| 1579 | GEM_BUG_ON(!obj->mm.quirked); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1580 | __i915_gem_object_unpin_pages(obj); |
Chris Wilson | bc0629a | 2016-11-01 10:03:17 +0000 | [diff] [blame] | 1581 | obj->mm.quirked = false; |
| 1582 | } |
| 1583 | if (args->madv == I915_MADV_WILLNEED) { |
Chris Wilson | 2c3a3f4 | 2016-11-04 10:30:01 +0000 | [diff] [blame] | 1584 | GEM_BUG_ON(obj->mm.quirked); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1585 | __i915_gem_object_pin_pages(obj); |
Chris Wilson | bc0629a | 2016-11-01 10:03:17 +0000 | [diff] [blame] | 1586 | obj->mm.quirked = true; |
| 1587 | } |
Daniel Vetter | 656bfa3 | 2014-11-20 09:26:30 +0100 | [diff] [blame] | 1588 | } |
| 1589 | |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1590 | if (obj->mm.madv != __I915_MADV_PURGED) |
| 1591 | obj->mm.madv = args->madv; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1592 | |
Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 1593 | /* if the object is no longer attached, discard its backing storage */ |
Chris Wilson | f1fa4f4 | 2017-10-13 21:26:13 +0100 | [diff] [blame] | 1594 | if (obj->mm.madv == I915_MADV_DONTNEED && |
| 1595 | !i915_gem_object_has_pages(obj)) |
Chris Wilson | f033428 | 2019-05-28 10:29:46 +0100 | [diff] [blame] | 1596 | i915_gem_object_truncate(obj); |
Chris Wilson | 2d7ef39 | 2009-09-20 23:13:10 +0100 | [diff] [blame] | 1597 | |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1598 | args->retained = obj->mm.madv != __I915_MADV_PURGED; |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1599 | mutex_unlock(&obj->mm.lock); |
Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 1600 | |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1601 | out: |
Chris Wilson | f8c417c | 2016-07-20 13:31:53 +0100 | [diff] [blame] | 1602 | i915_gem_object_put(obj); |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1603 | return err; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1604 | } |
| 1605 | |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1606 | void i915_gem_sanitize(struct drm_i915_private *i915) |
| 1607 | { |
Chris Wilson | 538ef96 | 2019-01-14 14:21:18 +0000 | [diff] [blame] | 1608 | intel_wakeref_t wakeref; |
| 1609 | |
Chris Wilson | c3160da | 2018-05-31 09:22:45 +0100 | [diff] [blame] | 1610 | GEM_TRACE("\n"); |
| 1611 | |
Chris Wilson | 538ef96 | 2019-01-14 14:21:18 +0000 | [diff] [blame] | 1612 | wakeref = intel_runtime_pm_get(i915); |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1613 | intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); |
Chris Wilson | c3160da | 2018-05-31 09:22:45 +0100 | [diff] [blame] | 1614 | |
| 1615 | /* |
| 1616 | * As we have just resumed the machine and woken the device up from |
| 1617 | * deep PCI sleep (presumably D3_cold), assume the HW has been reset |
| 1618 | * back to defaults, recovering from whatever wedged state we left it |
| 1619 | * in and so worth trying to use the device once more. |
| 1620 | */ |
Chris Wilson | c41166f | 2019-02-20 14:56:37 +0000 | [diff] [blame] | 1621 | if (i915_terminally_wedged(i915)) |
Chris Wilson | f36325f | 2017-08-26 12:09:34 +0100 | [diff] [blame] | 1622 | i915_gem_unset_wedged(i915); |
Chris Wilson | f36325f | 2017-08-26 12:09:34 +0100 | [diff] [blame] | 1623 | |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1624 | /* |
| 1625 | * If we inherit context state from the BIOS or earlier occupants |
| 1626 | * of the GPU, the GPU may be in an inconsistent state when we |
| 1627 | * try to take over. The only way to remove the earlier state |
| 1628 | * is by resetting. However, resetting on earlier gen is tricky as |
| 1629 | * it may impact the display and we are uncertain about the stability |
Joonas Lahtinen | ea117b8 | 2017-04-28 10:53:38 +0300 | [diff] [blame] | 1630 | 	 * of the reset, though in principle this could be applied to even earlier gens. |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1631 | */ |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 1632 | intel_gt_sanitize(i915, false); |
Chris Wilson | c3160da | 2018-05-31 09:22:45 +0100 | [diff] [blame] | 1633 | |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1634 | intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); |
Chris Wilson | 538ef96 | 2019-01-14 14:21:18 +0000 | [diff] [blame] | 1635 | intel_runtime_pm_put(i915, wakeref); |
Chris Wilson | c3160da | 2018-05-31 09:22:45 +0100 | [diff] [blame] | 1636 | |
Chris Wilson | eb8d0f5 | 2019-01-25 13:22:28 +0000 | [diff] [blame] | 1637 | mutex_lock(&i915->drm.struct_mutex); |
Chris Wilson | 4dfacb0 | 2018-05-31 09:22:43 +0100 | [diff] [blame] | 1638 | i915_gem_contexts_lost(i915); |
| 1639 | mutex_unlock(&i915->drm.struct_mutex); |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1640 | } |
| 1641 | |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1642 | void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) |
Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 1643 | { |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1644 | if (INTEL_GEN(dev_priv) < 5 || |
Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 1645 | dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) |
| 1646 | return; |
| 1647 | |
| 1648 | I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | |
| 1649 | DISP_TILE_SURFACE_SWIZZLING); |
| 1650 | |
Lucas De Marchi | cf819ef | 2018-12-12 10:10:43 -0800 | [diff] [blame] | 1651 | if (IS_GEN(dev_priv, 5)) |
Daniel Vetter | 11782b0 | 2012-01-31 16:47:55 +0100 | [diff] [blame] | 1652 | return; |
| 1653 | |
Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 1654 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); |
Lucas De Marchi | cf819ef | 2018-12-12 10:10:43 -0800 | [diff] [blame] | 1655 | if (IS_GEN(dev_priv, 6)) |
Daniel Vetter | 6b26c86 | 2012-04-24 14:04:12 +0200 | [diff] [blame] | 1656 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); |
Lucas De Marchi | cf819ef | 2018-12-12 10:10:43 -0800 | [diff] [blame] | 1657 | else if (IS_GEN(dev_priv, 7)) |
Daniel Vetter | 6b26c86 | 2012-04-24 14:04:12 +0200 | [diff] [blame] | 1658 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); |
Lucas De Marchi | cf819ef | 2018-12-12 10:10:43 -0800 | [diff] [blame] | 1659 | else if (IS_GEN(dev_priv, 8)) |
Ben Widawsky | 31a5336 | 2013-11-02 21:07:04 -0700 | [diff] [blame] | 1660 | I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); |
Ben Widawsky | 8782e26 | 2012-12-18 10:31:23 -0800 | [diff] [blame] | 1661 | else |
| 1662 | BUG(); |
Daniel Vetter | f691e2f | 2012-02-02 09:58:12 +0100 | [diff] [blame] | 1663 | } |
Daniel Vetter | e21af88 | 2012-02-09 20:53:27 +0100 | [diff] [blame] | 1664 | |
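|      | /* |
|      |  * Zeroing CTL/HEAD/TAIL/START leaves a ring disabled and empty |
|      |  * (head == tail), so stale state cannot keep it "active". |
|      |  */ |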
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 1665 | static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) |
Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 1666 | { |
Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 1667 | I915_WRITE(RING_CTL(base), 0); |
| 1668 | I915_WRITE(RING_HEAD(base), 0); |
| 1669 | I915_WRITE(RING_TAIL(base), 0); |
| 1670 | I915_WRITE(RING_START(base), 0); |
| 1671 | } |
| 1672 | |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 1673 | static void init_unused_rings(struct drm_i915_private *dev_priv) |
Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 1674 | { |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 1675 | if (IS_I830(dev_priv)) { |
| 1676 | init_unused_ring(dev_priv, PRB1_BASE); |
| 1677 | init_unused_ring(dev_priv, SRB0_BASE); |
| 1678 | init_unused_ring(dev_priv, SRB1_BASE); |
| 1679 | init_unused_ring(dev_priv, SRB2_BASE); |
| 1680 | init_unused_ring(dev_priv, SRB3_BASE); |
Lucas De Marchi | cf819ef | 2018-12-12 10:10:43 -0800 | [diff] [blame] | 1681 | } else if (IS_GEN(dev_priv, 2)) { |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 1682 | init_unused_ring(dev_priv, SRB0_BASE); |
| 1683 | init_unused_ring(dev_priv, SRB1_BASE); |
Lucas De Marchi | cf819ef | 2018-12-12 10:10:43 -0800 | [diff] [blame] | 1684 | } else if (IS_GEN(dev_priv, 3)) { |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 1685 | init_unused_ring(dev_priv, PRB1_BASE); |
| 1686 | init_unused_ring(dev_priv, PRB2_BASE); |
Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 1687 | } |
| 1688 | } |
| 1689 | |
Chris Wilson | 20a8a74 | 2017-02-08 14:30:31 +0000 | [diff] [blame] | 1690 | int i915_gem_init_hw(struct drm_i915_private *dev_priv) |
| 1691 | { |
Chris Wilson | d200cda | 2016-04-28 09:56:44 +0100 | [diff] [blame] | 1692 | int ret; |
Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 1693 | |
Chris Wilson | de867c2 | 2016-10-25 13:16:02 +0100 | [diff] [blame] | 1694 | dev_priv->gt.last_init_time = ktime_get(); |
| 1695 | |
Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 1696 | /* Double layer security blanket, see i915_gem_init() */ |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1697 | intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); |
Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 1698 | |
Tvrtko Ursulin | 0031fb9 | 2016-11-04 14:42:44 +0000 | [diff] [blame] | 1699 | if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) |
Ben Widawsky | 05e21cc | 2013-07-04 11:02:04 -0700 | [diff] [blame] | 1700 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); |
Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 1701 | |
Tvrtko Ursulin | 772c2a5 | 2016-10-13 11:03:01 +0100 | [diff] [blame] | 1702 | if (IS_HASWELL(dev_priv)) |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 1703 | I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? |
Ville Syrjälä | 0bf2134 | 2013-11-29 14:56:12 +0200 | [diff] [blame] | 1704 | LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); |
Rodrigo Vivi | 9435373 | 2013-08-28 16:45:46 -0300 | [diff] [blame] | 1705 | |
Tvrtko Ursulin | 094304b | 2018-12-03 12:50:10 +0000 | [diff] [blame] | 1706 | /* Apply the GT workarounds... */ |
Tvrtko Ursulin | 25d140f | 2018-12-03 13:33:19 +0000 | [diff] [blame] | 1707 | intel_gt_apply_workarounds(dev_priv); |
Tvrtko Ursulin | 094304b | 2018-12-03 12:50:10 +0000 | [diff] [blame] | 1708 | /* ...and determine whether they are sticking. */ |
| 1709 | intel_gt_verify_workarounds(dev_priv, "init"); |
Oscar Mateo | 59b449d | 2018-04-10 09:12:47 -0700 | [diff] [blame] | 1710 | |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1711 | i915_gem_init_swizzling(dev_priv); |
Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 1712 | |
Daniel Vetter | d5abdfd | 2014-11-20 09:45:19 +0100 | [diff] [blame] | 1713 | /* |
 | 1714 | * At least 830 can leave some of the unused rings |
 | 1715 | * "active" (i.e. head != tail) after resume, which |
 | 1716 | * will prevent C3 entry. Make sure all unused rings |
 | 1717 | * are totally idle. |
 | 1718 | */ |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 1719 | init_unused_rings(dev_priv); |
Daniel Vetter | d5abdfd | 2014-11-20 09:45:19 +0100 | [diff] [blame] | 1720 | |
Dave Gordon | ed54c1a | 2016-01-19 19:02:54 +0000 | [diff] [blame] | 1721 | BUG_ON(!dev_priv->kernel_context); |
Chris Wilson | c41166f | 2019-02-20 14:56:37 +0000 | [diff] [blame] | 1722 | ret = i915_terminally_wedged(dev_priv); |
| 1723 | if (ret) |
Chris Wilson | 6f74b36 | 2017-10-15 15:37:25 +0100 | [diff] [blame] | 1724 | goto out; |
John Harrison | 90638cc | 2015-05-29 17:43:37 +0100 | [diff] [blame] | 1725 | |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 1726 | ret = i915_ppgtt_init_hw(dev_priv); |
John Harrison | 4ad2fd8 | 2015-06-18 13:11:20 +0100 | [diff] [blame] | 1727 | if (ret) { |
Chris Wilson | 8177e11 | 2018-02-07 11:15:45 +0000 | [diff] [blame] | 1728 | DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); |
John Harrison | 4ad2fd8 | 2015-06-18 13:11:20 +0100 | [diff] [blame] | 1729 | goto out; |
| 1730 | } |
| 1731 | |
Jackie Li | f08e203 | 2018-03-13 17:32:53 -0700 | [diff] [blame] | 1732 | ret = intel_wopcm_init_hw(&dev_priv->wopcm); |
| 1733 | if (ret) { |
| 1734 | DRM_ERROR("Enabling WOPCM failed (%d)\n", ret); |
| 1735 | goto out; |
| 1736 | } |
| 1737 | |
Michał Winiarski | 9bdc357 | 2017-10-25 18:25:19 +0100 | [diff] [blame] | 1738 | /* We can't enable contexts until all firmware is loaded */ |
| 1739 | ret = intel_uc_init_hw(dev_priv); |
Chris Wilson | 8177e11 | 2018-02-07 11:15:45 +0000 | [diff] [blame] | 1740 | if (ret) { |
| 1741 | DRM_ERROR("Enabling uc failed (%d)\n", ret); |
Michał Winiarski | 9bdc357 | 2017-10-25 18:25:19 +0100 | [diff] [blame] | 1742 | goto out; |
Chris Wilson | 8177e11 | 2018-02-07 11:15:45 +0000 | [diff] [blame] | 1743 | } |
Michał Winiarski | 9bdc357 | 2017-10-25 18:25:19 +0100 | [diff] [blame] | 1744 | |
Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 1745 | intel_mocs_init_l3cc_table(dev_priv); |
Peter Antoine | 0ccdacf | 2016-04-13 15:03:25 +0100 | [diff] [blame] | 1746 | |
Chris Wilson | 136109c | 2017-11-02 13:14:30 +0000 | [diff] [blame] | 1747 | /* Only when the HW is re-initialised can we replay the requests */ |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 1748 | ret = intel_engines_resume(dev_priv); |
Michal Wajdeczko | b96f6eb | 2018-06-05 12:24:43 +0000 | [diff] [blame] | 1749 | if (ret) |
| 1750 | goto cleanup_uc; |
Michał Winiarski | 60c0a66 | 2018-07-12 14:48:10 +0200 | [diff] [blame] | 1751 | |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1752 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
Michał Winiarski | 60c0a66 | 2018-07-12 14:48:10 +0200 | [diff] [blame] | 1753 | |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 1754 | intel_engines_set_scheduler_caps(dev_priv); |
Michał Winiarski | 60c0a66 | 2018-07-12 14:48:10 +0200 | [diff] [blame] | 1755 | return 0; |
Michal Wajdeczko | b96f6eb | 2018-06-05 12:24:43 +0000 | [diff] [blame] | 1756 | |
| 1757 | cleanup_uc: |
| 1758 | intel_uc_fini_hw(dev_priv); |
Michał Winiarski | 60c0a66 | 2018-07-12 14:48:10 +0200 | [diff] [blame] | 1759 | out: |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1760 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
Michał Winiarski | 60c0a66 | 2018-07-12 14:48:10 +0200 | [diff] [blame] | 1761 | |
| 1762 | return ret; |
Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 1763 | } |
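/*
 * Aside (added for clarity): i915_gem_init_hw() takes and releases its
 * own forcewake reference even though i915_gem_init() already holds the
 * outer "security blanket", keeping the function self-contained for any
 * caller (e.g. reset paths) that does not hold the outer reference.
 */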
| 1764 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1765 | static int __intel_engines_record_defaults(struct drm_i915_private *i915) |
| 1766 | { |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1767 | struct intel_engine_cs *engine; |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1768 | struct i915_gem_context *ctx; |
| 1769 | struct i915_gem_engines *e; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1770 | enum intel_engine_id id; |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1771 | int err = 0; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1772 | |
| 1773 | /* |
 | 1774 | * As we reset the GPU during very early sanitisation, the current |
 | 1775 | * register state on the GPU should reflect its default values. |
| 1776 | * We load a context onto the hw (with restore-inhibit), then switch |
| 1777 | * over to a second context to save that default register state. We |
| 1778 | * can then prime every new context with that state so they all start |
| 1779 | * from the same default HW values. |
| 1780 | */ |
| 1781 | |
| 1782 | ctx = i915_gem_context_create_kernel(i915, 0); |
| 1783 | if (IS_ERR(ctx)) |
| 1784 | return PTR_ERR(ctx); |
| 1785 | |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1786 | e = i915_gem_context_lock_engines(ctx); |
| 1787 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1788 | for_each_engine(engine, i915, id) { |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1789 | struct intel_context *ce = e->engines[id]; |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1790 | struct i915_request *rq; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1791 | |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1792 | rq = intel_context_create_request(ce); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1793 | if (IS_ERR(rq)) { |
| 1794 | err = PTR_ERR(rq); |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1795 | goto err_active; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1796 | } |
| 1797 | |
Chris Wilson | 3fef5cd | 2017-11-20 10:20:02 +0000 | [diff] [blame] | 1798 | err = 0; |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1799 | if (rq->engine->init_context) |
| 1800 | err = rq->engine->init_context(rq); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1801 | |
Chris Wilson | 697b9a8 | 2018-06-12 11:51:35 +0100 | [diff] [blame] | 1802 | i915_request_add(rq); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1803 | if (err) |
| 1804 | goto err_active; |
| 1805 | } |
| 1806 | |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1807 | /* Flush the default context image to memory, and enable powersaving. */ |
Chris Wilson | 23c3c3d | 2019-04-24 21:07:14 +0100 | [diff] [blame] | 1808 | if (!i915_gem_load_power_context(i915)) { |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1809 | err = -EIO; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1810 | goto err_active; |
Chris Wilson | 2621cef | 2018-07-09 13:20:43 +0100 | [diff] [blame] | 1811 | } |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1812 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1813 | for_each_engine(engine, i915, id) { |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1814 | struct intel_context *ce = e->engines[id]; |
| 1815 | struct i915_vma *state = ce->state; |
Chris Wilson | 37d7c9c | 2018-09-14 13:35:03 +0100 | [diff] [blame] | 1816 | void *vaddr; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1817 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1818 | if (!state) |
| 1819 | continue; |
| 1820 | |
Chris Wilson | 0881954 | 2019-03-08 13:25:22 +0000 | [diff] [blame] | 1821 | GEM_BUG_ON(intel_context_is_pinned(ce)); |
Chris Wilson | c4d52fe | 2019-03-08 13:25:19 +0000 | [diff] [blame] | 1822 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1823 | /* |
| 1824 | * As we will hold a reference to the logical state, it will |
| 1825 | * not be torn down with the context, and importantly the |
| 1826 | * object will hold onto its vma (making it possible for a |
| 1827 | * stray GTT write to corrupt our defaults). Unmap the vma |
| 1828 | * from the GTT to prevent such accidents and reclaim the |
| 1829 | * space. |
| 1830 | */ |
| 1831 | err = i915_vma_unbind(state); |
| 1832 | if (err) |
| 1833 | goto err_active; |
| 1834 | |
| 1835 | err = i915_gem_object_set_to_cpu_domain(state->obj, false); |
| 1836 | if (err) |
| 1837 | goto err_active; |
| 1838 | |
| 1839 | engine->default_state = i915_gem_object_get(state->obj); |
Chris Wilson | a679f58 | 2019-03-21 16:19:07 +0000 | [diff] [blame] | 1840 | i915_gem_object_set_cache_coherency(engine->default_state, |
| 1841 | I915_CACHE_LLC); |
Chris Wilson | 37d7c9c | 2018-09-14 13:35:03 +0100 | [diff] [blame] | 1842 | |
| 1843 | /* Check we can acquire the image of the context state */ |
| 1844 | vaddr = i915_gem_object_pin_map(engine->default_state, |
Chris Wilson | 666424a | 2018-09-14 13:35:04 +0100 | [diff] [blame] | 1845 | I915_MAP_FORCE_WB); |
Chris Wilson | 37d7c9c | 2018-09-14 13:35:03 +0100 | [diff] [blame] | 1846 | if (IS_ERR(vaddr)) { |
| 1847 | err = PTR_ERR(vaddr); |
| 1848 | goto err_active; |
| 1849 | } |
| 1850 | |
| 1851 | i915_gem_object_unpin_map(engine->default_state); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1852 | } |
| 1853 | |
| 1854 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) { |
| 1855 | unsigned int found = intel_engines_has_context_isolation(i915); |
| 1856 | |
| 1857 | /* |
| 1858 | * Make sure that classes with multiple engine instances all |
| 1859 | * share the same basic configuration. |
| 1860 | */ |
| 1861 | for_each_engine(engine, i915, id) { |
| 1862 | unsigned int bit = BIT(engine->uabi_class); |
| 1863 | unsigned int expected = engine->default_state ? bit : 0; |
| 1864 | |
| 1865 | if ((found & bit) != expected) { |
| 1866 | DRM_ERROR("mismatching default context state for class %d on engine %s\n", |
| 1867 | engine->uabi_class, engine->name); |
| 1868 | } |
| 1869 | } |
| 1870 | } |
| 1871 | |
| 1872 | out_ctx: |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1873 | i915_gem_context_unlock_engines(ctx); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1874 | i915_gem_context_set_closed(ctx); |
| 1875 | i915_gem_context_put(ctx); |
| 1876 | return err; |
| 1877 | |
| 1878 | err_active: |
| 1879 | /* |
| 1880 | * If we have to abandon now, we expect the engines to be idle |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1881 | * and ready to be torn down. The quickest way we can accomplish |
| 1882 | * this is by declaring ourselves wedged. |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1883 | */ |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1884 | i915_gem_set_wedged(i915); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1885 | goto out_ctx; |
| 1886 | } |
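/*
 * Illustrative sketch (hedged, not part of this file): once recorded,
 * engine->default_state is typically used to prime the register image of
 * each newly created context, roughly along the lines below. The helper
 * name and the wholesale copy are assumptions for illustration only; the
 * real consumer lives with the context-image setup code.
 */
#if 0
static void prime_context_image(struct intel_context *ce, void *vaddr)
{
	struct intel_engine_cs *engine = ce->engine;
	void *defaults;

	if (!engine->default_state)
		return;

	defaults = i915_gem_object_pin_map(engine->default_state, I915_MAP_WB);
	if (IS_ERR(defaults))
		return;

	/* Seed the fresh context image with the saved default state. */
	memcpy(vaddr, defaults, engine->context_size);
	i915_gem_object_unpin_map(engine->default_state);
}
#endif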
| 1887 | |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 1888 | static int |
| 1889 | i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size) |
| 1890 | { |
| 1891 | struct drm_i915_gem_object *obj; |
| 1892 | struct i915_vma *vma; |
| 1893 | int ret; |
| 1894 | |
| 1895 | obj = i915_gem_object_create_stolen(i915, size); |
| 1896 | if (!obj) |
| 1897 | obj = i915_gem_object_create_internal(i915, size); |
| 1898 | if (IS_ERR(obj)) { |
| 1899 | DRM_ERROR("Failed to allocate scratch page\n"); |
| 1900 | return PTR_ERR(obj); |
| 1901 | } |
| 1902 | |
| 1903 | vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); |
| 1904 | if (IS_ERR(vma)) { |
| 1905 | ret = PTR_ERR(vma); |
| 1906 | goto err_unref; |
| 1907 | } |
| 1908 | |
| 1909 | ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); |
| 1910 | if (ret) |
| 1911 | goto err_unref; |
| 1912 | |
| 1913 | i915->gt.scratch = vma; |
| 1914 | return 0; |
| 1915 | |
| 1916 | err_unref: |
| 1917 | i915_gem_object_put(obj); |
| 1918 | return ret; |
| 1919 | } |
| 1920 | |
| 1921 | static void i915_gem_fini_scratch(struct drm_i915_private *i915) |
| 1922 | { |
| 1923 | i915_vma_unpin_and_release(&i915->gt.scratch, 0); |
| 1924 | } |
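/*
 * Note (added for clarity): gt.scratch is a pinned GGTT allocation that
 * engines can safely target for dummy reads and writes; its GGTT address
 * is available through the vma, e.g. i915->gt.scratch->node.start.
 */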
| 1925 | |
Chris Wilson | 254e118 | 2019-04-17 08:56:28 +0100 | [diff] [blame] | 1926 | static int intel_engines_verify_workarounds(struct drm_i915_private *i915) |
| 1927 | { |
| 1928 | struct intel_engine_cs *engine; |
| 1929 | enum intel_engine_id id; |
| 1930 | int err = 0; |
| 1931 | |
| 1932 | if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
| 1933 | return 0; |
| 1934 | |
| 1935 | for_each_engine(engine, i915, id) { |
| 1936 | if (intel_engine_verify_workarounds(engine, "load")) |
| 1937 | err = -EIO; |
| 1938 | } |
| 1939 | |
| 1940 | return err; |
| 1941 | } |
| 1942 | |
Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 1943 | int i915_gem_init(struct drm_i915_private *dev_priv) |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1944 | { |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1945 | int ret; |
| 1946 | |
Changbin Du | 52b2416 | 2018-05-08 17:07:05 +0800 | [diff] [blame] | 1947 | /* We need to fall back to 4K pages if the host doesn't support huge GTT. */ |
| 1948 | if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) |
Matthew Auld | da9fe3f3 | 2017-10-06 23:18:31 +0100 | [diff] [blame] | 1949 | mkwrite_device_info(dev_priv)->page_sizes = |
| 1950 | I915_GTT_PAGE_SIZE_4K; |
| 1951 | |
Chris Wilson | 9431282 | 2017-05-03 10:39:18 +0100 | [diff] [blame] | 1952 | dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); |
Chris Wilson | 57822dc | 2017-02-22 11:40:48 +0000 | [diff] [blame] | 1953 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1954 | i915_timelines_init(dev_priv); |
| 1955 | |
Chris Wilson | ee48700 | 2017-11-22 17:26:21 +0000 | [diff] [blame] | 1956 | ret = i915_gem_init_userptr(dev_priv); |
| 1957 | if (ret) |
| 1958 | return ret; |
| 1959 | |
Sagar Arun Kamble | 70deead | 2018-01-24 21:16:58 +0530 | [diff] [blame] | 1960 | ret = intel_uc_init_misc(dev_priv); |
Michał Winiarski | 3176ff4 | 2017-12-13 23:13:47 +0100 | [diff] [blame] | 1961 | if (ret) |
| 1962 | return ret; |
| 1963 | |
Michal Wajdeczko | f7dc015 | 2018-06-28 14:15:21 +0000 | [diff] [blame] | 1964 | ret = intel_wopcm_init(&dev_priv->wopcm); |
| 1965 | if (ret) |
| 1966 | goto err_uc_misc; |
| 1967 | |
Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 1968 | /* This is just a security blanket to placate dragons. |
| 1969 | * On some systems, we very sporadically observe that the first TLBs |
| 1970 | * used by the CS may be stale, despite us poking the TLB reset. If |
| 1971 | * we hold the forcewake during initialisation these problems |
| 1972 | * just magically go away. |
| 1973 | */ |
Chris Wilson | ee48700 | 2017-11-22 17:26:21 +0000 | [diff] [blame] | 1974 | mutex_lock(&dev_priv->drm.struct_mutex); |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1975 | intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); |
Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 1976 | |
Chris Wilson | f6b9d5c | 2016-08-04 07:52:23 +0100 | [diff] [blame] | 1977 | ret = i915_gem_init_ggtt(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1978 | if (ret) { |
| 1979 | GEM_BUG_ON(ret == -EIO); |
| 1980 | goto err_unlock; |
| 1981 | } |
Jesse Barnes | d62b489 | 2013-03-08 10:45:53 -0800 | [diff] [blame] | 1982 | |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 1983 | ret = i915_gem_init_scratch(dev_priv, |
Lucas De Marchi | cf819ef | 2018-12-12 10:10:43 -0800 | [diff] [blame] | 1984 | IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1985 | if (ret) { |
| 1986 | GEM_BUG_ON(ret == -EIO); |
| 1987 | goto err_ggtt; |
| 1988 | } |
Ben Widawsky | 2fa48d8 | 2013-12-06 14:11:04 -0800 | [diff] [blame] | 1989 | |
Chris Wilson | 11334c6 | 2019-04-26 17:33:33 +0100 | [diff] [blame] | 1990 | ret = intel_engines_setup(dev_priv); |
| 1991 | if (ret) { |
| 1992 | GEM_BUG_ON(ret == -EIO); |
| 1993 | goto err_unlock; |
| 1994 | } |
| 1995 | |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 1996 | ret = i915_gem_contexts_init(dev_priv); |
| 1997 | if (ret) { |
| 1998 | GEM_BUG_ON(ret == -EIO); |
| 1999 | goto err_scratch; |
| 2000 | } |
| 2001 | |
Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 2002 | ret = intel_engines_init(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2003 | if (ret) { |
| 2004 | GEM_BUG_ON(ret == -EIO); |
| 2005 | goto err_context; |
| 2006 | } |
Daniel Vetter | 53ca26c | 2012-04-26 23:28:03 +0200 | [diff] [blame] | 2007 | |
Chris Wilson | f58d13d | 2017-11-10 14:26:29 +0000 | [diff] [blame] | 2008 | intel_init_gt_powersave(dev_priv); |
| 2009 | |
Michał Winiarski | 61b5c15 | 2017-12-13 23:13:48 +0100 | [diff] [blame] | 2010 | ret = intel_uc_init(dev_priv); |
Chris Wilson | cc6a818 | 2017-11-10 14:26:30 +0000 | [diff] [blame] | 2011 | if (ret) |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2012 | goto err_pm; |
Chris Wilson | cc6a818 | 2017-11-10 14:26:30 +0000 | [diff] [blame] | 2013 | |
Michał Winiarski | 61b5c15 | 2017-12-13 23:13:48 +0100 | [diff] [blame] | 2014 | ret = i915_gem_init_hw(dev_priv); |
| 2015 | if (ret) |
| 2016 | goto err_uc_init; |
| 2017 | |
Chris Wilson | cc6a818 | 2017-11-10 14:26:30 +0000 | [diff] [blame] | 2018 | /* |
 | 2019 | * Despite its name, intel_init_clock_gating applies not only display |
 | 2020 | * clock gating workarounds but also GT mmio workarounds and the |
 | 2021 | * occasional GT power context workaround. Worse, sometimes it includes |
 | 2022 | * a context register workaround which we need to apply before we |
 | 2023 | * record the default HW state for all contexts. |
| 2024 | * |
| 2025 | * FIXME: break up the workarounds and apply them at the right time! |
| 2026 | */ |
| 2027 | intel_init_clock_gating(dev_priv); |
| 2028 | |
Chris Wilson | 254e118 | 2019-04-17 08:56:28 +0100 | [diff] [blame] | 2029 | ret = intel_engines_verify_workarounds(dev_priv); |
| 2030 | if (ret) |
| 2031 | goto err_init_hw; |
| 2032 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 2033 | ret = __intel_engines_record_defaults(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2034 | if (ret) |
| 2035 | goto err_init_hw; |
| 2036 | |
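	/* Fault injection: exercise the non-EIO unwind path below. */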
| 2037 | if (i915_inject_load_failure()) { |
| 2038 | ret = -ENODEV; |
| 2039 | goto err_init_hw; |
| 2040 | } |
| 2041 | |
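	/* Fault injection: exercise the -EIO (wedged but alive) unwind. */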
| 2042 | if (i915_inject_load_failure()) { |
| 2043 | ret = -EIO; |
| 2044 | goto err_init_hw; |
| 2045 | } |
| 2046 | |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 2047 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2048 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 2049 | |
| 2050 | return 0; |
| 2051 | |
| 2052 | /* |
 | 2053 | * Unwinding is complicated by the fact that we want to handle -EIO |
 | 2054 | * to mean disable GPU submission but keep KMS alive. We want to mark |
 | 2055 | * the HW as irreversibly wedged, but keep enough state around that |
 | 2056 | * the driver doesn't explode during runtime. |
| 2057 | */ |
| 2058 | err_init_hw: |
Chris Wilson | 8571a05 | 2018-06-06 15:54:41 +0100 | [diff] [blame] | 2059 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 2060 | |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 2061 | i915_gem_set_wedged(dev_priv); |
Chris Wilson | 5861b01 | 2019-03-08 09:36:54 +0000 | [diff] [blame] | 2062 | i915_gem_suspend(dev_priv); |
Chris Wilson | 8571a05 | 2018-06-06 15:54:41 +0100 | [diff] [blame] | 2063 | i915_gem_suspend_late(dev_priv); |
| 2064 | |
Chris Wilson | 8bcf9f7 | 2018-07-10 10:44:20 +0100 | [diff] [blame] | 2065 | i915_gem_drain_workqueue(dev_priv); |
| 2066 | |
Chris Wilson | 8571a05 | 2018-06-06 15:54:41 +0100 | [diff] [blame] | 2067 | mutex_lock(&dev_priv->drm.struct_mutex); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2068 | intel_uc_fini_hw(dev_priv); |
Michał Winiarski | 61b5c15 | 2017-12-13 23:13:48 +0100 | [diff] [blame] | 2069 | err_uc_init: |
| 2070 | intel_uc_fini(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2071 | err_pm: |
| 2072 | if (ret != -EIO) { |
| 2073 | intel_cleanup_gt_powersave(dev_priv); |
Chris Wilson | 45b9c96 | 2019-05-01 11:32:04 +0100 | [diff] [blame] | 2074 | intel_engines_cleanup(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2075 | } |
| 2076 | err_context: |
| 2077 | if (ret != -EIO) |
| 2078 | i915_gem_contexts_fini(dev_priv); |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 2079 | err_scratch: |
| 2080 | i915_gem_fini_scratch(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2081 | err_ggtt: |
| 2082 | err_unlock: |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 2083 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2084 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 2085 | |
Michal Wajdeczko | f7dc015 | 2018-06-28 14:15:21 +0000 | [diff] [blame] | 2086 | err_uc_misc: |
Sagar Arun Kamble | 70deead | 2018-01-24 21:16:58 +0530 | [diff] [blame] | 2087 | intel_uc_fini_misc(dev_priv); |
Sagar Arun Kamble | da943b5 | 2018-01-10 18:24:16 +0530 | [diff] [blame] | 2088 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 2089 | if (ret != -EIO) { |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2090 | i915_gem_cleanup_userptr(dev_priv); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 2091 | i915_timelines_fini(dev_priv); |
| 2092 | } |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2093 | |
Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 2094 | if (ret == -EIO) { |
Chris Wilson | 7ed43df | 2018-07-26 09:50:32 +0100 | [diff] [blame] | 2095 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 2096 | |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2097 | /* |
| 2098 | * Allow engine initialisation to fail by marking the GPU as |
Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 2099 | * wedged. But we only want to do this where the GPU is angry; |
 | 2100 | * for all other failures, such as an allocation failure, bail. |
| 2101 | */ |
Chris Wilson | c41166f | 2019-02-20 14:56:37 +0000 | [diff] [blame] | 2102 | if (!i915_reset_failed(dev_priv)) { |
Chris Wilson | 51c18bf | 2018-06-09 12:10:58 +0100 | [diff] [blame] | 2103 | i915_load_error(dev_priv, |
| 2104 | "Failed to initialize GPU, declaring it wedged!\n"); |
Chris Wilson | 6f74b36 | 2017-10-15 15:37:25 +0100 | [diff] [blame] | 2105 | i915_gem_set_wedged(dev_priv); |
| 2106 | } |
Chris Wilson | 7ed43df | 2018-07-26 09:50:32 +0100 | [diff] [blame] | 2107 | |
| 2108 | /* Minimal basic recovery for KMS */ |
| 2109 | ret = i915_ggtt_enable_hw(dev_priv); |
| 2110 | i915_gem_restore_gtt_mappings(dev_priv); |
| 2111 | i915_gem_restore_fences(dev_priv); |
| 2112 | intel_init_clock_gating(dev_priv); |
| 2113 | |
| 2114 | mutex_unlock(&dev_priv->drm.struct_mutex); |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 2115 | } |
| 2116 | |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 2117 | i915_gem_drain_freed_objects(dev_priv); |
Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 2118 | return ret; |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 2119 | } |
| 2120 | |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 2121 | void i915_gem_fini(struct drm_i915_private *dev_priv) |
| 2122 | { |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 2123 | GEM_BUG_ON(dev_priv->gt.awake); |
| 2124 | |
Chris Wilson | b27e35a | 2019-05-27 12:51:14 +0100 | [diff] [blame] | 2125 | intel_wakeref_auto_fini(&dev_priv->mm.userfault_wakeref); |
| 2126 | |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 2127 | i915_gem_suspend_late(dev_priv); |
Chris Wilson | 30b71084 | 2018-08-12 23:36:29 +0100 | [diff] [blame] | 2128 | intel_disable_gt_powersave(dev_priv); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 2129 | |
| 2130 | /* Flush any outstanding unpin_work. */ |
| 2131 | i915_gem_drain_workqueue(dev_priv); |
| 2132 | |
| 2133 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 2134 | intel_uc_fini_hw(dev_priv); |
| 2135 | intel_uc_fini(dev_priv); |
Chris Wilson | 45b9c96 | 2019-05-01 11:32:04 +0100 | [diff] [blame] | 2136 | intel_engines_cleanup(dev_priv); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 2137 | i915_gem_contexts_fini(dev_priv); |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 2138 | i915_gem_fini_scratch(dev_priv); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 2139 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 2140 | |
Tvrtko Ursulin | 25d140f | 2018-12-03 13:33:19 +0000 | [diff] [blame] | 2141 | intel_wa_list_free(&dev_priv->gt_wa_list); |
| 2142 | |
Chris Wilson | 30b71084 | 2018-08-12 23:36:29 +0100 | [diff] [blame] | 2143 | intel_cleanup_gt_powersave(dev_priv); |
| 2144 | |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 2145 | intel_uc_fini_misc(dev_priv); |
| 2146 | i915_gem_cleanup_userptr(dev_priv); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 2147 | i915_timelines_fini(dev_priv); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 2148 | |
| 2149 | i915_gem_drain_freed_objects(dev_priv); |
| 2150 | |
| 2151 | WARN_ON(!list_empty(&dev_priv->contexts.list)); |
| 2152 | } |
| 2153 | |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 2154 | void i915_gem_init_mmio(struct drm_i915_private *i915) |
| 2155 | { |
| 2156 | i915_gem_sanitize(i915); |
| 2157 | } |
| 2158 | |
Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 2159 | void |
Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 2160 | i915_gem_load_init_fences(struct drm_i915_private *dev_priv) |
| 2161 | { |
Chris Wilson | 49ef529 | 2016-08-18 17:17:00 +0100 | [diff] [blame] | 2162 | int i; |
Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 2163 | |
Tvrtko Ursulin | c56b89f | 2018-02-09 21:58:46 +0000 | [diff] [blame] | 2164 | if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) && |
Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 2165 | !IS_CHERRYVIEW(dev_priv)) |
| 2166 | dev_priv->num_fence_regs = 32; |
Tvrtko Ursulin | c56b89f | 2018-02-09 21:58:46 +0000 | [diff] [blame] | 2167 | else if (INTEL_GEN(dev_priv) >= 4 || |
Jani Nikula | 73f67aa | 2016-12-07 22:48:09 +0200 | [diff] [blame] | 2168 | IS_I945G(dev_priv) || IS_I945GM(dev_priv) || |
| 2169 | IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) |
Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 2170 | dev_priv->num_fence_regs = 16; |
| 2171 | else |
| 2172 | dev_priv->num_fence_regs = 8; |
| 2173 | |
Chris Wilson | c033666 | 2016-05-06 15:40:21 +0100 | [diff] [blame] | 2174 | if (intel_vgpu_active(dev_priv)) |
Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 2175 | dev_priv->num_fence_regs = |
| 2176 | I915_READ(vgtif_reg(avail_rs.fence_num)); |
| 2177 | |
| 2178 | /* Initialize fence registers to zero */ |
Chris Wilson | 49ef529 | 2016-08-18 17:17:00 +0100 | [diff] [blame] | 2179 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
| 2180 | struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; |
| 2181 | |
| 2182 | fence->i915 = dev_priv; |
| 2183 | fence->id = i; |
| 2184 | list_add_tail(&fence->link, &dev_priv->mm.fence_list); |
| 2185 | } |
Tvrtko Ursulin | 4362f4f | 2016-11-16 08:55:33 +0000 | [diff] [blame] | 2186 | i915_gem_restore_fences(dev_priv); |
Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 2187 | |
Tvrtko Ursulin | 4362f4f | 2016-11-16 08:55:33 +0000 | [diff] [blame] | 2188 | i915_gem_detect_bit_6_swizzle(dev_priv); |
Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 2189 | } |
| 2190 | |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 2191 | static void i915_gem_init__mm(struct drm_i915_private *i915) |
| 2192 | { |
| 2193 | spin_lock_init(&i915->mm.object_stat_lock); |
| 2194 | spin_lock_init(&i915->mm.obj_lock); |
| 2195 | spin_lock_init(&i915->mm.free_lock); |
| 2196 | |
| 2197 | init_llist_head(&i915->mm.free_list); |
| 2198 | |
| 2199 | INIT_LIST_HEAD(&i915->mm.unbound_list); |
| 2200 | INIT_LIST_HEAD(&i915->mm.bound_list); |
| 2201 | INIT_LIST_HEAD(&i915->mm.fence_list); |
Chris Wilson | b27e35a | 2019-05-27 12:51:14 +0100 | [diff] [blame] | 2202 | |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 2203 | INIT_LIST_HEAD(&i915->mm.userfault_list); |
Chris Wilson | b27e35a | 2019-05-27 12:51:14 +0100 | [diff] [blame] | 2204 | intel_wakeref_auto_init(&i915->mm.userfault_wakeref, i915); |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 2205 | |
Chris Wilson | 8475355 | 2019-05-28 10:29:45 +0100 | [diff] [blame] | 2206 | i915_gem_init__objects(i915); |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 2207 | } |
| 2208 | |
Michal Wajdeczko | a0de908 | 2018-03-23 12:34:49 +0000 | [diff] [blame] | 2209 | int i915_gem_init_early(struct drm_i915_private *dev_priv) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2210 | { |
Chris Wilson | 13f1bfd | 2019-02-28 10:20:34 +0000 | [diff] [blame] | 2211 | int err; |
Chris Wilson | d1b48c1 | 2017-08-16 09:52:08 +0100 | [diff] [blame] | 2212 | |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 2213 | intel_gt_pm_init(dev_priv); |
| 2214 | |
Chris Wilson | 643b450 | 2018-04-30 14:15:03 +0100 | [diff] [blame] | 2215 | INIT_LIST_HEAD(&dev_priv->gt.active_rings); |
Chris Wilson | 3365e22 | 2018-05-03 20:51:14 +0100 | [diff] [blame] | 2216 | INIT_LIST_HEAD(&dev_priv->gt.closed_vma); |
Chris Wilson | 643b450 | 2018-04-30 14:15:03 +0100 | [diff] [blame] | 2217 | |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 2218 | i915_gem_init__mm(dev_priv); |
Chris Wilson | 23c3c3d | 2019-04-24 21:07:14 +0100 | [diff] [blame] | 2219 | i915_gem_init__pm(dev_priv); |
Chris Wilson | f212381 | 2017-10-16 12:40:37 +0100 | [diff] [blame] | 2220 | |
Chris Wilson | 1f15b76 | 2016-07-01 17:23:14 +0100 | [diff] [blame] | 2221 | init_waitqueue_head(&dev_priv->gpu_error.wait_queue); |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 2222 | init_waitqueue_head(&dev_priv->gpu_error.reset_queue); |
Chris Wilson | 18bb2bc | 2019-01-14 21:04:01 +0000 | [diff] [blame] | 2223 | mutex_init(&dev_priv->gpu_error.wedge_mutex); |
Chris Wilson | 2caffbf | 2019-02-08 15:37:03 +0000 | [diff] [blame] | 2224 | init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); |
Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 2225 | |
Joonas Lahtinen | 6f63340 | 2016-09-01 14:58:21 +0300 | [diff] [blame] | 2226 | atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); |
| 2227 | |
Chris Wilson | b5add95 | 2016-08-04 16:32:36 +0100 | [diff] [blame] | 2228 | spin_lock_init(&dev_priv->fb_tracking.lock); |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 2229 | |
Matthew Auld | 465c403 | 2017-10-06 23:18:14 +0100 | [diff] [blame] | 2230 | err = i915_gemfs_init(dev_priv); |
| 2231 | if (err) |
 | 2232 | DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err); |
| 2233 | |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 2234 | return 0; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2235 | } |
Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 2236 | |
Michal Wajdeczko | a0de908 | 2018-03-23 12:34:49 +0000 | [diff] [blame] | 2237 | void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) |
Imre Deak | d64aa09 | 2016-01-19 15:26:29 +0200 | [diff] [blame] | 2238 | { |
Chris Wilson | c4d4c1c | 2017-02-10 16:35:23 +0000 | [diff] [blame] | 2239 | i915_gem_drain_freed_objects(dev_priv); |
Chris Wilson | c9c70471 | 2018-02-19 22:06:31 +0000 | [diff] [blame] | 2240 | GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); |
| 2241 | GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); |
Chris Wilson | c4d4c1c | 2017-02-10 16:35:23 +0000 | [diff] [blame] | 2242 | WARN_ON(dev_priv->mm.object_count); |
Matthew Auld | ea84aa7 | 2016-11-17 21:04:11 +0000 | [diff] [blame] | 2243 | |
Chris Wilson | 2caffbf | 2019-02-08 15:37:03 +0000 | [diff] [blame] | 2244 | cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); |
| 2245 | |
Matthew Auld | 465c403 | 2017-10-06 23:18:14 +0100 | [diff] [blame] | 2246 | i915_gemfs_fini(dev_priv); |
Imre Deak | d64aa09 | 2016-01-19 15:26:29 +0200 | [diff] [blame] | 2247 | } |
| 2248 | |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 2249 | int i915_gem_freeze(struct drm_i915_private *dev_priv) |
| 2250 | { |
Chris Wilson | d0aa301 | 2017-04-07 11:25:49 +0100 | [diff] [blame] | 2251 | /* Discard all purgeable objects; let userspace recover those as |
| 2252 | * required after resuming. |
| 2253 | */ |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 2254 | i915_gem_shrink_all(dev_priv); |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 2255 | |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 2256 | return 0; |
| 2257 | } |
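/*
 * Note (added for clarity): i915_gem_freeze() runs at device-freeze time
 * during hibernation, while i915_gem_freeze_late() below runs later,
 * just before the hibernation image is written (see its comment).
 */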
| 2258 | |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 2259 | int i915_gem_freeze_late(struct drm_i915_private *i915) |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 2260 | { |
| 2261 | struct drm_i915_gem_object *obj; |
Chris Wilson | 7aab2d5 | 2016-09-09 20:02:18 +0100 | [diff] [blame] | 2262 | struct list_head *phases[] = { |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 2263 | &i915->mm.unbound_list, |
| 2264 | &i915->mm.bound_list, |
Chris Wilson | 7aab2d5 | 2016-09-09 20:02:18 +0100 | [diff] [blame] | 2265 | NULL |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 2266 | }, **phase; |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 2267 | |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 2268 | /* |
| 2269 | * Called just before we write the hibernation image. |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 2270 | * |
| 2271 | * We need to update the domain tracking to reflect that the CPU |
| 2272 | * will be accessing all the pages to create and restore from the |
| 2273 | * hibernation, and so upon restoration those pages will be in the |
| 2274 | * CPU domain. |
| 2275 | * |
| 2276 | * To make sure the hibernation image contains the latest state, |
| 2277 | * we update that state just before writing out the image. |
Chris Wilson | 7aab2d5 | 2016-09-09 20:02:18 +0100 | [diff] [blame] | 2278 | * |
| 2279 | * To try and reduce the hibernation image, we manually shrink |
Chris Wilson | d0aa301 | 2017-04-07 11:25:49 +0100 | [diff] [blame] | 2280 | * the objects as well, see i915_gem_freeze() |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 2281 | */ |
| 2282 | |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 2283 | i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND); |
| 2284 | i915_gem_drain_freed_objects(i915); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 2285 | |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 2286 | mutex_lock(&i915->drm.struct_mutex); |
| 2287 | for (phase = phases; *phase; phase++) { |
| 2288 | list_for_each_entry(obj, *phase, mm.link) |
| 2289 | WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true)); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 2290 | } |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 2291 | mutex_unlock(&i915->drm.struct_mutex); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 2292 | |
| 2293 | return 0; |
| 2294 | } |
| 2295 | |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 2296 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 2297 | { |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 2298 | struct drm_i915_file_private *file_priv = file->driver_priv; |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 2299 | struct i915_request *request; |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 2300 | |
| 2301 | /* Clean up our request list when the client is going away, so that |
| 2302 | * later retire_requests won't dereference our soon-to-be-gone |
| 2303 | * file_priv. |
| 2304 | */ |
Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 2305 | spin_lock(&file_priv->mm.lock); |
Chris Wilson | c8659ef | 2017-03-02 12:25:25 +0000 | [diff] [blame] | 2306 | list_for_each_entry(request, &file_priv->mm.request_list, client_link) |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 2307 | request->file_priv = NULL; |
Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 2308 | spin_unlock(&file_priv->mm.lock); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2309 | } |
| 2310 | |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 2311 | int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2312 | { |
| 2313 | struct drm_i915_file_private *file_priv; |
Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 2314 | int ret; |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2315 | |
Chris Wilson | c4c29d7 | 2016-11-09 10:45:07 +0000 | [diff] [blame] | 2316 | DRM_DEBUG("\n"); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2317 | |
| 2318 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); |
| 2319 | if (!file_priv) |
| 2320 | return -ENOMEM; |
| 2321 | |
| 2322 | file->driver_priv = file_priv; |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 2323 | file_priv->dev_priv = i915; |
Chris Wilson | ab0e7ff | 2014-02-25 17:11:24 +0200 | [diff] [blame] | 2324 | file_priv->file = file; |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2325 | |
| 2326 | spin_lock_init(&file_priv->mm.lock); |
| 2327 | INIT_LIST_HEAD(&file_priv->mm.request_list); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2328 | |
Chris Wilson | c80ff16 | 2016-07-27 09:07:27 +0100 | [diff] [blame] | 2329 | file_priv->bsd_engine = -1; |
Mika Kuoppala | 14921f3 | 2018-06-15 13:44:29 +0300 | [diff] [blame] | 2330 | file_priv->hang_timestamp = jiffies; |
Tvrtko Ursulin | de1add3 | 2016-01-15 15:12:50 +0000 | [diff] [blame] | 2331 | |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 2332 | ret = i915_gem_context_open(i915, file); |
Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 2333 | if (ret) |
| 2334 | kfree(file_priv); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2335 | |
Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 2336 | return ret; |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 2337 | } |
| 2338 | |
Daniel Vetter | b680c37 | 2014-09-19 18:27:27 +0200 | [diff] [blame] | 2339 | /** |
| 2340 | * i915_gem_track_fb - update frontbuffer tracking |
Geliang Tang | d9072a3 | 2015-09-15 05:58:44 -0700 | [diff] [blame] | 2341 | * @old: current GEM buffer for the frontbuffer slots |
| 2342 | * @new: new GEM buffer for the frontbuffer slots |
| 2343 | * @frontbuffer_bits: bitmask of frontbuffer slots |
Daniel Vetter | b680c37 | 2014-09-19 18:27:27 +0200 | [diff] [blame] | 2344 | * |
| 2345 | * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them |
| 2346 | * from @old and setting them in @new. Both @old and @new can be NULL. |
| 2347 | */ |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 2348 | void i915_gem_track_fb(struct drm_i915_gem_object *old, |
| 2349 | struct drm_i915_gem_object *new, |
| 2350 | unsigned frontbuffer_bits) |
| 2351 | { |
Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 2352 | /* Control of individual bits within the mask is guarded by |
| 2353 | * the owning plane->mutex, i.e. we can never see concurrent |
| 2354 | * manipulation of individual bits. But since the bitfield as a whole |
| 2355 | * is updated using RMW, we need to use atomics in order to update |
| 2356 | * the bits. |
| 2357 | */ |
| 2358 | BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > |
Chris Wilson | 74f6e18 | 2018-09-26 11:47:07 +0100 | [diff] [blame] | 2359 | BITS_PER_TYPE(atomic_t)); |
Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 2360 | |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 2361 | if (old) { |
Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 2362 | WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); |
| 2363 | atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 2364 | } |
| 2365 | |
| 2366 | if (new) { |
Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 2367 | WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); |
| 2368 | atomic_or(frontbuffer_bits, &new->frontbuffer_bits); |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 2369 | } |
| 2370 | } |
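/*
 * Illustrative usage (hedged, simplified from typical display-code
 * callers): when a plane flips to a new framebuffer, tracking is
 * transferred in a single call, e.g.:
 *
 *	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(new_fb),
 *			  plane->frontbuffer_bit);
 *
 * Passing NULL for either side covers enabling or disabling a plane.
 */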
| 2371 | |
Chris Wilson | 935a2f7 | 2017-02-13 17:15:13 +0000 | [diff] [blame] | 2372 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
| 2373 | #include "selftests/scatterlist.c" |
Chris Wilson | 66d9cb5 | 2017-02-13 17:15:17 +0000 | [diff] [blame] | 2374 | #include "selftests/mock_gem_device.c" |
Chris Wilson | 3f51b7e1 | 2018-08-30 14:48:06 +0100 | [diff] [blame] | 2375 | #include "selftests/i915_gem.c" |
Chris Wilson | 935a2f7 | 2017-02-13 17:15:13 +0000 | [diff] [blame] | 2376 | #endif |