/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_renderstate.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_pm.h"

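/*
 * Reserve a node of the given size in the mappable region of the GGTT,
 * used as a temporary window for page-at-a-time GGTT copies when the
 * object cannot be pinned in the aperture directly. Both helpers
 * serialise against other GGTT updates via the vm mutex.
 */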
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->vm.mutex);
	if (err)
		return err;

	memset(node, 0, sizeof(*node));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					  size, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);

	mutex_unlock(&ggtt->vm.mutex);

	return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(node);
	mutex_unlock(&ggtt->vm.mutex);
}

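/**
 * Returns the total size of the GGTT and how much of it is still
 * available, i.e. not consumed by pinned vmas or reserved ranges.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */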
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	if (mutex_lock_interruptible(&ggtt->vm.mutex))
		return -EINTR;

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

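/*
 * Unbind all vmas attached to @obj. With I915_GEM_OBJECT_UNBIND_ACTIVE set,
 * active vmas are unbound as well; otherwise an active vma, or an address
 * space that is already being closed, aborts the walk with -EBUSY.
 */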
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		struct i915_address_space *vm = vma->vm;

		ret = -EBUSY;
		if (!i915_vm_tryopen(vm))
			break;

		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
		    !i915_vma_is_active(vma))
			ret = i915_vma_unbind(vma);

		i915_vm_close(vm);
		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

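/*
 * Fast pwrite for physically contiguous (phys_handle) objects: copy the
 * user data straight into the kernel mapping, clflush it and flush the
 * chipset so the GPU observes the new contents.
 */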
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
	return 0;
}

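/*
 * Common creation path for the create and dumb_create ioctls: round the
 * requested size up to a full page, allocate a shmem-backed object and
 * return a new handle to it, writing the final size back via @size_p.
 */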
static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}

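/*
 * Implements the dumb_create callback: pick a format matching the
 * requested bpp, align the pitch to 64 bytes (or to a page when it
 * exceeds the maximum remappable stride) and create a buffer of
 * pitch * height bytes.
 */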
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}

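/* Per-page copy function for the shmem pread fastpath.
 * Clflushes the page first if needs_clflush is set, so that the copy
 * observes any data freshly written by the GPU.
 */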
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

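/*
 * Read via the CPU from the object's backing pages: walk the object one
 * page at a time under a fence and copy each chunk out to userspace with
 * shmem_pread().
 */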
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}

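/*
 * Copy @length bytes from a GGTT io mapping out to userspace. The atomic
 * kmap is tried first; if the copy faults, fall back to a non-atomic WC
 * mapping. Returns non-zero if any bytes were left uncopied.
 */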
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

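/*
 * Fallback pread through the GGTT: pin the object into the mappable
 * aperture if possible, otherwise copy page by page through a temporary
 * GGTT window inserted with insert_page(), reading via the uncached
 * io mapping.
 */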
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	vma = ERR_PTR(-ENODEV);
	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!drm_mm_node_allocated(&node));
	}

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	if (drm_mm_node_allocated(&node)) {
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(ggtt, &node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref)
			return -EFAULT;
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = ERR_PTR(-ENODEV);
	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!drm_mm_node_allocated(&node));
	}

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	if (drm_mm_node_allocated(&node)) {
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(ggtt, &node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

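/*
 * Write via the CPU into the object's backing pages: walk the object one
 * page at a time under a fence and copy each chunk in from userspace with
 * shmem_pwrite(), clflushing around partial cacheline writes as needed.
 */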
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 813 | /** |
| 814 | * Called when user space has done writes to this buffer |
Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 815 | * @dev: drm device |
| 816 | * @data: ioctl data blob |
| 817 | * @file: drm file |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 818 | */ |
| 819 | int |
| 820 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 821 | struct drm_file *file) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 822 | { |
| 823 | struct drm_i915_gem_sw_finish *args = data; |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 824 | struct drm_i915_gem_object *obj; |
Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 825 | |
Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 826 | obj = i915_gem_object_lookup(file, args->handle); |
Chris Wilson | c21724c | 2016-08-05 10:14:19 +0100 | [diff] [blame] | 827 | if (!obj) |
| 828 | return -ENOENT; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 829 | |
Tina Zhang | a03f395 | 2017-11-14 10:25:13 +0000 | [diff] [blame] | 830 | /* |
| 831 | * Proxy objects are barred from CPU access, so there is no |
| 832 | * need to ban sw_finish as it is a nop. |
| 833 | */ |
| 834 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 835 | /* Pinned buffers may be scanout, so flush the cache */ |
Chris Wilson | 5a97bcc | 2017-02-22 11:40:46 +0000 | [diff] [blame] | 836 | i915_gem_object_flush_if_display(obj); |
Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 837 | i915_gem_object_put(obj); |
Chris Wilson | 5a97bcc | 2017-02-22 11:40:46 +0000 | [diff] [blame] | 838 | |
| 839 | return 0; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 840 | } |
| 841 | |
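| | /** |
| |  * i915_gem_runtime_suspend - prepare GEM state for runtime suspend |
| |  * @i915: i915 device instance |
| |  * |
| |  * Revoke the userspace GGTT mmaps that cannot survive the device powering |
| |  * down, and mark the hardware fence registers dirty so that their contents |
| |  * are restored upon resume. |
| |  */ |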
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 842 | void i915_gem_runtime_suspend(struct drm_i915_private *i915) |
Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 843 | { |
Chris Wilson | 3594a3e | 2016-10-24 13:42:16 +0100 | [diff] [blame] | 844 | struct drm_i915_gem_object *obj, *on; |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 845 | int i; |
Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 846 | |
Chris Wilson | 3594a3e | 2016-10-24 13:42:16 +0100 | [diff] [blame] | 847 | /* |
| 848 | * Only called during RPM suspend. All users of the userfault_list |
| 849 | * must be holding an RPM wakeref to ensure that this cannot |
| 850 | * run concurrently with themselves (and use the struct_mutex for |
| 851 | * protection between themselves). |
| 852 | */ |
| 853 | |
| 854 | list_for_each_entry_safe(obj, on, |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 855 | &i915->ggtt.userfault_list, userfault_link) |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 856 | __i915_gem_object_release_mmap(obj); |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 857 | |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 858 | /* |
| 859 | * The fences will be lost when the device powers down. If any were |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 860 | * in use by hardware (i.e. they are pinned), we should not be powering |
| 861 | * down! All other fences will be reacquired by the user upon waking. |
| 862 | */ |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 863 | for (i = 0; i < i915->ggtt.num_fences; i++) { |
| 864 | struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i]; |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 865 | |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 866 | /* |
| 867 | * Ideally we want to assert that the fence register is not |
Chris Wilson | e0ec3ec | 2017-02-03 12:57:17 +0000 | [diff] [blame] | 868 | * live at this point (i.e. that no piece of code will be |
| 869 | * trying to write through fence + GTT, as that would both violate |
| 870 | * our tracking of activity and the associated locking/barriers, |
| 871 | * and be illegal given that the hw is powered down). |
| 872 | * |
| 873 | * Previously we used reg->pin_count as a "liveness" indicator. |
| 874 | * That is not sufficient, and we need a more fine-grained |
| 875 | * tool if we want to have a sanity check here. |
| 876 | */ |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 877 | |
| 878 | if (!reg->vma) |
| 879 | continue; |
| 880 | |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 881 | GEM_BUG_ON(i915_vma_has_userfault(reg->vma)); |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 882 | reg->dirty = true; |
| 883 | } |
Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 884 | } |
| 885 | |
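| | /** |
| |  * i915_gem_object_ggtt_pin - pin an object into the global GTT |
| |  * @obj: object to pin |
| |  * @view: requested GGTT view, or NULL for the normal view |
| |  * @size: requested size of the binding (0 defaults to the object's size) |
| |  * @alignment: required alignment of the binding, or 0 for the default |
| |  * @flags: PIN_* placement flags (e.g. PIN_MAPPABLE, PIN_NONBLOCK) |
| |  * |
| |  * A typical caller pins for the duration of an access and releases the |
| |  * pin afterwards, e.g. (illustrative sketch only): |
| |  * |
| |  *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); |
| |  *	if (IS_ERR(vma)) |
| |  *		return PTR_ERR(vma); |
| |  *	... |
| |  *	i915_vma_unpin(vma); |
| |  * |
| |  * Returns: the pinned vma on success, or an ERR_PTR() on failure. |
| |  */ |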
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 886 | struct i915_vma * |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 887 | i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, |
| 888 | const struct i915_ggtt_view *view, |
Chris Wilson | 91b2db6 | 2016-08-04 16:32:23 +0100 | [diff] [blame] | 889 | u64 size, |
Chris Wilson | 2ffffd0 | 2016-08-04 16:32:22 +0100 | [diff] [blame] | 890 | u64 alignment, |
| 891 | u64 flags) |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 892 | { |
Chris Wilson | ad16d2e | 2016-10-13 09:55:04 +0100 | [diff] [blame] | 893 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
Chris Wilson | 82ad644 | 2018-06-05 16:37:58 +0100 | [diff] [blame] | 894 | struct i915_address_space *vm = &dev_priv->ggtt.vm; |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 895 | struct i915_vma *vma; |
| 896 | int ret; |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 897 | |
Chris Wilson | a431174 | 2019-09-28 09:25:46 +0100 | [diff] [blame] | 898 | if (i915_gem_object_never_bind_ggtt(obj)) |
| 899 | return ERR_PTR(-ENODEV); |
| 900 | |
Chris Wilson | ac87a6fd | 2018-02-20 13:42:05 +0000 | [diff] [blame] | 901 | if (flags & PIN_MAPPABLE && |
| 902 | (!view || view->type == I915_GGTT_VIEW_NORMAL)) { |
Chris Wilson | 43ae70d9 | 2017-10-09 09:44:01 +0100 | [diff] [blame] | 903 | /* If the required space is larger than the available |
| 904 | * aperture, we will not be able to find a slot for the |
| 905 | * object and unbinding the object now will be in |
| 906 | * vain. Worse, doing so may cause us to ping-pong |
| 907 | * the object in and out of the Global GTT and |
| 908 | * waste a lot of cycles under the mutex. |
| 909 | */ |
| 910 | if (obj->base.size > dev_priv->ggtt.mappable_end) |
| 911 | return ERR_PTR(-E2BIG); |
| 912 | |
| 913 | /* If NONBLOCK is set the caller is optimistically |
| 914 | * trying to cache the full object within the mappable |
| 915 | * aperture, and *must* have a fallback in place for |
| 916 | * situations where we cannot bind the object. We |
| 917 | * can be a little more lax here and use the fallback |
| 918 | * more often to avoid costly migrations of ourselves |
| 919 | * and other objects within the aperture. |
| 920 | * |
| 921 | * Half the aperture is used as a simple heuristic. |
| 922 | * More interesting would be to search for a free |
| 923 | * block prior to making the commitment to unbind. |
| 924 | * That caters for the self-harm case, and with a |
| 925 | * little more heuristics (e.g. NOFAULT, NOEVICT) |
| 926 | * we could try to minimise harm to others. |
| 927 | */ |
| 928 | if (flags & PIN_NONBLOCK && |
| 929 | obj->base.size > dev_priv->ggtt.mappable_end / 2) |
| 930 | return ERR_PTR(-ENOSPC); |
| 931 | } |
| 932 | |
Chris Wilson | 718659a | 2017-01-16 15:21:28 +0000 | [diff] [blame] | 933 | vma = i915_vma_instance(obj, vm, view); |
Chengguang Xu | 772b540 | 2019-02-21 10:08:19 +0800 | [diff] [blame] | 934 | if (IS_ERR(vma)) |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 935 | return vma; |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 936 | |
| 937 | if (i915_vma_misplaced(vma, size, alignment, flags)) { |
Chris Wilson | 43ae70d9 | 2017-10-09 09:44:01 +0100 | [diff] [blame] | 938 | if (flags & PIN_NONBLOCK) { |
| 939 | if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) |
| 940 | return ERR_PTR(-ENOSPC); |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 941 | |
Chris Wilson | 43ae70d9 | 2017-10-09 09:44:01 +0100 | [diff] [blame] | 942 | if (flags & PIN_MAPPABLE && |
Chris Wilson | 944397f | 2017-01-09 16:16:11 +0000 | [diff] [blame] | 943 | vma->fence_size > dev_priv->ggtt.mappable_end / 2) |
Chris Wilson | ad16d2e | 2016-10-13 09:55:04 +0100 | [diff] [blame] | 944 | return ERR_PTR(-ENOSPC); |
| 945 | } |
| 946 | |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 947 | ret = i915_vma_unbind(vma); |
| 948 | if (ret) |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 949 | return ERR_PTR(ret); |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 950 | } |
| 951 | |
Chris Wilson | 636e83f | 2019-08-23 16:39:44 +0100 | [diff] [blame] | 952 | if (vma->fence && !i915_gem_object_is_tiled(obj)) { |
| 953 | mutex_lock(&vma->vm->mutex); |
| 954 | ret = i915_vma_revoke_fence(vma); |
| 955 | mutex_unlock(&vma->vm->mutex); |
| 956 | if (ret) |
| 957 | return ERR_PTR(ret); |
| 958 | } |
| 959 | |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 960 | ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); |
| 961 | if (ret) |
| 962 | return ERR_PTR(ret); |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 963 | |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 964 | return vma; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 965 | } |
| 966 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 967 | int |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 968 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
| 969 | struct drm_file *file_priv) |
| 970 | { |
Chris Wilson | 3b4fa96 | 2019-05-30 21:34:59 +0100 | [diff] [blame] | 971 | struct drm_i915_private *i915 = to_i915(dev); |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 972 | struct drm_i915_gem_madvise *args = data; |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 973 | struct drm_i915_gem_object *obj; |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 974 | int err; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 975 | |
| 976 | switch (args->madv) { |
| 977 | case I915_MADV_DONTNEED: |
| 978 | case I915_MADV_WILLNEED: |
| 979 | break; |
| 980 | default: |
| 981 | return -EINVAL; |
| 982 | } |
| 983 | |
Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 984 | obj = i915_gem_object_lookup(file_priv, args->handle); |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 985 | if (!obj) |
| 986 | return -ENOENT; |
| 987 | |
| 988 | err = mutex_lock_interruptible(&obj->mm.lock); |
| 989 | if (err) |
| 990 | goto out; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 991 | |
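| | /* |
| |  * On platforms with the pin-swizzled-pages quirk, tiled objects |
| |  * marked WILLNEED keep an extra pin on their pages; transfer that |
| |  * pin so that it follows the new madvise state. |
| |  */ |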
Chris Wilson | f1fa4f4 | 2017-10-13 21:26:13 +0100 | [diff] [blame] | 992 | if (i915_gem_object_has_pages(obj) && |
Chris Wilson | 3e510a8 | 2016-08-05 10:14:23 +0100 | [diff] [blame] | 993 | i915_gem_object_is_tiled(obj) && |
Chris Wilson | 3b4fa96 | 2019-05-30 21:34:59 +0100 | [diff] [blame] | 994 | i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { |
Chris Wilson | bc0629a | 2016-11-01 10:03:17 +0000 | [diff] [blame] | 995 | if (obj->mm.madv == I915_MADV_WILLNEED) { |
| 996 | GEM_BUG_ON(!obj->mm.quirked); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 997 | __i915_gem_object_unpin_pages(obj); |
Chris Wilson | bc0629a | 2016-11-01 10:03:17 +0000 | [diff] [blame] | 998 | obj->mm.quirked = false; |
| 999 | } |
| 1000 | if (args->madv == I915_MADV_WILLNEED) { |
Chris Wilson | 2c3a3f4 | 2016-11-04 10:30:01 +0000 | [diff] [blame] | 1001 | GEM_BUG_ON(obj->mm.quirked); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1002 | __i915_gem_object_pin_pages(obj); |
Chris Wilson | bc0629a | 2016-11-01 10:03:17 +0000 | [diff] [blame] | 1003 | obj->mm.quirked = true; |
| 1004 | } |
Daniel Vetter | 656bfa3 | 2014-11-20 09:26:30 +0100 | [diff] [blame] | 1005 | } |
| 1006 | |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1007 | if (obj->mm.madv != __I915_MADV_PURGED) |
| 1008 | obj->mm.madv = args->madv; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1009 | |
Chris Wilson | 3b4fa96 | 2019-05-30 21:34:59 +0100 | [diff] [blame] | 1010 | if (i915_gem_object_has_pages(obj)) { |
| 1011 | struct list_head *list; |
| 1012 | |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1013 | if (i915_gem_object_is_shrinkable(obj)) { |
Chris Wilson | a8cff4c8 | 2019-06-10 15:54:30 +0100 | [diff] [blame] | 1014 | unsigned long flags; |
| 1015 | |
| 1016 | spin_lock_irqsave(&i915->mm.obj_lock, flags); |
| 1017 | |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1018 | if (obj->mm.madv != I915_MADV_WILLNEED) |
| 1019 | list = &i915->mm.purge_list; |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1020 | else |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1021 | list = &i915->mm.shrink_list; |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1022 | list_move_tail(&obj->mm.link, list); |
Chris Wilson | a8cff4c8 | 2019-06-10 15:54:30 +0100 | [diff] [blame] | 1023 | |
| 1024 | spin_unlock_irqrestore(&i915->mm.obj_lock, flags); |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1025 | } |
Chris Wilson | 3b4fa96 | 2019-05-30 21:34:59 +0100 | [diff] [blame] | 1026 | } |
| 1027 | |
Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 1028 | /* if the object is no longer attached, discard its backing storage */ |
Chris Wilson | f1fa4f4 | 2017-10-13 21:26:13 +0100 | [diff] [blame] | 1029 | if (obj->mm.madv == I915_MADV_DONTNEED && |
| 1030 | !i915_gem_object_has_pages(obj)) |
Chris Wilson | f033428 | 2019-05-28 10:29:46 +0100 | [diff] [blame] | 1031 | i915_gem_object_truncate(obj); |
Chris Wilson | 2d7ef39 | 2009-09-20 23:13:10 +0100 | [diff] [blame] | 1032 | |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1033 | args->retained = obj->mm.madv != __I915_MADV_PURGED; |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1034 | mutex_unlock(&obj->mm.lock); |
Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 1035 | |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1036 | out: |
Chris Wilson | f8c417c | 2016-07-20 13:31:53 +0100 | [diff] [blame] | 1037 | i915_gem_object_put(obj); |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1038 | return err; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1039 | } |
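| | |
| | /* |
| |  * Illustrative userspace flow for the madvise ioctl above (a sketch using |
| |  * libdrm, not kernel code): mark a cached buffer purgeable while idle, |
| |  * then reclaim it before reuse and check whether its pages survived: |
| |  * |
| |  *	struct drm_i915_gem_madvise madv = { .handle = handle }; |
| |  * |
| |  *	madv.madv = I915_MADV_DONTNEED; |
| |  *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv); |
| |  *	... |
| |  *	madv.madv = I915_MADV_WILLNEED; |
| |  *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv); |
| |  * |
| |  * If madv.retained is zero afterwards, the backing store was purged and |
| |  * the buffer contents must be recreated. |
| |  */ |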
| 1040 | |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1041 | void i915_gem_sanitize(struct drm_i915_private *i915) |
| 1042 | { |
Chris Wilson | 538ef96 | 2019-01-14 14:21:18 +0000 | [diff] [blame] | 1043 | intel_wakeref_t wakeref; |
| 1044 | |
Chris Wilson | c3160da | 2018-05-31 09:22:45 +0100 | [diff] [blame] | 1045 | GEM_TRACE("\n"); |
| 1046 | |
Daniele Ceraolo Spurio | d858d56 | 2019-06-13 16:21:54 -0700 | [diff] [blame] | 1047 | wakeref = intel_runtime_pm_get(&i915->runtime_pm); |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1048 | intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); |
Chris Wilson | c3160da | 2018-05-31 09:22:45 +0100 | [diff] [blame] | 1049 | |
| 1050 | /* |
| 1051 | * As we have just resumed the machine and woken the device up from |
| 1052 | * deep PCI sleep (presumably D3_cold), assume the HW has been reset |
| 1053 | * back to defaults, recovering from whatever wedged state we left it |
| 1054 | * in, and so it is worth trying to use the device once more. |
| 1055 | */ |
Chris Wilson | cb823ed | 2019-07-12 20:29:53 +0100 | [diff] [blame] | 1056 | if (intel_gt_is_wedged(&i915->gt)) |
| 1057 | intel_gt_unset_wedged(&i915->gt); |
Chris Wilson | f36325f | 2017-08-26 12:09:34 +0100 | [diff] [blame] | 1058 | |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1059 | /* |
| 1060 | * If we inherit context state from the BIOS or earlier occupants |
| 1061 | * of the GPU, the GPU may be in an inconsistent state when we |
| 1062 | * try to take over. The only way to remove the earlier state |
| 1063 | * is by resetting. However, resetting on earlier gen is tricky as |
| 1064 | * it may impact the display and we are uncertain about the stability |
Joonas Lahtinen | ea117b8 | 2017-04-28 10:53:38 +0300 | [diff] [blame] | 1065 | * of the reset; in future this could perhaps be applied to even earlier gen. |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1066 | */ |
Chris Wilson | 0c91621 | 2019-06-25 14:01:10 +0100 | [diff] [blame] | 1067 | intel_gt_sanitize(&i915->gt, false); |
Chris Wilson | c3160da | 2018-05-31 09:22:45 +0100 | [diff] [blame] | 1068 | |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1069 | intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); |
Daniele Ceraolo Spurio | d858d56 | 2019-06-13 16:21:54 -0700 | [diff] [blame] | 1070 | intel_runtime_pm_put(&i915->runtime_pm, wakeref); |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1071 | } |
| 1072 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1073 | static int __intel_engines_record_defaults(struct drm_i915_private *i915) |
| 1074 | { |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1075 | struct i915_request *requests[I915_NUM_ENGINES] = {}; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1076 | struct intel_engine_cs *engine; |
| 1077 | enum intel_engine_id id; |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1078 | int err = 0; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1079 | |
| 1080 | /* |
| 1081 | * As we reset the gpu during very early sanitisation, the current |
| 1082 | * register state on the GPU should reflect its default values. |
| 1083 | * We load a context onto the hw (with restore-inhibit), then switch |
| 1084 | * over to a second context to save that default register state. We |
| 1085 | * can then prime every new context with that state so they all start |
| 1086 | * from the same default HW values. |
| 1087 | */ |
| 1088 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1089 | for_each_engine(engine, i915, id) { |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1090 | struct intel_context *ce; |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1091 | struct i915_request *rq; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1092 | |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1093 | /* We must be able to switch to something! */ |
| 1094 | GEM_BUG_ON(!engine->kernel_context); |
| 1095 | engine->serial++; /* force the kernel context switch */ |
| 1096 | |
| 1097 | ce = intel_context_create(i915->kernel_context, engine); |
| 1098 | if (IS_ERR(ce)) { |
| 1099 | err = PTR_ERR(ce); |
| 1100 | goto out; |
| 1101 | } |
| 1102 | |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1103 | rq = intel_context_create_request(ce); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1104 | if (IS_ERR(rq)) { |
| 1105 | err = PTR_ERR(rq); |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1106 | intel_context_put(ce); |
| 1107 | goto out; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1108 | } |
| 1109 | |
Chris Wilson | a562772 | 2019-07-29 12:37:20 +0100 | [diff] [blame] | 1110 | err = intel_engine_emit_ctx_wa(rq); |
| 1111 | if (err) |
| 1112 | goto err_rq; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1113 | |
Chris Wilson | a562772 | 2019-07-29 12:37:20 +0100 | [diff] [blame] | 1114 | err = intel_renderstate_emit(rq); |
| 1115 | if (err) |
| 1116 | goto err_rq; |
| 1117 | |
| 1118 | err_rq: |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1119 | requests[id] = i915_request_get(rq); |
Chris Wilson | 697b9a8 | 2018-06-12 11:51:35 +0100 | [diff] [blame] | 1120 | i915_request_add(rq); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1121 | if (err) |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1122 | goto out; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1123 | } |
| 1124 | |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1125 | /* Flush the default context image to memory, and enable powersaving. */ |
Chris Wilson | 23c3c3d | 2019-04-24 21:07:14 +0100 | [diff] [blame] | 1126 | if (!i915_gem_load_power_context(i915)) { |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1127 | err = -EIO; |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1128 | goto out; |
Chris Wilson | 2621cef | 2018-07-09 13:20:43 +0100 | [diff] [blame] | 1129 | } |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1130 | |
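| | /* |
| |  * With the GPU quiesced, each engine's default register state now |
| |  * resides in its context image; unbind each image from the GGTT and |
| |  * capture a copy to use as the template for future contexts. |
| |  */ |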
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1131 | for (id = 0; id < ARRAY_SIZE(requests); id++) { |
| 1132 | struct i915_request *rq; |
| 1133 | struct i915_vma *state; |
Chris Wilson | 37d7c9c | 2018-09-14 13:35:03 +0100 | [diff] [blame] | 1134 | void *vaddr; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1135 | |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1136 | rq = requests[id]; |
| 1137 | if (!rq) |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1138 | continue; |
| 1139 | |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1140 | /* We want to be able to unbind the state from the GGTT */ |
| 1141 | GEM_BUG_ON(intel_context_is_pinned(rq->hw_context)); |
| 1142 | |
| 1143 | state = rq->hw_context->state; |
| 1144 | if (!state) |
| 1145 | continue; |
Chris Wilson | c4d52fe | 2019-03-08 13:25:19 +0000 | [diff] [blame] | 1146 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1147 | /* |
| 1148 | * As we will hold a reference to the logical state, it will |
| 1149 | * not be torn down with the context, and importantly the |
| 1150 | * object will hold onto its vma (making it possible for a |
| 1151 | * stray GTT write to corrupt our defaults). Unmap the vma |
| 1152 | * from the GTT to prevent such accidents and reclaim the |
| 1153 | * space. |
| 1154 | */ |
| 1155 | err = i915_vma_unbind(state); |
| 1156 | if (err) |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1157 | goto out; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1158 | |
Chris Wilson | 6951e58 | 2019-05-28 10:29:51 +0100 | [diff] [blame] | 1159 | i915_gem_object_lock(state->obj); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1160 | err = i915_gem_object_set_to_cpu_domain(state->obj, false); |
Chris Wilson | 6951e58 | 2019-05-28 10:29:51 +0100 | [diff] [blame] | 1161 | i915_gem_object_unlock(state->obj); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1162 | if (err) |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1163 | goto out; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1164 | |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1165 | i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC); |
Chris Wilson | 37d7c9c | 2018-09-14 13:35:03 +0100 | [diff] [blame] | 1166 | |
| 1167 | /* Check we can acquire the image of the context state */ |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1168 | vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB); |
Chris Wilson | 37d7c9c | 2018-09-14 13:35:03 +0100 | [diff] [blame] | 1169 | if (IS_ERR(vaddr)) { |
| 1170 | err = PTR_ERR(vaddr); |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1171 | goto out; |
Chris Wilson | 37d7c9c | 2018-09-14 13:35:03 +0100 | [diff] [blame] | 1172 | } |
| 1173 | |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1174 | rq->engine->default_state = i915_gem_object_get(state->obj); |
| 1175 | i915_gem_object_unpin_map(state->obj); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1176 | } |
| 1177 | |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1178 | out: |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1179 | /* |
| 1180 | * If we have to abandon now, we expect the engines to be idle |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1181 | * and ready to be torn down. The quickest way we can accomplish |
| 1182 | * this is by declaring ourselves wedged. |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1183 | */ |
Chris Wilson | 3877582 | 2019-08-08 12:06:11 +0100 | [diff] [blame] | 1184 | if (err) |
| 1185 | intel_gt_set_wedged(&i915->gt); |
| 1186 | |
| 1187 | for (id = 0; id < ARRAY_SIZE(requests); id++) { |
| 1188 | struct intel_context *ce; |
| 1189 | struct i915_request *rq; |
| 1190 | |
| 1191 | rq = requests[id]; |
| 1192 | if (!rq) |
| 1193 | continue; |
| 1194 | |
| 1195 | ce = rq->hw_context; |
| 1196 | i915_request_put(rq); |
| 1197 | intel_context_put(ce); |
| 1198 | } |
| 1199 | return err; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1200 | } |
| 1201 | |
Chris Wilson | 254e118 | 2019-04-17 08:56:28 +0100 | [diff] [blame] | 1202 | static int intel_engines_verify_workarounds(struct drm_i915_private *i915) |
| 1203 | { |
| 1204 | struct intel_engine_cs *engine; |
| 1205 | enum intel_engine_id id; |
| 1206 | int err = 0; |
| 1207 | |
| 1208 | if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
| 1209 | return 0; |
| 1210 | |
| 1211 | for_each_engine(engine, i915, id) { |
| 1212 | if (intel_engine_verify_workarounds(engine, "load")) |
| 1213 | err = -EIO; |
| 1214 | } |
| 1215 | |
| 1216 | return err; |
| 1217 | } |
| 1218 | |
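| | /** |
| |  * i915_gem_init - one-time GEM initialisation at driver load |
| |  * @dev_priv: i915 device instance |
| |  * |
| |  * Bring up the GGTT, engines, contexts and uC firmware, then record the |
| |  * default context state. On failure, unwind; -EIO is treated specially, |
| |  * disabling GPU submission while keeping KMS alive. |
| |  */ |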
Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 1219 | int i915_gem_init(struct drm_i915_private *dev_priv) |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1220 | { |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1221 | int ret; |
| 1222 | |
Changbin Du | 52b2416 | 2018-05-08 17:07:05 +0800 | [diff] [blame] | 1223 | /* We need to fall back to 4K pages if the host doesn't support huge gtt. */ |
| 1224 | if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) |
Matthew Auld | da9fe3f3 | 2017-10-06 23:18:31 +0100 | [diff] [blame] | 1225 | mkwrite_device_info(dev_priv)->page_sizes = |
| 1226 | I915_GTT_PAGE_SIZE_4K; |
| 1227 | |
Tvrtko Ursulin | f0c02c1 | 2019-06-21 08:08:10 +0100 | [diff] [blame] | 1228 | intel_timelines_init(dev_priv); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1229 | |
Chris Wilson | ee48700 | 2017-11-22 17:26:21 +0000 | [diff] [blame] | 1230 | ret = i915_gem_init_userptr(dev_priv); |
| 1231 | if (ret) |
| 1232 | return ret; |
| 1233 | |
Daniele Ceraolo Spurio | ca7b2c1 | 2019-07-13 11:00:13 +0100 | [diff] [blame] | 1234 | intel_uc_fetch_firmwares(&dev_priv->gt.uc); |
Michal Wajdeczko | 6bd0fbe | 2019-08-02 18:40:55 +0000 | [diff] [blame] | 1235 | intel_wopcm_init(&dev_priv->wopcm); |
Michal Wajdeczko | f7dc015 | 2018-06-28 14:15:21 +0000 | [diff] [blame] | 1236 | |
Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 1237 | /* This is just a security blanket to placate dragons. |
| 1238 | * On some systems, we very sporadically observe that the first TLBs |
| 1239 | * used by the CS may be stale, despite us poking the TLB reset. If |
| 1240 | * we hold the forcewake during initialisation these problems |
| 1241 | * just magically go away. |
| 1242 | */ |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1243 | intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); |
Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 1244 | |
Tvrtko Ursulin | 1d66377a | 2019-06-21 08:08:05 +0100 | [diff] [blame] | 1245 | ret = i915_init_ggtt(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1246 | if (ret) { |
| 1247 | GEM_BUG_ON(ret == -EIO); |
| 1248 | goto err_unlock; |
| 1249 | } |
Jesse Barnes | d62b489 | 2013-03-08 10:45:53 -0800 | [diff] [blame] | 1250 | |
Andi Shyti | 42014f6 | 2019-09-05 14:14:03 +0300 | [diff] [blame] | 1251 | intel_gt_init(&dev_priv->gt); |
Ben Widawsky | 2fa48d8 | 2013-12-06 14:11:04 -0800 | [diff] [blame] | 1252 | |
Chris Wilson | 11334c6 | 2019-04-26 17:33:33 +0100 | [diff] [blame] | 1253 | ret = intel_engines_setup(dev_priv); |
| 1254 | if (ret) { |
| 1255 | GEM_BUG_ON(ret == -EIO); |
| 1256 | goto err_unlock; |
| 1257 | } |
| 1258 | |
Chris Wilson | a4e7ccd | 2019-10-04 14:40:09 +0100 | [diff] [blame] | 1259 | ret = i915_gem_init_contexts(dev_priv); |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 1260 | if (ret) { |
| 1261 | GEM_BUG_ON(ret == -EIO); |
| 1262 | goto err_scratch; |
| 1263 | } |
| 1264 | |
Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 1265 | ret = intel_engines_init(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1266 | if (ret) { |
| 1267 | GEM_BUG_ON(ret == -EIO); |
| 1268 | goto err_context; |
| 1269 | } |
Daniel Vetter | 53ca26c | 2012-04-26 23:28:03 +0200 | [diff] [blame] | 1270 | |
Chris Wilson | f58d13d | 2017-11-10 14:26:29 +0000 | [diff] [blame] | 1271 | intel_init_gt_powersave(dev_priv); |
| 1272 | |
Michal Wajdeczko | 0075a20 | 2019-08-17 13:11:44 +0000 | [diff] [blame] | 1273 | intel_uc_init(&dev_priv->gt.uc); |
Chris Wilson | cc6a818 | 2017-11-10 14:26:30 +0000 | [diff] [blame] | 1274 | |
Tvrtko Ursulin | 61fa60f | 2019-09-10 15:38:20 +0100 | [diff] [blame] | 1275 | ret = intel_gt_init_hw(&dev_priv->gt); |
Michał Winiarski | 61b5c15 | 2017-12-13 23:13:48 +0100 | [diff] [blame] | 1276 | if (ret) |
| 1277 | goto err_uc_init; |
| 1278 | |
Chris Wilson | 092be38 | 2019-06-26 16:45:49 +0100 | [diff] [blame] | 1279 | /* Only when the HW is re-initialised can we replay the requests */ |
| 1280 | ret = intel_gt_resume(&dev_priv->gt); |
| 1281 | if (ret) |
| 1282 | goto err_init_hw; |
| 1283 | |
Chris Wilson | cc6a818 | 2017-11-10 14:26:30 +0000 | [diff] [blame] | 1284 | /* |
| 1285 | * Despite its name, intel_init_clock_gating applies both display |
| 1286 | * clock gating workarounds and GT mmio workarounds, including the occasional |
| 1287 | * GT power context workaround. Worse, sometimes it includes a context |
| 1288 | * register workaround which we need to apply before we record the |
| 1289 | * default HW state for all contexts. |
| 1290 | * |
| 1291 | * FIXME: break up the workarounds and apply them at the right time! |
| 1292 | */ |
| 1293 | intel_init_clock_gating(dev_priv); |
| 1294 | |
Chris Wilson | 254e118 | 2019-04-17 08:56:28 +0100 | [diff] [blame] | 1295 | ret = intel_engines_verify_workarounds(dev_priv); |
| 1296 | if (ret) |
Chris Wilson | 092be38 | 2019-06-26 16:45:49 +0100 | [diff] [blame] | 1297 | goto err_gt; |
Chris Wilson | 254e118 | 2019-04-17 08:56:28 +0100 | [diff] [blame] | 1298 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1299 | ret = __intel_engines_record_defaults(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1300 | if (ret) |
Chris Wilson | 092be38 | 2019-06-26 16:45:49 +0100 | [diff] [blame] | 1301 | goto err_gt; |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1302 | |
Michal Wajdeczko | 50d8441 | 2019-08-02 18:40:50 +0000 | [diff] [blame] | 1303 | ret = i915_inject_load_error(dev_priv, -ENODEV); |
| 1304 | if (ret) |
Chris Wilson | 092be38 | 2019-06-26 16:45:49 +0100 | [diff] [blame] | 1305 | goto err_gt; |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1306 | |
Michal Wajdeczko | 50d8441 | 2019-08-02 18:40:50 +0000 | [diff] [blame] | 1307 | ret = i915_inject_load_error(dev_priv, -EIO); |
| 1308 | if (ret) |
Chris Wilson | 092be38 | 2019-06-26 16:45:49 +0100 | [diff] [blame] | 1309 | goto err_gt; |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1310 | |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1311 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1312 | |
| 1313 | return 0; |
| 1314 | |
| 1315 | /* |
| 1316 | * Unwinding is complicated by the fact that we want -EIO to mean |
| 1317 | * "disable GPU submission but keep KMS alive". We want to mark the |
| 1318 | * HW as irreversibly wedged, but keep enough state around that the |
| 1319 | * driver doesn't explode during runtime. |
| 1320 | */ |
Chris Wilson | 092be38 | 2019-06-26 16:45:49 +0100 | [diff] [blame] | 1321 | err_gt: |
Michał Winiarski | 5311f51 | 2019-09-26 14:31:40 +0100 | [diff] [blame] | 1322 | intel_gt_set_wedged_on_init(&dev_priv->gt); |
Chris Wilson | 5861b01 | 2019-03-08 09:36:54 +0000 | [diff] [blame] | 1323 | i915_gem_suspend(dev_priv); |
Chris Wilson | 8571a05 | 2018-06-06 15:54:41 +0100 | [diff] [blame] | 1324 | i915_gem_suspend_late(dev_priv); |
| 1325 | |
Chris Wilson | 8bcf9f7 | 2018-07-10 10:44:20 +0100 | [diff] [blame] | 1326 | i915_gem_drain_workqueue(dev_priv); |
Chris Wilson | 092be38 | 2019-06-26 16:45:49 +0100 | [diff] [blame] | 1327 | err_init_hw: |
Daniele Ceraolo Spurio | ca7b2c1 | 2019-07-13 11:00:13 +0100 | [diff] [blame] | 1328 | intel_uc_fini_hw(&dev_priv->gt.uc); |
Michał Winiarski | 61b5c15 | 2017-12-13 23:13:48 +0100 | [diff] [blame] | 1329 | err_uc_init: |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1330 | if (ret != -EIO) { |
Michal Wajdeczko | 0075a20 | 2019-08-17 13:11:44 +0000 | [diff] [blame] | 1331 | intel_uc_fini(&dev_priv->gt.uc); |
Chris Wilson | 45b9c96 | 2019-05-01 11:32:04 +0100 | [diff] [blame] | 1332 | intel_engines_cleanup(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1333 | } |
| 1334 | err_context: |
| 1335 | if (ret != -EIO) |
Chris Wilson | a4e7ccd | 2019-10-04 14:40:09 +0100 | [diff] [blame] | 1336 | i915_gem_driver_release__contexts(dev_priv); |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 1337 | err_scratch: |
Andi Shyti | 42014f6 | 2019-09-05 14:14:03 +0300 | [diff] [blame] | 1338 | intel_gt_driver_release(&dev_priv->gt); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1339 | err_unlock: |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1340 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1341 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1342 | if (ret != -EIO) { |
Michal Wajdeczko | a5f978c | 2019-08-11 19:51:32 +0000 | [diff] [blame] | 1343 | intel_uc_cleanup_firmwares(&dev_priv->gt.uc); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1344 | i915_gem_cleanup_userptr(dev_priv); |
Tvrtko Ursulin | f0c02c1 | 2019-06-21 08:08:10 +0100 | [diff] [blame] | 1345 | intel_timelines_fini(dev_priv); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1346 | } |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1347 | |
Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 1348 | if (ret == -EIO) { |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1349 | /* |
Michal Wajdeczko | a5f978c | 2019-08-11 19:51:32 +0000 | [diff] [blame] | 1350 | * Allow engines or uC initialisation to fail by marking the GPU |
| 1351 | * as wedged. But we only want to do this when the GPU is angry, |
Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 1352 | * for all other failure, such as an allocation failure, bail. |
| 1353 | */ |
Chris Wilson | cb823ed | 2019-07-12 20:29:53 +0100 | [diff] [blame] | 1354 | if (!intel_gt_is_wedged(&dev_priv->gt)) { |
Janusz Krzysztofik | f2db53f | 2019-07-12 13:24:27 +0200 | [diff] [blame] | 1355 | i915_probe_error(dev_priv, |
| 1356 | "Failed to initialize GPU, declaring it wedged!\n"); |
Chris Wilson | cb823ed | 2019-07-12 20:29:53 +0100 | [diff] [blame] | 1357 | intel_gt_set_wedged(&dev_priv->gt); |
Chris Wilson | 6f74b36 | 2017-10-15 15:37:25 +0100 | [diff] [blame] | 1358 | } |
Chris Wilson | 7ed43df | 2018-07-26 09:50:32 +0100 | [diff] [blame] | 1359 | |
| 1360 | /* Minimal basic recovery for KMS */ |
| 1361 | ret = i915_ggtt_enable_hw(dev_priv); |
| 1362 | i915_gem_restore_gtt_mappings(dev_priv); |
Chris Wilson | e9d4c92 | 2019-10-16 15:32:33 +0100 | [diff] [blame^] | 1363 | i915_gem_restore_fences(&dev_priv->ggtt); |
Chris Wilson | 7ed43df | 2018-07-26 09:50:32 +0100 | [diff] [blame] | 1364 | intel_init_clock_gating(dev_priv); |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1365 | } |
| 1366 | |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1367 | i915_gem_drain_freed_objects(dev_priv); |
Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 1368 | return ret; |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1369 | } |
| 1370 | |
Chris Wilson | c29579d | 2019-08-06 13:42:59 +0100 | [diff] [blame] | 1371 | void i915_gem_driver_register(struct drm_i915_private *i915) |
| 1372 | { |
| 1373 | i915_gem_driver_register__shrinker(i915); |
Chris Wilson | 750e76b | 2019-08-06 13:43:00 +0100 | [diff] [blame] | 1374 | |
| 1375 | intel_engines_driver_register(i915); |
Chris Wilson | c29579d | 2019-08-06 13:42:59 +0100 | [diff] [blame] | 1376 | } |
| 1377 | |
| 1378 | void i915_gem_driver_unregister(struct drm_i915_private *i915) |
| 1379 | { |
| 1380 | i915_gem_driver_unregister__shrinker(i915); |
| 1381 | } |
| 1382 | |
Janusz Krzysztofik | 78dae1a | 2019-07-12 13:24:29 +0200 | [diff] [blame] | 1383 | void i915_gem_driver_remove(struct drm_i915_private *dev_priv) |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1384 | { |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 1385 | intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref); |
Chris Wilson | b27e35a | 2019-05-27 12:51:14 +0100 | [diff] [blame] | 1386 | |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1387 | i915_gem_suspend_late(dev_priv); |
Andi Shyti | 42014f6 | 2019-09-05 14:14:03 +0300 | [diff] [blame] | 1388 | intel_gt_driver_remove(&dev_priv->gt); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1389 | |
| 1390 | /* Flush any outstanding unpin_work. */ |
| 1391 | i915_gem_drain_workqueue(dev_priv); |
| 1392 | |
Daniele Ceraolo Spurio | ca7b2c1 | 2019-07-13 11:00:13 +0100 | [diff] [blame] | 1393 | intel_uc_fini_hw(&dev_priv->gt.uc); |
| 1394 | intel_uc_fini(&dev_priv->gt.uc); |
Janusz Krzysztofik | 47bc28d | 2019-05-30 15:31:05 +0200 | [diff] [blame] | 1395 | |
| 1396 | i915_gem_drain_freed_objects(dev_priv); |
| 1397 | } |
| 1398 | |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1399 | void i915_gem_driver_release(struct drm_i915_private *dev_priv) |
Janusz Krzysztofik | 47bc28d | 2019-05-30 15:31:05 +0200 | [diff] [blame] | 1400 | { |
Chris Wilson | 45b9c96 | 2019-05-01 11:32:04 +0100 | [diff] [blame] | 1401 | intel_engines_cleanup(dev_priv); |
Chris Wilson | a4e7ccd | 2019-10-04 14:40:09 +0100 | [diff] [blame] | 1402 | i915_gem_driver_release__contexts(dev_priv); |
Andi Shyti | 42014f6 | 2019-09-05 14:14:03 +0300 | [diff] [blame] | 1403 | intel_gt_driver_release(&dev_priv->gt); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1404 | |
Tvrtko Ursulin | 25d140f | 2018-12-03 13:33:19 +0000 | [diff] [blame] | 1405 | intel_wa_list_free(&dev_priv->gt_wa_list); |
| 1406 | |
Daniele Ceraolo Spurio | ca7b2c1 | 2019-07-13 11:00:13 +0100 | [diff] [blame] | 1407 | intel_uc_cleanup_firmwares(&dev_priv->gt.uc); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1408 | i915_gem_cleanup_userptr(dev_priv); |
Tvrtko Ursulin | f0c02c1 | 2019-06-21 08:08:10 +0100 | [diff] [blame] | 1409 | intel_timelines_fini(dev_priv); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1410 | |
| 1411 | i915_gem_drain_freed_objects(dev_priv); |
| 1412 | |
Chris Wilson | a4e7ccd | 2019-10-04 14:40:09 +0100 | [diff] [blame] | 1413 | WARN_ON(!list_empty(&dev_priv->gem.contexts.list)); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1414 | } |
| 1415 | |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1416 | void i915_gem_init_mmio(struct drm_i915_private *i915) |
| 1417 | { |
| 1418 | i915_gem_sanitize(i915); |
| 1419 | } |
| 1420 | |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1421 | static void i915_gem_init__mm(struct drm_i915_private *i915) |
| 1422 | { |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1423 | spin_lock_init(&i915->mm.obj_lock); |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1424 | |
| 1425 | init_llist_head(&i915->mm.free_list); |
| 1426 | |
Chris Wilson | 3b4fa96 | 2019-05-30 21:34:59 +0100 | [diff] [blame] | 1427 | INIT_LIST_HEAD(&i915->mm.purge_list); |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1428 | INIT_LIST_HEAD(&i915->mm.shrink_list); |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1429 | |
Chris Wilson | 8475355 | 2019-05-28 10:29:45 +0100 | [diff] [blame] | 1430 | i915_gem_init__objects(i915); |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1431 | } |
| 1432 | |
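| | /** |
| |  * i915_gem_init_early - initialise GEM state with no hardware dependencies |
| |  * @dev_priv: i915 device instance |
| |  * |
| |  * Set up the GEM bookkeeping (object lists, locks and the private tmpfs |
| |  * mount used for backing storage) that can be done before touching the |
| |  * hardware. |
| |  */ |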
Matthew Auld | a3f356b | 2019-09-27 18:33:49 +0100 | [diff] [blame] | 1433 | void i915_gem_init_early(struct drm_i915_private *dev_priv) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1434 | { |
Chris Wilson | 13f1bfd | 2019-02-28 10:20:34 +0000 | [diff] [blame] | 1435 | int err; |
Chris Wilson | d1b48c1 | 2017-08-16 09:52:08 +0100 | [diff] [blame] | 1436 | |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1437 | i915_gem_init__mm(dev_priv); |
Chris Wilson | 23c3c3d | 2019-04-24 21:07:14 +0100 | [diff] [blame] | 1438 | i915_gem_init__pm(dev_priv); |
Chris Wilson | f212381 | 2017-10-16 12:40:37 +0100 | [diff] [blame] | 1439 | |
Chris Wilson | b5add95 | 2016-08-04 16:32:36 +0100 | [diff] [blame] | 1440 | spin_lock_init(&dev_priv->fb_tracking.lock); |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 1441 | |
Matthew Auld | 465c403 | 2017-10-06 23:18:14 +0100 | [diff] [blame] | 1442 | err = i915_gemfs_init(dev_priv); |
| 1443 | if (err) |
| 1444 | DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1445 | } |
Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 1446 | |
Michal Wajdeczko | a0de908 | 2018-03-23 12:34:49 +0000 | [diff] [blame] | 1447 | void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) |
Imre Deak | d64aa09 | 2016-01-19 15:26:29 +0200 | [diff] [blame] | 1448 | { |
Chris Wilson | c4d4c1c | 2017-02-10 16:35:23 +0000 | [diff] [blame] | 1449 | i915_gem_drain_freed_objects(dev_priv); |
Chris Wilson | c9c70471 | 2018-02-19 22:06:31 +0000 | [diff] [blame] | 1450 | GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); |
| 1451 | GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1452 | WARN_ON(dev_priv->mm.shrink_count); |
Matthew Auld | ea84aa7 | 2016-11-17 21:04:11 +0000 | [diff] [blame] | 1453 | |
Matthew Auld | 465c403 | 2017-10-06 23:18:14 +0100 | [diff] [blame] | 1454 | i915_gemfs_fini(dev_priv); |
Imre Deak | d64aa09 | 2016-01-19 15:26:29 +0200 | [diff] [blame] | 1455 | } |
| 1456 | |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 1457 | int i915_gem_freeze(struct drm_i915_private *dev_priv) |
| 1458 | { |
Chris Wilson | d0aa301 | 2017-04-07 11:25:49 +0100 | [diff] [blame] | 1459 | /* Discard all purgeable objects and let userspace recover them as |
| 1460 | * required after resuming. |
| 1461 | */ |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 1462 | i915_gem_shrink_all(dev_priv); |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 1463 | |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 1464 | return 0; |
| 1465 | } |
| 1466 | |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 1467 | int i915_gem_freeze_late(struct drm_i915_private *i915) |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1468 | { |
| 1469 | struct drm_i915_gem_object *obj; |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1470 | intel_wakeref_t wakeref; |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1471 | |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 1472 | /* |
| 1473 | * Called just before we write the hibernation image. |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1474 | * |
| 1475 | * We need to update the domain tracking to reflect that the CPU |
| 1476 | * will be accessing all the pages to create and restore from the |
| 1477 | * hibernation, and so upon restoration those pages will be in the |
| 1478 | * CPU domain. |
| 1479 | * |
| 1480 | * To make sure the hibernation image contains the latest state, |
| 1481 | * we update that state just before writing out the image. |
Chris Wilson | 7aab2d5 | 2016-09-09 20:02:18 +0100 | [diff] [blame] | 1482 | * |
| 1483 | * To try and reduce the hibernation image, we manually shrink |
Chris Wilson | d0aa301 | 2017-04-07 11:25:49 +0100 | [diff] [blame] | 1484 | * the objects as well, see i915_gem_freeze() |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1485 | */ |
| 1486 | |
Daniele Ceraolo Spurio | d858d56 | 2019-06-13 16:21:54 -0700 | [diff] [blame] | 1487 | wakeref = intel_runtime_pm_get(&i915->runtime_pm); |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1488 | |
| 1489 | i915_gem_shrink(i915, -1UL, NULL, ~0); |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 1490 | i915_gem_drain_freed_objects(i915); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1491 | |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1492 | list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) { |
| 1493 | i915_gem_object_lock(obj); |
| 1494 | WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true)); |
| 1495 | i915_gem_object_unlock(obj); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1496 | } |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1497 | |
Daniele Ceraolo Spurio | d858d56 | 2019-06-13 16:21:54 -0700 | [diff] [blame] | 1498 | intel_runtime_pm_put(&i915->runtime_pm, wakeref); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1499 | |
| 1500 | return 0; |
| 1501 | } |
| 1502 | |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1503 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1504 | { |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1505 | struct drm_i915_file_private *file_priv = file->driver_priv; |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1506 | struct i915_request *request; |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1507 | |
| 1508 | /* Clean up our request list when the client is going away, so that |
| 1509 | * later retire_requests won't dereference our soon-to-be-gone |
| 1510 | * file_priv. |
| 1511 | */ |
Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1512 | spin_lock(&file_priv->mm.lock); |
Chris Wilson | c8659ef | 2017-03-02 12:25:25 +0000 | [diff] [blame] | 1513 | list_for_each_entry(request, &file_priv->mm.request_list, client_link) |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1514 | request->file_priv = NULL; |
Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1515 | spin_unlock(&file_priv->mm.lock); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1516 | } |
| 1517 | |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 1518 | int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1519 | { |
| 1520 | struct drm_i915_file_private *file_priv; |
Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 1521 | int ret; |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1522 | |
Chris Wilson | c4c29d7 | 2016-11-09 10:45:07 +0000 | [diff] [blame] | 1523 | DRM_DEBUG("\n"); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1524 | |
| 1525 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); |
| 1526 | if (!file_priv) |
| 1527 | return -ENOMEM; |
| 1528 | |
| 1529 | file->driver_priv = file_priv; |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 1530 | file_priv->dev_priv = i915; |
Chris Wilson | ab0e7ff | 2014-02-25 17:11:24 +0200 | [diff] [blame] | 1531 | file_priv->file = file; |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1532 | |
| 1533 | spin_lock_init(&file_priv->mm.lock); |
| 1534 | INIT_LIST_HEAD(&file_priv->mm.request_list); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1535 | |
Chris Wilson | c80ff16 | 2016-07-27 09:07:27 +0100 | [diff] [blame] | 1536 | file_priv->bsd_engine = -1; |
Mika Kuoppala | 14921f3 | 2018-06-15 13:44:29 +0300 | [diff] [blame] | 1537 | file_priv->hang_timestamp = jiffies; |
Tvrtko Ursulin | de1add3 | 2016-01-15 15:12:50 +0000 | [diff] [blame] | 1538 | |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 1539 | ret = i915_gem_context_open(i915, file); |
Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 1540 | if (ret) |
| 1541 | kfree(file_priv); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1542 | |
Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 1543 | return ret; |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1544 | } |
| 1545 | |
Chris Wilson | 935a2f7 | 2017-02-13 17:15:13 +0000 | [diff] [blame] | 1546 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
Chris Wilson | 66d9cb5 | 2017-02-13 17:15:17 +0000 | [diff] [blame] | 1547 | #include "selftests/mock_gem_device.c" |
Chris Wilson | 3f51b7e1 | 2018-08-30 14:48:06 +0100 | [diff] [blame] | 1548 | #include "selftests/i915_gem.c" |
Chris Wilson | 935a2f7 | 2017-02-13 17:15:13 +0000 | [diff] [blame] | 1549 | #endif |