/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_drv.h"
#include "intel_pm.h"

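/*
 * Helpers for reserving a temporary, page-aligned slot in the mappable
 * GGTT aperture. The pread/pwrite slow paths below use this when an
 * object cannot be pinned into the aperture as a whole and must instead
 * be accessed one page at a time through a scratch PTE.
 */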
static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

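/*
 * Report the total size of the GGTT and how much of it is still
 * available, i.e. not reserved and not occupied by pinned vmas.
 */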
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	mutex_lock(&ggtt->vm.mutex);

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

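/*
 * Unbind an object from every address space it is bound into. Since
 * i915_vma_unbind() may sleep, the vma list spinlock is dropped around
 * each unbind; vmas are first moved onto a private list so the walk
 * stays safe against concurrent modification.
 */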
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}

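/*
 * Dumb buffers are CPU-mapped scanout buffers created without any
 * driver-specific knowledge. Work out a format from the requested
 * bits-per-pixel, compute a 64-byte aligned pitch (bumped to page
 * alignment when it exceeds the maximum linear stride, so the fb can
 * still be remapped), and allocate the backing object.
 */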
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}

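/*
 * Illustrative userspace sketch (not part of this file) of driving the
 * create ioctl above; error handling omitted:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *
 * On return, create.handle names a shmem-backed object and create.size
 * holds the page-rounded allocation size.
 */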
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}

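/*
 * Copy from a GGTT mapping to userspace. The fast path uses an atomic
 * write-combining mapping, under which the copy must not fault; if it
 * does, fall back to a regular mapping of the page and a faultable
 * copy_to_user().
 */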
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

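/*
 * Read through the GGTT. Preferably the whole object is pinned into
 * the mappable aperture; if that fails, fall back to a single scratch
 * page from insert_mappable_node() and rebind it to each object page
 * in turn as the copy loop advances.
 */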
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

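/*
 * Slow shmem path for pwrite, copying page by page through a kmap.
 * partial_cacheline_write below is a mask (clflush size - 1): when
 * CLFLUSH_BEFORE is requested, a chunk needs a pre-flush only if
 * (offset | length) has low bits set, i.e. the write does not start
 * and end on cacheline boundaries. For example, with 64-byte
 * cachelines, offset = 64 with length = 128 needs no pre-flush,
 * while offset = 80 or length = 100 does.
 */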
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 821 | /** |
| 822 | * Called when user space has done writes to this buffer |
Tvrtko Ursulin | 14bb2c1 | 2016-06-03 14:02:17 +0100 | [diff] [blame] | 823 | * @dev: drm device |
| 824 | * @data: ioctl data blob |
| 825 | * @file: drm file |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 826 | */ |
| 827 | int |
| 828 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 829 | struct drm_file *file) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 830 | { |
| 831 | struct drm_i915_gem_sw_finish *args = data; |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 832 | struct drm_i915_gem_object *obj; |
Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 833 | |
Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 834 | obj = i915_gem_object_lookup(file, args->handle); |
Chris Wilson | c21724c | 2016-08-05 10:14:19 +0100 | [diff] [blame] | 835 | if (!obj) |
| 836 | return -ENOENT; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 837 | |
Tina Zhang | a03f395 | 2017-11-14 10:25:13 +0000 | [diff] [blame] | 838 | /* |
| 839 | * Proxy objects are barred from CPU access, so there is no |
| 840 | * need to ban sw_finish as it is a nop. |
| 841 | */ |
| 842 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 843 | /* Pinned buffers may be scanout, so flush the cache */ |
Chris Wilson | 5a97bcc | 2017-02-22 11:40:46 +0000 | [diff] [blame] | 844 | i915_gem_object_flush_if_display(obj); |
Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 845 | i915_gem_object_put(obj); |
Chris Wilson | 5a97bcc | 2017-02-22 11:40:46 +0000 | [diff] [blame] | 846 | |
| 847 | return 0; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 848 | } |
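| | |
| | /* |
| | * Userspace counterpart, as a sketch (fd and handle are illustrative): |
| | * after CPU writes to a buffer that may be a scanout, flag completion |
| | * with |
| | * |
| | *	struct drm_i915_gem_sw_finish sf = { .handle = handle }; |
| | *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &sf); |
| | * |
| | * which lands in the ioctl above and flushes the display cache if |
| | * needed. |
| | */ |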
| 849 | |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 850 | void i915_gem_runtime_suspend(struct drm_i915_private *i915) |
Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 851 | { |
Chris Wilson | 3594a3e | 2016-10-24 13:42:16 +0100 | [diff] [blame] | 852 | struct drm_i915_gem_object *obj, *on; |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 853 | int i; |
Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 854 | |
Chris Wilson | 3594a3e | 2016-10-24 13:42:16 +0100 | [diff] [blame] | 855 | /* |
| 856 | * Only called during RPM suspend. All users of the userfault_list |
| 857 | * must hold an RPM wakeref to ensure that this cannot |
| 858 | * run concurrently with them (they rely on the struct_mutex for |
| 859 | * protection amongst themselves). |
| 860 | */ |
| 861 | |
| 862 | list_for_each_entry_safe(obj, on, |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 863 | &i915->ggtt.userfault_list, userfault_link) |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 864 | __i915_gem_object_release_mmap(obj); |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 865 | |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 866 | /* |
| 867 | * The fences will be lost when the device powers down. If any were |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 868 | * in use by hardware (i.e. they are pinned), we should not be powering |
| 869 | * down! All other fences will be reacquired by the user upon waking. |
| 870 | */ |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 871 | for (i = 0; i < i915->ggtt.num_fences; i++) { |
| 872 | struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i]; |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 873 | |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 874 | /* |
| 875 | * Ideally we want to assert that the fence register is not |
Chris Wilson | e0ec3ec | 2017-02-03 12:57:17 +0000 | [diff] [blame] | 876 | * live at this point (i.e. that no piece of code will be |
| 877 | * trying to write through fence + GTT, as that would both violate |
| 878 | * our tracking of activity and the associated locking/barriers, |
| 879 | * and be illegal given that the hw is powered down). |
| 880 | * |
| 881 | * Previously we used reg->pin_count as a "liveness" indicator. |
| 882 | * That is not sufficient, and we need a more fine-grained |
| 883 | * tool if we want to have a sanity check here. |
| 884 | */ |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 885 | |
| 886 | if (!reg->vma) |
| 887 | continue; |
| 888 | |
Chris Wilson | a65adaf | 2017-10-09 09:43:57 +0100 | [diff] [blame] | 889 | GEM_BUG_ON(i915_vma_has_userfault(reg->vma)); |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 890 | reg->dirty = true; |
| 891 | } |
Chris Wilson | eedd10f | 2014-06-16 08:57:44 +0100 | [diff] [blame] | 892 | } |
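| | |
| | /* |
| | * The discipline described above, in sketch form: any user of the |
| | * userfault_list brackets its access with a runtime-pm wakeref, using |
| | * the same helpers seen later in this file (the surrounding code here |
| | * is hypothetical): |
| | * |
| | *	intel_wakeref_t wakeref; |
| | * |
| | *	wakeref = intel_runtime_pm_get(&i915->runtime_pm); |
| | *	... touch the GGTT mmaps / fence registers ... |
| | *	intel_runtime_pm_put(&i915->runtime_pm, wakeref); |
| | * |
| | * which is what guarantees i915_gem_runtime_suspend() cannot run |
| | * concurrently with them. |
| | */ |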
| 893 | |
Chris Wilson | 25112b6 | 2017-03-30 15:50:39 +0100 | [diff] [blame] | 894 | static int wait_for_engines(struct drm_i915_private *i915) |
| 895 | { |
Chris Wilson | ee42c00 | 2017-12-11 19:41:34 +0000 | [diff] [blame] | 896 | if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) { |
Chris Wilson | 59e4b19 | 2017-12-11 19:41:35 +0000 | [diff] [blame] | 897 | dev_err(i915->drm.dev, |
| 898 | "Failed to idle engines, declaring wedged!\n"); |
Chris Wilson | 629820f | 2018-03-09 10:11:14 +0000 | [diff] [blame] | 899 | GEM_TRACE_DUMP(); |
Chris Wilson | cad9946 | 2017-08-26 12:09:33 +0100 | [diff] [blame] | 900 | i915_gem_set_wedged(i915); |
| 901 | return -EIO; |
Chris Wilson | 25112b6 | 2017-03-30 15:50:39 +0100 | [diff] [blame] | 902 | } |
| 903 | |
| 904 | return 0; |
| 905 | } |
| 906 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 907 | static long |
| 908 | wait_for_timelines(struct drm_i915_private *i915, |
| 909 | unsigned int flags, long timeout) |
| 910 | { |
| 911 | struct i915_gt_timelines *gt = &i915->gt.timelines; |
| 912 | struct i915_timeline *tl; |
| 913 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 914 | mutex_lock(&gt->mutex); |
Chris Wilson | 9407d3b | 2019-01-28 18:18:12 +0000 | [diff] [blame] | 915 | list_for_each_entry(tl, &gt->active_list, link) { |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 916 | struct i915_request *rq; |
| 917 | |
Chris Wilson | 21950ee | 2019-02-05 13:00:05 +0000 | [diff] [blame] | 918 | rq = i915_active_request_get_unlocked(&tl->last_request); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 919 | if (!rq) |
| 920 | continue; |
| 921 | |
| 922 | mutex_unlock(&gt->mutex); |
| 923 | |
| 924 | /* |
| 925 | * "Race-to-idle". |
| 926 | * |
| 927 | * Switching to the kernel context is often used as a synchronous |
| 928 | * step prior to idling, e.g. in suspend for flushing all |
| 929 | * current operations to memory before sleeping. These we |
| 930 | * want to complete as quickly as possible to avoid prolonged |
| 931 | * stalls, so allow the gpu to boost to maximum clocks. |
| 932 | */ |
| 933 | if (flags & I915_WAIT_FOR_IDLE_BOOST) |
Chris Wilson | 62eb3c2 | 2019-02-13 09:25:04 +0000 | [diff] [blame] | 934 | gen6_rps_boost(rq); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 935 | |
| 936 | timeout = i915_request_wait(rq, flags, timeout); |
| 937 | i915_request_put(rq); |
| 938 | if (timeout < 0) |
| 939 | return timeout; |
| 940 | |
| 941 | /* restart after reacquiring the lock */ |
| 942 | mutex_lock(&gt->mutex); |
Chris Wilson | 9407d3b | 2019-01-28 18:18:12 +0000 | [diff] [blame] | 943 | tl = list_entry(&gt->active_list, typeof(*tl), link); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 944 | } |
| 945 | mutex_unlock(&gt->mutex); |
| 946 | |
| 947 | return timeout; |
| 948 | } |
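| | |
| | /* |
| | * Note the iteration idiom above: the loop drops gt->mutex to sleep on |
| | * a request, then reacquires it and points tl back at the list head so |
| | * list_for_each_entry() restarts cleanly, since the active_list may |
| | * have changed while unlocked. A condensed sketch: |
| | * |
| | *	mutex_lock(&gt->mutex); |
| | *	list_for_each_entry(tl, &gt->active_list, link) { |
| | *		mutex_unlock(&gt->mutex); |
| | *		... wait without holding the lock ... |
| | *		mutex_lock(&gt->mutex); |
| | *		tl = list_entry(&gt->active_list, typeof(*tl), link); |
| | *	} |
| | *	mutex_unlock(&gt->mutex); |
| | */ |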
| 949 | |
Chris Wilson | ec625fb | 2018-07-09 13:20:42 +0100 | [diff] [blame] | 950 | int i915_gem_wait_for_idle(struct drm_i915_private *i915, |
| 951 | unsigned int flags, long timeout) |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 952 | { |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 953 | GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n", |
Chris Wilson | ec625fb | 2018-07-09 13:20:42 +0100 | [diff] [blame] | 954 | flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 955 | timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "", |
| 956 | yesno(i915->gt.awake)); |
Chris Wilson | 09a4c02 | 2018-05-24 09:11:35 +0100 | [diff] [blame] | 957 | |
Chris Wilson | 863e9fd | 2017-05-30 13:13:32 +0100 | [diff] [blame] | 958 | /* If the device is asleep, we have no requests outstanding */ |
| 959 | if (!READ_ONCE(i915->gt.awake)) |
| 960 | return 0; |
| 961 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 962 | timeout = wait_for_timelines(i915, flags, timeout); |
| 963 | if (timeout < 0) |
| 964 | return timeout; |
| 965 | |
Chris Wilson | 9caa34a | 2016-11-11 14:58:08 +0000 | [diff] [blame] | 966 | if (flags & I915_WAIT_LOCKED) { |
Chris Wilson | a89d1f9 | 2018-05-02 17:38:39 +0100 | [diff] [blame] | 967 | int err; |
Chris Wilson | 9caa34a | 2016-11-11 14:58:08 +0000 | [diff] [blame] | 968 | |
| 969 | lockdep_assert_held(&i915->drm.struct_mutex); |
| 970 | |
Chris Wilson | a61b47f | 2018-06-27 12:53:34 +0100 | [diff] [blame] | 971 | err = wait_for_engines(i915); |
| 972 | if (err) |
| 973 | return err; |
| 974 | |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 975 | i915_retire_requests(i915); |
Chris Wilson | a89d1f9 | 2018-05-02 17:38:39 +0100 | [diff] [blame] | 976 | } |
Chris Wilson | a61b47f | 2018-06-27 12:53:34 +0100 | [diff] [blame] | 977 | |
| 978 | return 0; |
Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 979 | } |
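| | |
| | /* |
| | * Typical usage, e.g. from a suspend path that already holds |
| | * struct_mutex (a sketch; the timeout is the caller's choice): |
| | * |
| | *	unsigned int flags = I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_BOOST; |
| | * |
| | *	err = i915_gem_wait_for_idle(i915, flags, MAX_SCHEDULE_TIMEOUT); |
| | *	if (err) |
| | *		return err; |
| | */ |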
| 980 | |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 981 | struct i915_vma * |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 982 | i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, |
| 983 | const struct i915_ggtt_view *view, |
Chris Wilson | 91b2db6 | 2016-08-04 16:32:23 +0100 | [diff] [blame] | 984 | u64 size, |
Chris Wilson | 2ffffd0 | 2016-08-04 16:32:22 +0100 | [diff] [blame] | 985 | u64 alignment, |
| 986 | u64 flags) |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 987 | { |
Chris Wilson | ad16d2e | 2016-10-13 09:55:04 +0100 | [diff] [blame] | 988 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
Chris Wilson | 82ad644 | 2018-06-05 16:37:58 +0100 | [diff] [blame] | 989 | struct i915_address_space *vm = &dev_priv->ggtt.vm; |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 990 | struct i915_vma *vma; |
| 991 | int ret; |
Joonas Lahtinen | 72e96d6 | 2016-03-30 16:57:10 +0300 | [diff] [blame] | 992 | |
Chris Wilson | 4c7d62c | 2016-10-28 13:58:32 +0100 | [diff] [blame] | 993 | lockdep_assert_held(&obj->base.dev->struct_mutex); |
| 994 | |
Chris Wilson | ac87a6fd | 2018-02-20 13:42:05 +0000 | [diff] [blame] | 995 | if (flags & PIN_MAPPABLE && |
| 996 | (!view || view->type == I915_GGTT_VIEW_NORMAL)) { |
Chris Wilson | 43ae70d9 | 2017-10-09 09:44:01 +0100 | [diff] [blame] | 997 | /* If the required space is larger than the available |
| 998 | * aperture, we will not be able to find a slot for the |
| 999 | * object and unbinding the object now will be in |
| 1000 | * vain. Worse, doing so may cause us to ping-pong |
| 1001 | * the object in and out of the Global GTT and |
| 1002 | * waste a lot of cycles under the mutex. |
| 1003 | */ |
| 1004 | if (obj->base.size > dev_priv->ggtt.mappable_end) |
| 1005 | return ERR_PTR(-E2BIG); |
| 1006 | |
| 1007 | /* If NONBLOCK is set the caller is optimistically |
| 1008 | * trying to cache the full object within the mappable |
| 1009 | * aperture, and *must* have a fallback in place for |
| 1010 | * situations where we cannot bind the object. We |
| 1011 | * can be a little more lax here and use the fallback |
| 1012 | * more often to avoid costly migrations of ourselves |
| 1013 | * and other objects within the aperture. |
| 1014 | * |
| 1015 | * Half-the-aperture is used as a simple heuristic. |
| 1016 | * More interesting would be to search for a free |
| 1017 | * block prior to making the commitment to unbind. |
| 1018 | * That caters for the self-harm case, and with a |
| 1019 | * little more heuristics (e.g. NOFAULT, NOEVICT) |
| 1020 | * we could try to minimise harm to others. |
| 1021 | */ |
| 1022 | if (flags & PIN_NONBLOCK && |
| 1023 | obj->base.size > dev_priv->ggtt.mappable_end / 2) |
| 1024 | return ERR_PTR(-ENOSPC); |
| 1025 | } |
| 1026 | |
Chris Wilson | 718659a | 2017-01-16 15:21:28 +0000 | [diff] [blame] | 1027 | vma = i915_vma_instance(obj, vm, view); |
Chengguang Xu | 772b540 | 2019-02-21 10:08:19 +0800 | [diff] [blame] | 1028 | if (IS_ERR(vma)) |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1029 | return vma; |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1030 | |
| 1031 | if (i915_vma_misplaced(vma, size, alignment, flags)) { |
Chris Wilson | 43ae70d9 | 2017-10-09 09:44:01 +0100 | [diff] [blame] | 1032 | if (flags & PIN_NONBLOCK) { |
| 1033 | if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) |
| 1034 | return ERR_PTR(-ENOSPC); |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1035 | |
Chris Wilson | 43ae70d9 | 2017-10-09 09:44:01 +0100 | [diff] [blame] | 1036 | if (flags & PIN_MAPPABLE && |
Chris Wilson | 944397f | 2017-01-09 16:16:11 +0000 | [diff] [blame] | 1037 | vma->fence_size > dev_priv->ggtt.mappable_end / 2) |
Chris Wilson | ad16d2e | 2016-10-13 09:55:04 +0100 | [diff] [blame] | 1038 | return ERR_PTR(-ENOSPC); |
| 1039 | } |
| 1040 | |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1041 | WARN(i915_vma_is_pinned(vma), |
| 1042 | "bo is already pinned in ggtt with incorrect alignment:" |
Chris Wilson | 05a20d0 | 2016-08-18 17:16:55 +0100 | [diff] [blame] | 1043 | " offset=%08x, req.alignment=%llx," |
| 1044 | " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", |
| 1045 | i915_ggtt_offset(vma), alignment, |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1046 | !!(flags & PIN_MAPPABLE), |
Chris Wilson | 05a20d0 | 2016-08-18 17:16:55 +0100 | [diff] [blame] | 1047 | i915_vma_is_map_and_fenceable(vma)); |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1048 | ret = i915_vma_unbind(vma); |
| 1049 | if (ret) |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1050 | return ERR_PTR(ret); |
Chris Wilson | 59bfa12 | 2016-08-04 16:32:31 +0100 | [diff] [blame] | 1051 | } |
| 1052 | |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1053 | ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); |
| 1054 | if (ret) |
| 1055 | return ERR_PTR(ret); |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 1056 | |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 1057 | return vma; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1058 | } |
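| | |
| | /* |
| | * A sketch of a caller, under struct_mutex: pin into the mappable |
| | * aperture with PIN_NONBLOCK (so a fallback is required on -ENOSPC), |
| | * use the binding, then drop the pin; the vma itself stays owned by |
| | * the object: |
| | * |
| | *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, |
| | *				       PIN_MAPPABLE | PIN_NONBLOCK); |
| | *	if (IS_ERR(vma)) |
| | *		return PTR_ERR(vma);	... or take a fallback path ... |
| | *	... access through the aperture ... |
| | *	i915_vma_unpin(vma); |
| | */ |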
| 1059 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1060 | int |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1061 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
| 1062 | struct drm_file *file_priv) |
| 1063 | { |
Chris Wilson | 3b4fa96 | 2019-05-30 21:34:59 +0100 | [diff] [blame] | 1064 | struct drm_i915_private *i915 = to_i915(dev); |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1065 | struct drm_i915_gem_madvise *args = data; |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 1066 | struct drm_i915_gem_object *obj; |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1067 | int err; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1068 | |
| 1069 | switch (args->madv) { |
| 1070 | case I915_MADV_DONTNEED: |
| 1071 | case I915_MADV_WILLNEED: |
| 1072 | break; |
| 1073 | default: |
| 1074 | return -EINVAL; |
| 1075 | } |
| 1076 | |
Chris Wilson | 03ac064 | 2016-07-20 13:31:51 +0100 | [diff] [blame] | 1077 | obj = i915_gem_object_lookup(file_priv, args->handle); |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1078 | if (!obj) |
| 1079 | return -ENOENT; |
| 1080 | |
| 1081 | err = mutex_lock_interruptible(&obj->mm.lock); |
| 1082 | if (err) |
| 1083 | goto out; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1084 | |
Chris Wilson | f1fa4f4 | 2017-10-13 21:26:13 +0100 | [diff] [blame] | 1085 | if (i915_gem_object_has_pages(obj) && |
Chris Wilson | 3e510a8 | 2016-08-05 10:14:23 +0100 | [diff] [blame] | 1086 | i915_gem_object_is_tiled(obj) && |
Chris Wilson | 3b4fa96 | 2019-05-30 21:34:59 +0100 | [diff] [blame] | 1087 | i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { |
Chris Wilson | bc0629a | 2016-11-01 10:03:17 +0000 | [diff] [blame] | 1088 | if (obj->mm.madv == I915_MADV_WILLNEED) { |
| 1089 | GEM_BUG_ON(!obj->mm.quirked); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1090 | __i915_gem_object_unpin_pages(obj); |
Chris Wilson | bc0629a | 2016-11-01 10:03:17 +0000 | [diff] [blame] | 1091 | obj->mm.quirked = false; |
| 1092 | } |
| 1093 | if (args->madv == I915_MADV_WILLNEED) { |
Chris Wilson | 2c3a3f4 | 2016-11-04 10:30:01 +0000 | [diff] [blame] | 1094 | GEM_BUG_ON(obj->mm.quirked); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1095 | __i915_gem_object_pin_pages(obj); |
Chris Wilson | bc0629a | 2016-11-01 10:03:17 +0000 | [diff] [blame] | 1096 | obj->mm.quirked = true; |
| 1097 | } |
Daniel Vetter | 656bfa3 | 2014-11-20 09:26:30 +0100 | [diff] [blame] | 1098 | } |
| 1099 | |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1100 | if (obj->mm.madv != __I915_MADV_PURGED) |
| 1101 | obj->mm.madv = args->madv; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1102 | |
Chris Wilson | 3b4fa96 | 2019-05-30 21:34:59 +0100 | [diff] [blame] | 1103 | if (i915_gem_object_has_pages(obj)) { |
| 1104 | struct list_head *list; |
| 1105 | |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1106 | if (i915_gem_object_is_shrinkable(obj)) { |
Chris Wilson | a8cff4c8 | 2019-06-10 15:54:30 +0100 | [diff] [blame] | 1107 | unsigned long flags; |
| 1108 | |
| 1109 | spin_lock_irqsave(&i915->mm.obj_lock, flags); |
| 1110 | |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1111 | if (obj->mm.madv != I915_MADV_WILLNEED) |
| 1112 | list = &i915->mm.purge_list; |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1113 | else |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1114 | list = &i915->mm.shrink_list; |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1115 | list_move_tail(&obj->mm.link, list); |
Chris Wilson | a8cff4c8 | 2019-06-10 15:54:30 +0100 | [diff] [blame] | 1116 | |
| 1117 | spin_unlock_irqrestore(&i915->mm.obj_lock, flags); |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1118 | } |
Chris Wilson | 3b4fa96 | 2019-05-30 21:34:59 +0100 | [diff] [blame] | 1119 | } |
| 1120 | |
Chris Wilson | 6c085a7 | 2012-08-20 11:40:46 +0200 | [diff] [blame] | 1121 | /* if the object is no longer attached, discard its backing storage */ |
Chris Wilson | f1fa4f4 | 2017-10-13 21:26:13 +0100 | [diff] [blame] | 1122 | if (obj->mm.madv == I915_MADV_DONTNEED && |
| 1123 | !i915_gem_object_has_pages(obj)) |
Chris Wilson | f033428 | 2019-05-28 10:29:46 +0100 | [diff] [blame] | 1124 | i915_gem_object_truncate(obj); |
Chris Wilson | 2d7ef39 | 2009-09-20 23:13:10 +0100 | [diff] [blame] | 1125 | |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 1126 | args->retained = obj->mm.madv != __I915_MADV_PURGED; |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1127 | mutex_unlock(&obj->mm.lock); |
Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 1128 | |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1129 | out: |
Chris Wilson | f8c417c | 2016-07-20 13:31:53 +0100 | [diff] [blame] | 1130 | i915_gem_object_put(obj); |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 1131 | return err; |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1132 | } |
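| | |
| | /* |
| | * Userspace counterpart, as a sketch (fd and handle are illustrative, |
| | * reupload_contents() is hypothetical; the fields come from the uapi |
| | * struct): mark a cached buffer purgeable, and on reuse check whether |
| | * the kernel discarded it: |
| | * |
| | *	struct drm_i915_gem_madvise madv = { |
| | *		.handle = handle, |
| | *		.madv = I915_MADV_DONTNEED, |
| | *	}; |
| | *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv); |
| | *	... |
| | *	madv.madv = I915_MADV_WILLNEED; |
| | *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv); |
| | *	if (!madv.retained) |
| | *		reupload_contents(bo);	... purged; contents are gone ... |
| | */ |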
| 1133 | |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1134 | void i915_gem_sanitize(struct drm_i915_private *i915) |
| 1135 | { |
Chris Wilson | 538ef96 | 2019-01-14 14:21:18 +0000 | [diff] [blame] | 1136 | intel_wakeref_t wakeref; |
| 1137 | |
Chris Wilson | c3160da | 2018-05-31 09:22:45 +0100 | [diff] [blame] | 1138 | GEM_TRACE("\n"); |
| 1139 | |
Daniele Ceraolo Spurio | d858d56 | 2019-06-13 16:21:54 -0700 | [diff] [blame] | 1140 | wakeref = intel_runtime_pm_get(&i915->runtime_pm); |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1141 | intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); |
Chris Wilson | c3160da | 2018-05-31 09:22:45 +0100 | [diff] [blame] | 1142 | |
| 1143 | /* |
| 1144 | * As we have just resumed the machine and woken the device up from |
| 1145 | * deep PCI sleep (presumably D3_cold), assume the HW has been reset |
| 1146 | * back to defaults, recovering from whatever wedged state we left it |
| 1147 | * in and so worth trying to use the device once more. |
| 1148 | */ |
Chris Wilson | c41166f | 2019-02-20 14:56:37 +0000 | [diff] [blame] | 1149 | if (i915_terminally_wedged(i915)) |
Chris Wilson | f36325f | 2017-08-26 12:09:34 +0100 | [diff] [blame] | 1150 | i915_gem_unset_wedged(i915); |
Chris Wilson | f36325f | 2017-08-26 12:09:34 +0100 | [diff] [blame] | 1151 | |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1152 | /* |
| 1153 | * If we inherit context state from the BIOS or earlier occupants |
| 1154 | * of the GPU, the GPU may be in an inconsistent state when we |
| 1155 | * try to take over. The only way to remove the earlier state |
| 1156 | * is by resetting. However, resetting on earlier gen is tricky as |
| 1157 | * it may impact the display and we are uncertain about the stability |
Joonas Lahtinen | ea117b8 | 2017-04-28 10:53:38 +0300 | [diff] [blame] | 1158 | * of the reset, so this could be applied to even earlier gen. |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1159 | */ |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 1160 | intel_gt_sanitize(i915, false); |
Chris Wilson | c3160da | 2018-05-31 09:22:45 +0100 | [diff] [blame] | 1161 | |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1162 | intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); |
Daniele Ceraolo Spurio | d858d56 | 2019-06-13 16:21:54 -0700 | [diff] [blame] | 1163 | intel_runtime_pm_put(&i915->runtime_pm, wakeref); |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1164 | } |
| 1165 | |
Tvrtko Ursulin | cf6844b | 2019-06-21 08:07:47 +0100 | [diff] [blame] | 1166 | static void init_unused_ring(struct intel_gt *gt, u32 base) |
Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 1167 | { |
Tvrtko Ursulin | cf6844b | 2019-06-21 08:07:47 +0100 | [diff] [blame] | 1168 | struct intel_uncore *uncore = gt->uncore; |
| 1169 | |
| 1170 | intel_uncore_write(uncore, RING_CTL(base), 0); |
| 1171 | intel_uncore_write(uncore, RING_HEAD(base), 0); |
| 1172 | intel_uncore_write(uncore, RING_TAIL(base), 0); |
| 1173 | intel_uncore_write(uncore, RING_START(base), 0); |
Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 1174 | } |
| 1175 | |
Tvrtko Ursulin | cf6844b | 2019-06-21 08:07:47 +0100 | [diff] [blame] | 1176 | static void init_unused_rings(struct intel_gt *gt) |
Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 1177 | { |
Tvrtko Ursulin | cf6844b | 2019-06-21 08:07:47 +0100 | [diff] [blame] | 1178 | struct drm_i915_private *i915 = gt->i915; |
| 1179 | |
| 1180 | if (IS_I830(i915)) { |
| 1181 | init_unused_ring(gt, PRB1_BASE); |
| 1182 | init_unused_ring(gt, SRB0_BASE); |
| 1183 | init_unused_ring(gt, SRB1_BASE); |
| 1184 | init_unused_ring(gt, SRB2_BASE); |
| 1185 | init_unused_ring(gt, SRB3_BASE); |
| 1186 | } else if (IS_GEN(i915, 2)) { |
| 1187 | init_unused_ring(gt, SRB0_BASE); |
| 1188 | init_unused_ring(gt, SRB1_BASE); |
| 1189 | } else if (IS_GEN(i915, 3)) { |
| 1190 | init_unused_ring(gt, PRB1_BASE); |
| 1191 | init_unused_ring(gt, PRB2_BASE); |
Ville Syrjälä | 81e7f20 | 2014-08-15 01:21:55 +0300 | [diff] [blame] | 1192 | } |
| 1193 | } |
| 1194 | |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1195 | static int init_hw(struct intel_gt *gt) |
Chris Wilson | 20a8a74 | 2017-02-08 14:30:31 +0000 | [diff] [blame] | 1196 | { |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1197 | struct drm_i915_private *i915 = gt->i915; |
| 1198 | struct intel_uncore *uncore = gt->uncore; |
Chris Wilson | d200cda | 2016-04-28 09:56:44 +0100 | [diff] [blame] | 1199 | int ret; |
Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 1200 | |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1201 | gt->last_init_time = ktime_get(); |
Chris Wilson | de867c2 | 2016-10-25 13:16:02 +0100 | [diff] [blame] | 1202 | |
Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 1203 | /* Double layer security blanket, see i915_gem_init() */ |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1204 | intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); |
Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 1205 | |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1206 | if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9) |
| 1207 | intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf)); |
Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 1208 | |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1209 | if (IS_HASWELL(i915)) |
| 1210 | intel_uncore_write(uncore, |
| 1211 | MI_PREDICATE_RESULT_2, |
| 1212 | IS_HSW_GT3(i915) ? |
| 1213 | LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); |
Rodrigo Vivi | 9435373 | 2013-08-28 16:45:46 -0300 | [diff] [blame] | 1214 | |
Tvrtko Ursulin | 094304b | 2018-12-03 12:50:10 +0000 | [diff] [blame] | 1215 | /* Apply the GT workarounds... */ |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1216 | intel_gt_apply_workarounds(gt); |
Tvrtko Ursulin | 094304b | 2018-12-03 12:50:10 +0000 | [diff] [blame] | 1217 | /* ...and determine whether they are sticking. */ |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1218 | intel_gt_verify_workarounds(gt, "init"); |
Oscar Mateo | 59b449d | 2018-04-10 09:12:47 -0700 | [diff] [blame] | 1219 | |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1220 | intel_gt_init_swizzling(gt); |
Ben Widawsky | 4fc7c97 | 2013-02-08 11:49:24 -0800 | [diff] [blame] | 1221 | |
Daniel Vetter | d5abdfd | 2014-11-20 09:45:19 +0100 | [diff] [blame] | 1222 | /* |
| 1223 | * At least 830 can leave some of the unused rings |
| 1224 | * "active" (ie. head != tail) after resume which |
| 1225 | * will prevent c3 entry. Make sure all unused rings |
| 1226 | * are totally idle. |
| 1227 | */ |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1228 | init_unused_rings(gt); |
Daniel Vetter | d5abdfd | 2014-11-20 09:45:19 +0100 | [diff] [blame] | 1229 | |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1230 | ret = i915_ppgtt_init_hw(gt); |
John Harrison | 4ad2fd8 | 2015-06-18 13:11:20 +0100 | [diff] [blame] | 1231 | if (ret) { |
Chris Wilson | 8177e11 | 2018-02-07 11:15:45 +0000 | [diff] [blame] | 1232 | DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); |
John Harrison | 4ad2fd8 | 2015-06-18 13:11:20 +0100 | [diff] [blame] | 1233 | goto out; |
| 1234 | } |
| 1235 | |
Tvrtko Ursulin | 6b0a8df | 2019-06-21 08:07:55 +0100 | [diff] [blame] | 1236 | ret = intel_wopcm_init_hw(&i915->wopcm, gt); |
Jackie Li | f08e203 | 2018-03-13 17:32:53 -0700 | [diff] [blame] | 1237 | if (ret) { |
| 1238 | DRM_ERROR("Enabling WOPCM failed (%d)\n", ret); |
| 1239 | goto out; |
| 1240 | } |
| 1241 | |
Michał Winiarski | 9bdc357 | 2017-10-25 18:25:19 +0100 | [diff] [blame] | 1242 | /* We can't enable contexts until all firmware is loaded */ |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1243 | ret = intel_uc_init_hw(i915); |
Chris Wilson | 8177e11 | 2018-02-07 11:15:45 +0000 | [diff] [blame] | 1244 | if (ret) { |
| 1245 | DRM_ERROR("Enabling uc failed (%d)\n", ret); |
Michał Winiarski | 9bdc357 | 2017-10-25 18:25:19 +0100 | [diff] [blame] | 1246 | goto out; |
Chris Wilson | 8177e11 | 2018-02-07 11:15:45 +0000 | [diff] [blame] | 1247 | } |
Michał Winiarski | 9bdc357 | 2017-10-25 18:25:19 +0100 | [diff] [blame] | 1248 | |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1249 | intel_mocs_init_l3cc_table(gt); |
Peter Antoine | 0ccdacf | 2016-04-13 15:03:25 +0100 | [diff] [blame] | 1250 | |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1251 | intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); |
Michał Winiarski | 60c0a66 | 2018-07-12 14:48:10 +0200 | [diff] [blame] | 1252 | |
| 1253 | return 0; |
Michal Wajdeczko | b96f6eb | 2018-06-05 12:24:43 +0000 | [diff] [blame] | 1254 | |
Michał Winiarski | 60c0a66 | 2018-07-12 14:48:10 +0200 | [diff] [blame] | 1255 | out: |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1256 | intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); |
| 1257 | |
| 1258 | return ret; |
| 1259 | } |
| 1260 | |
| 1261 | int i915_gem_init_hw(struct drm_i915_private *i915) |
| 1262 | { |
Tvrtko Ursulin | 8649187 | 2019-06-21 08:07:54 +0100 | [diff] [blame] | 1263 | struct intel_uncore *uncore = &i915->uncore; |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1264 | int ret; |
| 1265 | |
| 1266 | BUG_ON(!i915->kernel_context); |
| 1267 | ret = i915_terminally_wedged(i915); |
| 1268 | if (ret) |
| 1269 | return ret; |
| 1270 | |
Tvrtko Ursulin | 8649187 | 2019-06-21 08:07:54 +0100 | [diff] [blame] | 1271 | /* Double layer security blanket, see i915_gem_init() */ |
| 1272 | intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); |
| 1273 | |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1274 | ret = init_hw(&i915->gt); |
Tvrtko Ursulin | 8649187 | 2019-06-21 08:07:54 +0100 | [diff] [blame] | 1275 | if (ret) |
| 1276 | goto err_init; |
| 1277 | |
| 1278 | /* Only when the HW is re-initialised, can we replay the requests */ |
| 1279 | ret = intel_engines_resume(i915); |
| 1280 | if (ret) |
| 1281 | goto err_engines; |
| 1282 | |
| 1283 | intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); |
| 1284 | |
| 1285 | intel_engines_set_scheduler_caps(i915); |
| 1286 | |
| 1287 | return 0; |
| 1288 | |
| 1289 | err_engines: |
| 1290 | intel_uc_fini_hw(i915); |
| 1291 | err_init: |
| 1292 | intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); |
Tvrtko Ursulin | abc584f | 2019-06-21 08:07:53 +0100 | [diff] [blame] | 1293 | |
| 1294 | intel_engines_set_scheduler_caps(i915); |
Michał Winiarski | 60c0a66 | 2018-07-12 14:48:10 +0200 | [diff] [blame] | 1295 | |
| 1296 | return ret; |
Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 1297 | } |
| 1298 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1299 | static int __intel_engines_record_defaults(struct drm_i915_private *i915) |
| 1300 | { |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1301 | struct intel_engine_cs *engine; |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1302 | struct i915_gem_context *ctx; |
| 1303 | struct i915_gem_engines *e; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1304 | enum intel_engine_id id; |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1305 | int err = 0; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1306 | |
| 1307 | /* |
| 1308 | * As we reset the gpu during very early sanitisation, the current |
| 1309 | * register state on the GPU should reflect its default values. |
| 1310 | * We load a context onto the hw (with restore-inhibit), then switch |
| 1311 | * over to a second context to save that default register state. We |
| 1312 | * can then prime every new context with that state so they all start |
| 1313 | * from the same default HW values. |
| 1314 | */ |
| 1315 | |
| 1316 | ctx = i915_gem_context_create_kernel(i915, 0); |
| 1317 | if (IS_ERR(ctx)) |
| 1318 | return PTR_ERR(ctx); |
| 1319 | |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1320 | e = i915_gem_context_lock_engines(ctx); |
| 1321 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1322 | for_each_engine(engine, i915, id) { |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1323 | struct intel_context *ce = e->engines[id]; |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1324 | struct i915_request *rq; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1325 | |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1326 | rq = intel_context_create_request(ce); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1327 | if (IS_ERR(rq)) { |
| 1328 | err = PTR_ERR(rq); |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1329 | goto err_active; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1330 | } |
| 1331 | |
Chris Wilson | 3fef5cd | 2017-11-20 10:20:02 +0000 | [diff] [blame] | 1332 | err = 0; |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1333 | if (rq->engine->init_context) |
| 1334 | err = rq->engine->init_context(rq); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1335 | |
Chris Wilson | 697b9a8 | 2018-06-12 11:51:35 +0100 | [diff] [blame] | 1336 | i915_request_add(rq); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1337 | if (err) |
| 1338 | goto err_active; |
| 1339 | } |
| 1340 | |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1341 | /* Flush the default context image to memory, and enable powersaving. */ |
Chris Wilson | 23c3c3d | 2019-04-24 21:07:14 +0100 | [diff] [blame] | 1342 | if (!i915_gem_load_power_context(i915)) { |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1343 | err = -EIO; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1344 | goto err_active; |
Chris Wilson | 2621cef | 2018-07-09 13:20:43 +0100 | [diff] [blame] | 1345 | } |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1346 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1347 | for_each_engine(engine, i915, id) { |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1348 | struct intel_context *ce = e->engines[id]; |
| 1349 | struct i915_vma *state = ce->state; |
Chris Wilson | 37d7c9c | 2018-09-14 13:35:03 +0100 | [diff] [blame] | 1350 | void *vaddr; |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1351 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1352 | if (!state) |
| 1353 | continue; |
| 1354 | |
Chris Wilson | 0881954 | 2019-03-08 13:25:22 +0000 | [diff] [blame] | 1355 | GEM_BUG_ON(intel_context_is_pinned(ce)); |
Chris Wilson | c4d52fe | 2019-03-08 13:25:19 +0000 | [diff] [blame] | 1356 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1357 | /* |
| 1358 | * As we will hold a reference to the logical state, it will |
| 1359 | * not be torn down with the context, and importantly the |
| 1360 | * object will hold onto its vma (making it possible for a |
| 1361 | * stray GTT write to corrupt our defaults). Unmap the vma |
| 1362 | * from the GTT to prevent such accidents and reclaim the |
| 1363 | * space. |
| 1364 | */ |
| 1365 | err = i915_vma_unbind(state); |
| 1366 | if (err) |
| 1367 | goto err_active; |
| 1368 | |
Chris Wilson | 6951e58 | 2019-05-28 10:29:51 +0100 | [diff] [blame] | 1369 | i915_gem_object_lock(state->obj); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1370 | err = i915_gem_object_set_to_cpu_domain(state->obj, false); |
Chris Wilson | 6951e58 | 2019-05-28 10:29:51 +0100 | [diff] [blame] | 1371 | i915_gem_object_unlock(state->obj); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1372 | if (err) |
| 1373 | goto err_active; |
| 1374 | |
| 1375 | engine->default_state = i915_gem_object_get(state->obj); |
Chris Wilson | a679f58 | 2019-03-21 16:19:07 +0000 | [diff] [blame] | 1376 | i915_gem_object_set_cache_coherency(engine->default_state, |
| 1377 | I915_CACHE_LLC); |
Chris Wilson | 37d7c9c | 2018-09-14 13:35:03 +0100 | [diff] [blame] | 1378 | |
| 1379 | /* Check we can acquire the image of the context state */ |
| 1380 | vaddr = i915_gem_object_pin_map(engine->default_state, |
Chris Wilson | 666424a | 2018-09-14 13:35:04 +0100 | [diff] [blame] | 1381 | I915_MAP_FORCE_WB); |
Chris Wilson | 37d7c9c | 2018-09-14 13:35:03 +0100 | [diff] [blame] | 1382 | if (IS_ERR(vaddr)) { |
| 1383 | err = PTR_ERR(vaddr); |
| 1384 | goto err_active; |
| 1385 | } |
| 1386 | |
| 1387 | i915_gem_object_unpin_map(engine->default_state); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1388 | } |
| 1389 | |
| 1390 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) { |
| 1391 | unsigned int found = intel_engines_has_context_isolation(i915); |
| 1392 | |
| 1393 | /* |
| 1394 | * Make sure that classes with multiple engine instances all |
| 1395 | * share the same basic configuration. |
| 1396 | */ |
| 1397 | for_each_engine(engine, i915, id) { |
| 1398 | unsigned int bit = BIT(engine->uabi_class); |
| 1399 | unsigned int expected = engine->default_state ? bit : 0; |
| 1400 | |
| 1401 | if ((found & bit) != expected) { |
| 1402 | DRM_ERROR("mismatching default context state for class %d on engine %s\n", |
| 1403 | engine->uabi_class, engine->name); |
| 1404 | } |
| 1405 | } |
| 1406 | } |
| 1407 | |
| 1408 | out_ctx: |
Chris Wilson | 5e2a041 | 2019-04-26 17:33:34 +0100 | [diff] [blame] | 1409 | i915_gem_context_unlock_engines(ctx); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1410 | i915_gem_context_set_closed(ctx); |
| 1411 | i915_gem_context_put(ctx); |
| 1412 | return err; |
| 1413 | |
| 1414 | err_active: |
| 1415 | /* |
| 1416 | * If we have to abandon now, we expect the engines to be idle |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1417 | * and ready to be torn down. The quickest way we can accomplish |
| 1418 | * this is by declaring ourselves wedged. |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1419 | */ |
Chris Wilson | 604c37d | 2019-03-08 09:36:55 +0000 | [diff] [blame] | 1420 | i915_gem_set_wedged(i915); |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1421 | goto out_ctx; |
| 1422 | } |
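| | |
| | /* |
| | * For orientation: the default_state saved here is later copied into |
| | * each new context image by the engine backends. In rough sketch form |
| | * (abbreviated; ce_vaddr stands in for the pinned context image): |
| | * |
| | *	defaults = i915_gem_object_pin_map(engine->default_state, |
| | *					   I915_MAP_WB); |
| | *	memcpy(ce_vaddr, defaults, engine->context_size); |
| | *	i915_gem_object_unpin_map(engine->default_state); |
| | * |
| | * so every new context starts from the same captured register state. |
| | */ |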
| 1423 | |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 1424 | static int |
| 1425 | i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size) |
| 1426 | { |
| 1427 | struct drm_i915_gem_object *obj; |
| 1428 | struct i915_vma *vma; |
| 1429 | int ret; |
| 1430 | |
| 1431 | obj = i915_gem_object_create_stolen(i915, size); |
| 1432 | if (!obj) |
| 1433 | obj = i915_gem_object_create_internal(i915, size); |
| 1434 | if (IS_ERR(obj)) { |
| 1435 | DRM_ERROR("Failed to allocate scratch page\n"); |
| 1436 | return PTR_ERR(obj); |
| 1437 | } |
| 1438 | |
| 1439 | vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); |
| 1440 | if (IS_ERR(vma)) { |
| 1441 | ret = PTR_ERR(vma); |
| 1442 | goto err_unref; |
| 1443 | } |
| 1444 | |
| 1445 | ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); |
| 1446 | if (ret) |
| 1447 | goto err_unref; |
| 1448 | |
| 1449 | i915->gt.scratch = vma; |
| 1450 | return 0; |
| 1451 | |
| 1452 | err_unref: |
| 1453 | i915_gem_object_put(obj); |
| 1454 | return ret; |
| 1455 | } |
| 1456 | |
| 1457 | static void i915_gem_fini_scratch(struct drm_i915_private *i915) |
| 1458 | { |
| 1459 | i915_vma_unpin_and_release(&i915->gt.scratch, 0); |
| 1460 | } |
| 1461 | |
Chris Wilson | 254e118 | 2019-04-17 08:56:28 +0100 | [diff] [blame] | 1462 | static int intel_engines_verify_workarounds(struct drm_i915_private *i915) |
| 1463 | { |
| 1464 | struct intel_engine_cs *engine; |
| 1465 | enum intel_engine_id id; |
| 1466 | int err = 0; |
| 1467 | |
| 1468 | if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
| 1469 | return 0; |
| 1470 | |
| 1471 | for_each_engine(engine, i915, id) { |
| 1472 | if (intel_engine_verify_workarounds(engine, "load")) |
| 1473 | err = -EIO; |
| 1474 | } |
| 1475 | |
| 1476 | return err; |
| 1477 | } |
| 1478 | |
Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 1479 | int i915_gem_init(struct drm_i915_private *dev_priv) |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1480 | { |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1481 | int ret; |
| 1482 | |
Changbin Du | 52b2416 | 2018-05-08 17:07:05 +0800 | [diff] [blame] | 1483 | /* We need to fall back to 4K pages if the host doesn't support huge gtt. */ |
| 1484 | if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) |
Matthew Auld | da9fe3f3 | 2017-10-06 23:18:31 +0100 | [diff] [blame] | 1485 | mkwrite_device_info(dev_priv)->page_sizes = |
| 1486 | I915_GTT_PAGE_SIZE_4K; |
| 1487 | |
Chris Wilson | 9431282 | 2017-05-03 10:39:18 +0100 | [diff] [blame] | 1488 | dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); |
Chris Wilson | 57822dc | 2017-02-22 11:40:48 +0000 | [diff] [blame] | 1489 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1490 | i915_timelines_init(dev_priv); |
| 1491 | |
Chris Wilson | ee48700 | 2017-11-22 17:26:21 +0000 | [diff] [blame] | 1492 | ret = i915_gem_init_userptr(dev_priv); |
| 1493 | if (ret) |
| 1494 | return ret; |
| 1495 | |
Sagar Arun Kamble | 70deead | 2018-01-24 21:16:58 +0530 | [diff] [blame] | 1496 | ret = intel_uc_init_misc(dev_priv); |
Michał Winiarski | 3176ff4 | 2017-12-13 23:13:47 +0100 | [diff] [blame] | 1497 | if (ret) |
| 1498 | return ret; |
| 1499 | |
Michal Wajdeczko | f7dc015 | 2018-06-28 14:15:21 +0000 | [diff] [blame] | 1500 | ret = intel_wopcm_init(&dev_priv->wopcm); |
| 1501 | if (ret) |
| 1502 | goto err_uc_misc; |
| 1503 | |
Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 1504 | /* This is just a security blanket to placate dragons. |
| 1505 | * On some systems, we very sporadically observe that the first TLBs |
| 1506 | * used by the CS may be stale, despite us poking the TLB reset. If |
| 1507 | * we hold the forcewake during initialisation these problems |
| 1508 | * just magically go away. |
| 1509 | */ |
Chris Wilson | ee48700 | 2017-11-22 17:26:21 +0000 | [diff] [blame] | 1510 | mutex_lock(&dev_priv->drm.struct_mutex); |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1511 | intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); |
Chris Wilson | 5e4f518 | 2015-02-13 14:35:59 +0000 | [diff] [blame] | 1512 | |
Tvrtko Ursulin | 1d66377a | 2019-06-21 08:08:05 +0100 | [diff] [blame^] | 1513 | ret = i915_init_ggtt(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1514 | if (ret) { |
| 1515 | GEM_BUG_ON(ret == -EIO); |
| 1516 | goto err_unlock; |
| 1517 | } |
Jesse Barnes | d62b489 | 2013-03-08 10:45:53 -0800 | [diff] [blame] | 1518 | |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 1519 | ret = i915_gem_init_scratch(dev_priv, |
Lucas De Marchi | cf819ef | 2018-12-12 10:10:43 -0800 | [diff] [blame] | 1520 | IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1521 | if (ret) { |
| 1522 | GEM_BUG_ON(ret == -EIO); |
| 1523 | goto err_ggtt; |
| 1524 | } |
Ben Widawsky | 2fa48d8 | 2013-12-06 14:11:04 -0800 | [diff] [blame] | 1525 | |
Chris Wilson | 11334c6 | 2019-04-26 17:33:33 +0100 | [diff] [blame] | 1526 | ret = intel_engines_setup(dev_priv); |
| 1527 | if (ret) { |
| 1528 | GEM_BUG_ON(ret == -EIO); |
| 1529 | goto err_scratch; |
| 1530 | } |
| 1531 | |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 1532 | ret = i915_gem_contexts_init(dev_priv); |
| 1533 | if (ret) { |
| 1534 | GEM_BUG_ON(ret == -EIO); |
| 1535 | goto err_scratch; |
| 1536 | } |
| 1537 | |
Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 1538 | ret = intel_engines_init(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1539 | if (ret) { |
| 1540 | GEM_BUG_ON(ret == -EIO); |
| 1541 | goto err_context; |
| 1542 | } |
Daniel Vetter | 53ca26c | 2012-04-26 23:28:03 +0200 | [diff] [blame] | 1543 | |
Chris Wilson | f58d13d | 2017-11-10 14:26:29 +0000 | [diff] [blame] | 1544 | intel_init_gt_powersave(dev_priv); |
| 1545 | |
Michał Winiarski | 61b5c15 | 2017-12-13 23:13:48 +0100 | [diff] [blame] | 1546 | ret = intel_uc_init(dev_priv); |
Chris Wilson | cc6a818 | 2017-11-10 14:26:30 +0000 | [diff] [blame] | 1547 | if (ret) |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1548 | goto err_pm; |
Chris Wilson | cc6a818 | 2017-11-10 14:26:30 +0000 | [diff] [blame] | 1549 | |
Michał Winiarski | 61b5c15 | 2017-12-13 23:13:48 +0100 | [diff] [blame] | 1550 | ret = i915_gem_init_hw(dev_priv); |
| 1551 | if (ret) |
| 1552 | goto err_uc_init; |
| 1553 | |
Chris Wilson | cc6a818 | 2017-11-10 14:26:30 +0000 | [diff] [blame] | 1554 | /* |
| 1555 | * Despite its name, intel_init_clock_gating applies display |
| 1556 | * clock gating workarounds, GT mmio workarounds and the occasional |
| 1557 | * GT power context workaround. Worse, sometimes it includes a context |
| 1558 | * register workaround which we need to apply before we record the |
| 1559 | * default HW state for all contexts. |
| 1560 | * |
| 1561 | * FIXME: break up the workarounds and apply them at the right time! |
| 1562 | */ |
| 1563 | intel_init_clock_gating(dev_priv); |
| 1564 | |
Chris Wilson | 254e118 | 2019-04-17 08:56:28 +0100 | [diff] [blame] | 1565 | ret = intel_engines_verify_workarounds(dev_priv); |
| 1566 | if (ret) |
| 1567 | goto err_init_hw; |
| 1568 | |
Chris Wilson | d2b4b97 | 2017-11-10 14:26:33 +0000 | [diff] [blame] | 1569 | ret = __intel_engines_record_defaults(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1570 | if (ret) |
| 1571 | goto err_init_hw; |
| 1572 | |
| 1573 | if (i915_inject_load_failure()) { |
| 1574 | ret = -ENODEV; |
| 1575 | goto err_init_hw; |
| 1576 | } |
| 1577 | |
| 1578 | if (i915_inject_load_failure()) { |
| 1579 | ret = -EIO; |
| 1580 | goto err_init_hw; |
| 1581 | } |
| 1582 | |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1583 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1584 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 1585 | |
| 1586 | return 0; |
| 1587 | |
| 1588 | /* |
| 1589 | * Unwinding is complicated by the fact that we want to handle -EIO to mean |
| 1590 | * disable GPU submission but keep KMS alive. We want to mark the |
| 1591 | * HW as irreversibly wedged, but keep enough state around that the |
| 1592 | * driver doesn't explode during runtime. |
| 1593 | */ |
| 1594 | err_init_hw: |
Chris Wilson | 8571a05 | 2018-06-06 15:54:41 +0100 | [diff] [blame] | 1595 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 1596 | |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 1597 | i915_gem_set_wedged(dev_priv); |
Chris Wilson | 5861b01 | 2019-03-08 09:36:54 +0000 | [diff] [blame] | 1598 | i915_gem_suspend(dev_priv); |
Chris Wilson | 8571a05 | 2018-06-06 15:54:41 +0100 | [diff] [blame] | 1599 | i915_gem_suspend_late(dev_priv); |
| 1600 | |
Chris Wilson | 8bcf9f7 | 2018-07-10 10:44:20 +0100 | [diff] [blame] | 1601 | i915_gem_drain_workqueue(dev_priv); |
| 1602 | |
Chris Wilson | 8571a05 | 2018-06-06 15:54:41 +0100 | [diff] [blame] | 1603 | mutex_lock(&dev_priv->drm.struct_mutex); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1604 | intel_uc_fini_hw(dev_priv); |
Michał Winiarski | 61b5c15 | 2017-12-13 23:13:48 +0100 | [diff] [blame] | 1605 | err_uc_init: |
| 1606 | intel_uc_fini(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1607 | err_pm: |
| 1608 | if (ret != -EIO) { |
| 1609 | intel_cleanup_gt_powersave(dev_priv); |
Chris Wilson | 45b9c96 | 2019-05-01 11:32:04 +0100 | [diff] [blame] | 1610 | intel_engines_cleanup(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1611 | } |
| 1612 | err_context: |
| 1613 | if (ret != -EIO) |
| 1614 | i915_gem_contexts_fini(dev_priv); |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 1615 | err_scratch: |
| 1616 | i915_gem_fini_scratch(dev_priv); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1617 | err_ggtt: |
| 1618 | err_unlock: |
Daniele Ceraolo Spurio | 3ceea6a | 2019-03-19 11:35:36 -0700 | [diff] [blame] | 1619 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1620 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 1621 | |
Michal Wajdeczko | f7dc015 | 2018-06-28 14:15:21 +0000 | [diff] [blame] | 1622 | err_uc_misc: |
Sagar Arun Kamble | 70deead | 2018-01-24 21:16:58 +0530 | [diff] [blame] | 1623 | intel_uc_fini_misc(dev_priv); |
Sagar Arun Kamble | da943b5 | 2018-01-10 18:24:16 +0530 | [diff] [blame] | 1624 | |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1625 | if (ret != -EIO) { |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1626 | i915_gem_cleanup_userptr(dev_priv); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1627 | i915_timelines_fini(dev_priv); |
| 1628 | } |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1629 | |
Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 1630 | if (ret == -EIO) { |
Chris Wilson | 7ed43df | 2018-07-26 09:50:32 +0100 | [diff] [blame] | 1631 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 1632 | |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1633 | /* |
| 1634 | * Allow engine initialisation to fail by marking the GPU as |
Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 1635 | * wedged. But we only want to do this where the GPU is angry; |
| 1636 | * for all other failures, such as an allocation failure, bail. |
| 1637 | */ |
Chris Wilson | c41166f | 2019-02-20 14:56:37 +0000 | [diff] [blame] | 1638 | if (!i915_reset_failed(dev_priv)) { |
Chris Wilson | 51c18bf | 2018-06-09 12:10:58 +0100 | [diff] [blame] | 1639 | i915_load_error(dev_priv, |
| 1640 | "Failed to initialize GPU, declaring it wedged!\n"); |
Chris Wilson | 6f74b36 | 2017-10-15 15:37:25 +0100 | [diff] [blame] | 1641 | i915_gem_set_wedged(dev_priv); |
| 1642 | } |
Chris Wilson | 7ed43df | 2018-07-26 09:50:32 +0100 | [diff] [blame] | 1643 | |
| 1644 | /* Minimal basic recovery for KMS */ |
| 1645 | ret = i915_ggtt_enable_hw(dev_priv); |
| 1646 | i915_gem_restore_gtt_mappings(dev_priv); |
| 1647 | i915_gem_restore_fences(dev_priv); |
| 1648 | intel_init_clock_gating(dev_priv); |
| 1649 | |
| 1650 | mutex_unlock(&dev_priv->drm.struct_mutex); |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1651 | } |
| 1652 | |
Chris Wilson | 6ca9a2b | 2017-12-13 13:43:47 +0000 | [diff] [blame] | 1653 | i915_gem_drain_freed_objects(dev_priv); |
Chris Wilson | 6099032 | 2014-04-09 09:19:42 +0100 | [diff] [blame] | 1654 | return ret; |
Chris Wilson | 1070a42 | 2012-04-24 15:47:41 +0100 | [diff] [blame] | 1655 | } |
| 1656 | |
Janusz Krzysztofik | 47bc28d | 2019-05-30 15:31:05 +0200 | [diff] [blame] | 1657 | void i915_gem_fini_hw(struct drm_i915_private *dev_priv) |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1658 | { |
Chris Wilson | 79ffac85 | 2019-04-24 21:07:17 +0100 | [diff] [blame] | 1659 | GEM_BUG_ON(dev_priv->gt.awake); |
| 1660 | |
Chris Wilson | 0cf289b | 2019-06-13 08:32:54 +0100 | [diff] [blame] | 1661 | intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref); |
Chris Wilson | b27e35a | 2019-05-27 12:51:14 +0100 | [diff] [blame] | 1662 | |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1663 | i915_gem_suspend_late(dev_priv); |
Chris Wilson | 30b71084 | 2018-08-12 23:36:29 +0100 | [diff] [blame] | 1664 | intel_disable_gt_powersave(dev_priv); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1665 | |
| 1666 | /* Flush any outstanding unpin_work. */ |
| 1667 | i915_gem_drain_workqueue(dev_priv); |
| 1668 | |
| 1669 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 1670 | intel_uc_fini_hw(dev_priv); |
| 1671 | intel_uc_fini(dev_priv); |
Janusz Krzysztofik | 47bc28d | 2019-05-30 15:31:05 +0200 | [diff] [blame] | 1672 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 1673 | |
| 1674 | i915_gem_drain_freed_objects(dev_priv); |
| 1675 | } |
| 1676 | |
| 1677 | void i915_gem_fini(struct drm_i915_private *dev_priv) |
| 1678 | { |
| 1679 | mutex_lock(&dev_priv->drm.struct_mutex); |
Chris Wilson | 45b9c96 | 2019-05-01 11:32:04 +0100 | [diff] [blame] | 1680 | intel_engines_cleanup(dev_priv); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1681 | i915_gem_contexts_fini(dev_priv); |
Chris Wilson | 5179749 | 2018-12-04 14:15:16 +0000 | [diff] [blame] | 1682 | i915_gem_fini_scratch(dev_priv); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1683 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 1684 | |
Tvrtko Ursulin | 25d140f | 2018-12-03 13:33:19 +0000 | [diff] [blame] | 1685 | intel_wa_list_free(&dev_priv->gt_wa_list); |
| 1686 | |
Chris Wilson | 30b71084 | 2018-08-12 23:36:29 +0100 | [diff] [blame] | 1687 | intel_cleanup_gt_powersave(dev_priv); |
| 1688 | |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1689 | intel_uc_fini_misc(dev_priv); |
| 1690 | i915_gem_cleanup_userptr(dev_priv); |
Chris Wilson | 1e34556 | 2019-01-28 10:23:56 +0000 | [diff] [blame] | 1691 | i915_timelines_fini(dev_priv); |
Michal Wajdeczko | 8979187a | 2018-06-04 09:00:32 +0000 | [diff] [blame] | 1692 | |
| 1693 | i915_gem_drain_freed_objects(dev_priv); |
| 1694 | |
| 1695 | WARN_ON(!list_empty(&dev_priv->contexts.list)); |
| 1696 | } |
| 1697 | |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 1698 | void i915_gem_init_mmio(struct drm_i915_private *i915) |
| 1699 | { |
| 1700 | i915_gem_sanitize(i915); |
| 1701 | } |
| 1702 | |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1703 | static void i915_gem_init__mm(struct drm_i915_private *i915) |
| 1704 | { |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1705 | spin_lock_init(&i915->mm.obj_lock); |
| 1706 | spin_lock_init(&i915->mm.free_lock); |
| 1707 | |
| 1708 | init_llist_head(&i915->mm.free_list); |
| 1709 | |
Chris Wilson | 3b4fa96 | 2019-05-30 21:34:59 +0100 | [diff] [blame] | 1710 | INIT_LIST_HEAD(&i915->mm.purge_list); |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1711 | INIT_LIST_HEAD(&i915->mm.shrink_list); |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1712 | |
Chris Wilson | 8475355 | 2019-05-28 10:29:45 +0100 | [diff] [blame] | 1713 | i915_gem_init__objects(i915); |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1714 | } |
| 1715 | |
Michal Wajdeczko | a0de908 | 2018-03-23 12:34:49 +0000 | [diff] [blame] | 1716 | int i915_gem_init_early(struct drm_i915_private *dev_priv) |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1717 | { |
Chris Wilson | 13f1bfd | 2019-02-28 10:20:34 +0000 | [diff] [blame] | 1718 | int err; |
Chris Wilson | d1b48c1 | 2017-08-16 09:52:08 +0100 | [diff] [blame] | 1719 | |
Chris Wilson | 9c52d1c | 2017-11-10 23:24:47 +0000 | [diff] [blame] | 1720 | i915_gem_init__mm(dev_priv); |
Chris Wilson | 23c3c3d | 2019-04-24 21:07:14 +0100 | [diff] [blame] | 1721 | i915_gem_init__pm(dev_priv); |
Chris Wilson | f212381 | 2017-10-16 12:40:37 +0100 | [diff] [blame] | 1722 | |
Chris Wilson | 1f15b76 | 2016-07-01 17:23:14 +0100 | [diff] [blame] | 1723 | init_waitqueue_head(&dev_priv->gpu_error.wait_queue); |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1724 | init_waitqueue_head(&dev_priv->gpu_error.reset_queue); |
Chris Wilson | 18bb2bc | 2019-01-14 21:04:01 +0000 | [diff] [blame] | 1725 | mutex_init(&dev_priv->gpu_error.wedge_mutex); |
Chris Wilson | 2caffbf | 2019-02-08 15:37:03 +0000 | [diff] [blame] | 1726 | init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); |
Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 1727 | |
Joonas Lahtinen | 6f63340 | 2016-09-01 14:58:21 +0300 | [diff] [blame] | 1728 | atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); |
| 1729 | |
Chris Wilson | b5add95 | 2016-08-04 16:32:36 +0100 | [diff] [blame] | 1730 | spin_lock_init(&dev_priv->fb_tracking.lock); |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 1731 | |
Matthew Auld | 465c403 | 2017-10-06 23:18:14 +0100 | [diff] [blame] | 1732 | err = i915_gemfs_init(dev_priv); |
| 1733 | if (err) |
| 1734 | DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);
| 1735 | |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 1736 | return 0; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1737 | } |
Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 1738 | |
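/**
 * i915_gem_cleanup_early - undo i915_gem_init_early()
 * @dev_priv: the i915 device
 *
 * Drain the deferred-free machinery and assert that no objects remain
 * before the driver's software state is torn down.
 */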
Michal Wajdeczko | a0de908 | 2018-03-23 12:34:49 +0000 | [diff] [blame] | 1739 | void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) |
Imre Deak | d64aa09 | 2016-01-19 15:26:29 +0200 | [diff] [blame] | 1740 | { |
Chris Wilson | c4d4c1c | 2017-02-10 16:35:23 +0000 | [diff] [blame] | 1741 | i915_gem_drain_freed_objects(dev_priv); |
Chris Wilson | c9c70471 | 2018-02-19 22:06:31 +0000 | [diff] [blame] | 1742 | GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); |
| 1743 | GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); |
Chris Wilson | d82b4b2 | 2019-05-30 21:35:00 +0100 | [diff] [blame] | 1744 | WARN_ON(dev_priv->mm.shrink_count); |
Matthew Auld | ea84aa7 | 2016-11-17 21:04:11 +0000 | [diff] [blame] | 1745 | |
Chris Wilson | 2caffbf | 2019-02-08 15:37:03 +0000 | [diff] [blame] | 1746 | cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); |
| 1747 | |
Matthew Auld | 465c403 | 2017-10-06 23:18:14 +0100 | [diff] [blame] | 1748 | i915_gemfs_fini(dev_priv); |
Imre Deak | d64aa09 | 2016-01-19 15:26:29 +0200 | [diff] [blame] | 1749 | } |
| 1750 | |
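/*
 * First half of GEM's hibernation support, called from the driver's PM
 * freeze hook: shrink everything we can up front so that as little as
 * possible ends up in the hibernation image. The second half runs in
 * i915_gem_freeze_late(), just before the image is written.
 */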
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 1751 | int i915_gem_freeze(struct drm_i915_private *dev_priv) |
| 1752 | { |
Chris Wilson | d0aa301 | 2017-04-07 11:25:49 +0100 | [diff] [blame] | 1753 | /* Discard all purgeable objects; userspace will recover those as
| 1754 | * required after resuming. |
| 1755 | */ |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 1756 | i915_gem_shrink_all(dev_priv); |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 1757 | |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 1758 | return 0; |
| 1759 | } |
| 1760 | |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 1761 | int i915_gem_freeze_late(struct drm_i915_private *i915) |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1762 | { |
| 1763 | struct drm_i915_gem_object *obj; |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1764 | intel_wakeref_t wakeref; |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1765 | |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 1766 | /* |
| 1767 | * Called just before we write the hibernation image. |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1768 | * |
| 1769 | * We need to update the domain tracking to reflect that the CPU |
| 1770 | * will be accessing all the pages to create and restore from the |
| 1771 | * hibernation, and so upon restoration those pages will be in the |
| 1772 | * CPU domain. |
| 1773 | * |
| 1774 | * To make sure the hibernation image contains the latest state, |
| 1775 | * we update that state just before writing out the image. |
Chris Wilson | 7aab2d5 | 2016-09-09 20:02:18 +0100 | [diff] [blame] | 1776 | * |
| 1777 | * To try to reduce the hibernation image, we manually shrink
Chris Wilson | d0aa301 | 2017-04-07 11:25:49 +0100 | [diff] [blame] | 1778 | * the objects as well; see i915_gem_freeze().
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1779 | */ |
| 1780 | |
Daniele Ceraolo Spurio | d858d56 | 2019-06-13 16:21:54 -0700 | [diff] [blame] | 1781 | wakeref = intel_runtime_pm_get(&i915->runtime_pm); |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1782 | |
| 1783 | i915_gem_shrink(i915, -1UL, NULL, ~0); |
Chris Wilson | 95c778d | 2018-06-01 15:41:25 +0100 | [diff] [blame] | 1784 | i915_gem_drain_freed_objects(i915); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1785 | |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1786 | list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) { |
| 1787 | i915_gem_object_lock(obj); |
| 1788 | WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true)); |
| 1789 | i915_gem_object_unlock(obj); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1790 | } |
Chris Wilson | ecab9be | 2019-06-12 11:57:20 +0100 | [diff] [blame] | 1791 | |
Daniele Ceraolo Spurio | d858d56 | 2019-06-13 16:21:54 -0700 | [diff] [blame] | 1792 | intel_runtime_pm_put(&i915->runtime_pm, wakeref); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 1793 | |
| 1794 | return 0; |
| 1795 | } |
| 1796 | |
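/*
 * Per-client teardown, called when a drm_file is closed: any requests still
 * tracked on behalf of this client are detached so that a later
 * retire_requests cannot dereference the about-to-be-freed file_priv.
 */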
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1797 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1798 | { |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1799 | struct drm_i915_file_private *file_priv = file->driver_priv; |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 1800 | struct i915_request *request; |
Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1801 | |
| 1802 | /* Clean up our request list when the client is going away, so that |
| 1803 | * later retire_requests won't dereference our soon-to-be-gone |
| 1804 | * file_priv. |
| 1805 | */ |
Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1806 | spin_lock(&file_priv->mm.lock); |
Chris Wilson | c8659ef | 2017-03-02 12:25:25 +0000 | [diff] [blame] | 1807 | list_for_each_entry(request, &file_priv->mm.request_list, client_link) |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1808 | request->file_priv = NULL; |
Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1809 | spin_unlock(&file_priv->mm.lock); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1810 | } |
| 1811 | |
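/*
 * Per-client setup, called when a new file handle is opened on the device:
 * allocate the drm_i915_file_private and open the client's default GEM
 * context. On context-creation failure the file_priv is freed again and
 * the error propagated to the caller.
 */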
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 1812 | int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1813 | { |
| 1814 | struct drm_i915_file_private *file_priv; |
Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 1815 | int ret; |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1816 | |
Chris Wilson | c4c29d7 | 2016-11-09 10:45:07 +0000 | [diff] [blame] | 1817 | DRM_DEBUG("\n"); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1818 | |
| 1819 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); |
| 1820 | if (!file_priv) |
| 1821 | return -ENOMEM; |
| 1822 | |
| 1823 | file->driver_priv = file_priv; |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 1824 | file_priv->dev_priv = i915; |
Chris Wilson | ab0e7ff | 2014-02-25 17:11:24 +0200 | [diff] [blame] | 1825 | file_priv->file = file; |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1826 | |
| 1827 | spin_lock_init(&file_priv->mm.lock); |
| 1828 | INIT_LIST_HEAD(&file_priv->mm.request_list); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1829 | |
Chris Wilson | c80ff16 | 2016-07-27 09:07:27 +0100 | [diff] [blame] | 1830 | file_priv->bsd_engine = -1; |
Mika Kuoppala | 14921f3 | 2018-06-15 13:44:29 +0300 | [diff] [blame] | 1831 | file_priv->hang_timestamp = jiffies; |
Tvrtko Ursulin | de1add3 | 2016-01-15 15:12:50 +0000 | [diff] [blame] | 1832 | |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 1833 | ret = i915_gem_context_open(i915, file); |
Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 1834 | if (ret) |
| 1835 | kfree(file_priv); |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1836 | |
Ben Widawsky | e422b88 | 2013-12-06 14:10:58 -0800 | [diff] [blame] | 1837 | return ret; |
Chris Wilson | b29c19b | 2013-09-25 17:34:56 +0100 | [diff] [blame] | 1838 | } |
| 1839 | |
Daniel Vetter | b680c37 | 2014-09-19 18:27:27 +0200 | [diff] [blame] | 1840 | /** |
| 1841 | * i915_gem_track_fb - update frontbuffer tracking |
Geliang Tang | d9072a3 | 2015-09-15 05:58:44 -0700 | [diff] [blame] | 1842 | * @old: current GEM buffer for the frontbuffer slots |
| 1843 | * @new: new GEM buffer for the frontbuffer slots |
| 1844 | * @frontbuffer_bits: bitmask of frontbuffer slots |
Daniel Vetter | b680c37 | 2014-09-19 18:27:27 +0200 | [diff] [blame] | 1845 | * |
| 1846 | * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them |
| 1847 | * from @old and setting them in @new. Both @old and @new can be NULL. |
| 1848 | */ |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 1849 | void i915_gem_track_fb(struct drm_i915_gem_object *old, |
| 1850 | struct drm_i915_gem_object *new, |
| 1851 | unsigned frontbuffer_bits) |
| 1852 | { |
Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 1853 | /* Control of individual bits within the mask is guarded by
| 1854 | * the owning plane->mutex, i.e. we can never see concurrent |
| 1855 | * manipulation of individual bits. But since the bitfield as a whole |
| 1856 | * is updated using RMW, we need to use atomics in order to update |
| 1857 | * the bits. |
| 1858 | */ |
| 1859 | BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > |
Chris Wilson | 74f6e18 | 2018-09-26 11:47:07 +0100 | [diff] [blame] | 1860 | BITS_PER_TYPE(atomic_t)); |
Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 1861 | |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 1862 | if (old) { |
Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 1863 | WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); |
| 1864 | atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 1865 | } |
| 1866 | |
| 1867 | if (new) { |
Chris Wilson | faf5bf0 | 2016-08-04 16:32:37 +0100 | [diff] [blame] | 1868 | WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); |
| 1869 | atomic_or(frontbuffer_bits, &new->frontbuffer_bits); |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 1870 | } |
| 1871 | } |
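/*
 * Illustrative sketch only (the precise call sites live in the display
 * code): a plane update hands the plane's frontbuffer bit over from the
 * outgoing framebuffer's object to the incoming one, along the lines of
 *
 *	i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
 *			  intel_fb_obj(new_plane_state->fb),
 *			  plane->frontbuffer_bit);
 *
 * where old_plane_state/new_plane_state are the plane states being swapped
 * and plane is the owning struct intel_plane.
 */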
| 1872 | |
Chris Wilson | 935a2f7 | 2017-02-13 17:15:13 +0000 | [diff] [blame] | 1873 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
Chris Wilson | 66d9cb5 | 2017-02-13 17:15:17 +0000 | [diff] [blame] | 1874 | #include "selftests/mock_gem_device.c" |
Chris Wilson | 3f51b7e1 | 2018-08-30 14:48:06 +0100 | [diff] [blame] | 1875 | #include "selftests/i915_gem.c" |
Chris Wilson | 935a2f7 | 2017-02-13 17:15:13 +0000 | [diff] [blame] | 1876 | #endif |