/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_renderstate.h"
#include "gt/intel_rps.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_pm.h"

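/*
 * Reserve a temporary slot in the mappable region of the global GTT. The
 * GTT pread/pwrite paths use this as a fallback window when the whole
 * object cannot be pinned into the aperture.
 */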
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->vm.mutex);
	if (err)
		return err;

	memset(node, 0, sizeof(*node));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					  size, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);

	mutex_unlock(&ggtt->vm.mutex);

	return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(node);
	mutex_unlock(&ggtt->vm.mutex);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	if (mutex_lock_interruptible(&ggtt->vm.mutex))
		return -EINTR;

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

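/*
 * Try to unbind every VMA attached to the object. Active bindings are
 * left in place (and -EBUSY returned) unless the caller passes
 * I915_GEM_OBJECT_UNBIND_ACTIVE.
 */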
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		struct i915_address_space *vm = vma->vm;

		ret = -EBUSY;
		if (!i915_vm_tryopen(vm))
			break;

		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
		    !i915_vma_is_active(vma))
			ret = i915_vma_unbind(vma);

		i915_vm_close(vm);
		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

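/*
 * Write directly into the CPU mapping of a physically contiguous
 * (phys_handle) object, then flush the CPU cache and chipset so the
 * GPU observes the new data.
 */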
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}

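/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set.
 */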
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

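/*
 * Read the object through its shmem backing store, copying a page at a
 * time into the user buffer and flushing stale cachelines where the
 * caching mode requires it.
 */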
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}

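/*
 * Copy from the GGTT io mapping to userspace. The fast path uses an
 * atomic mapping; if the copy faults (e.g. the user page is not
 * resident), fall back to a regular mapping where faults can be serviced.
 */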
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

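/*
 * Read the object through the GGTT aperture: the fallback for reads that
 * the shmem path cannot service. Either the whole object is pinned into
 * the mappable aperture, or a single scratch PTE is rewritten per page.
 */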
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	vma = ERR_PTR(-ENODEV);
	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!drm_mm_node_allocated(&node));
	}

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	if (drm_mm_node_allocated(&node)) {
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(ggtt, &node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref)
			return -EFAULT;
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = ERR_PTR(-ENODEV);
	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!drm_mm_node_allocated(&node));
	}

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	if (drm_mm_node_allocated(&node)) {
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(ggtt, &node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

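/*
 * Write into the object through its shmem backing store, copying a page
 * at a time from the user buffer and clflushing around partial cacheline
 * writes as required by the caching mode.
 */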
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

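/*
 * Called on runtime suspend: revoke all outstanding GGTT mmaps, so that
 * the next user access faults and takes a wakeref, and mark the fence
 * registers as dirty so they are restored on first use after resume.
 */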
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &i915->ggtt.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

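/*
 * Pin an object into the global GTT: a thin wrapper around
 * i915_gem_object_pin() that targets the GGTT vm and adds PIN_GLOBAL.
 */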
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;

	return i915_gem_object_pin(obj, vm, view, size, alignment,
				   flags | PIN_GLOBAL);
}

struct i915_vma *
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    const struct i915_ggtt_view *view,
		    u64 size,
		    u64 alignment,
		    u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;
	int ret;

	if (i915_gem_object_never_bind_ggtt(obj))
		return ERR_PTR(-ENODEV);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		mutex_lock(&vma->vm->mutex);
		ret = i915_vma_revoke_fence(vma);
		mutex_unlock(&vma->vm->mutex);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

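/*
 * I915_GEM_MADVISE: userspace hint on whether the object's backing
 * storage may be discarded under memory pressure (DONTNEED) or must be
 * retained (WILLNEED). Objects that have already been purged report
 * retained == false.
 */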
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	if (i915_gem_object_has_pages(obj)) {
		struct list_head *list;

		if (i915_gem_object_is_shrinkable(obj)) {
			unsigned long flags;

			spin_lock_irqsave(&i915->mm.obj_lock, flags);

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		}
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}

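/*
 * Take and release the context's timeline mutex so that any retirement
 * in progress on another CPU has completed before we inspect the
 * context state.
 */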
static int __intel_context_flush_retire(struct intel_context *ce)
{
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	intel_context_timeline_unlock(tl);
	return 0;
}

static int __intel_engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
1079 * We load a context onto the hw (with restore-inhibit), then switch
1080 * over to a second context to save that default register state. We
1081 * can then prime every new context with that state so they all start
1082 * from the same default HW values.
1083 */
1084
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001085 for_each_engine(engine, gt, id) {
Chris Wilson38775822019-08-08 12:06:11 +01001086 struct intel_context *ce;
Chris Wilsone61e0f52018-02-21 09:56:36 +00001087 struct i915_request *rq;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001088
Chris Wilson38775822019-08-08 12:06:11 +01001089 /* We must be able to switch to something! */
1090 GEM_BUG_ON(!engine->kernel_context);
1091 engine->serial++; /* force the kernel context switch */
1092
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001093 ce = intel_context_create(engine->kernel_context->gem_context,
1094 engine);
Chris Wilson38775822019-08-08 12:06:11 +01001095 if (IS_ERR(ce)) {
1096 err = PTR_ERR(ce);
1097 goto out;
1098 }
1099
Chris Wilson5e2a0412019-04-26 17:33:34 +01001100 rq = intel_context_create_request(ce);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001101 if (IS_ERR(rq)) {
1102 err = PTR_ERR(rq);
Chris Wilson38775822019-08-08 12:06:11 +01001103 intel_context_put(ce);
1104 goto out;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001105 }
1106
Chris Wilsona5627722019-07-29 12:37:20 +01001107 err = intel_engine_emit_ctx_wa(rq);
1108 if (err)
1109 goto err_rq;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001110
Chris Wilsona5627722019-07-29 12:37:20 +01001111 err = intel_renderstate_emit(rq);
1112 if (err)
1113 goto err_rq;
1114
1115err_rq:
Chris Wilson38775822019-08-08 12:06:11 +01001116 requests[id] = i915_request_get(rq);
Chris Wilson697b9a82018-06-12 11:51:35 +01001117 i915_request_add(rq);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001118 if (err)
Chris Wilson38775822019-08-08 12:06:11 +01001119 goto out;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001120 }
1121
Chris Wilson604c37d2019-03-08 09:36:55 +00001122 /* Flush the default context image to memory, and enable powersaving. */
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001123 if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
Chris Wilson604c37d2019-03-08 09:36:55 +00001124 err = -EIO;
Chris Wilson38775822019-08-08 12:06:11 +01001125 goto out;
Chris Wilson2621cef2018-07-09 13:20:43 +01001126 }
Chris Wilsond2b4b972017-11-10 14:26:33 +00001127
Chris Wilson38775822019-08-08 12:06:11 +01001128 for (id = 0; id < ARRAY_SIZE(requests); id++) {
1129 struct i915_request *rq;
1130 struct i915_vma *state;
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001131 void *vaddr;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001132
Chris Wilson38775822019-08-08 12:06:11 +01001133 rq = requests[id];
1134 if (!rq)
Chris Wilsond2b4b972017-11-10 14:26:33 +00001135 continue;
1136
Chris Wilson750bde22019-11-21 07:10:41 +00001137 GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT,
1138 &rq->hw_context->flags));
Chris Wilson38775822019-08-08 12:06:11 +01001139 state = rq->hw_context->state;
1140 if (!state)
1141 continue;
Chris Wilsonc4d52fe2019-03-08 13:25:19 +00001142
Chris Wilson750bde22019-11-21 07:10:41 +00001143 /* Serialise with retirement on another CPU */
1144 err = __intel_context_flush_retire(rq->hw_context);
1145 if (err)
1146 goto out;
1147
1148 /* We want to be able to unbind the state from the GGTT */
1149 GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
1150
Chris Wilsond2b4b972017-11-10 14:26:33 +00001151 /*
1152 * As we will hold a reference to the logical state, it will
1153 * not be torn down with the context, and importantly the
1154 * object will hold onto its vma (making it possible for a
1155 * stray GTT write to corrupt our defaults). Unmap the vma
1156 * from the GTT to prevent such accidents and reclaim the
1157 * space.
1158 */
1159 err = i915_vma_unbind(state);
1160 if (err)
Chris Wilson38775822019-08-08 12:06:11 +01001161 goto out;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001162
Chris Wilson6951e582019-05-28 10:29:51 +01001163 i915_gem_object_lock(state->obj);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001164 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
Chris Wilson6951e582019-05-28 10:29:51 +01001165 i915_gem_object_unlock(state->obj);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001166 if (err)
Chris Wilson38775822019-08-08 12:06:11 +01001167 goto out;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001168
Chris Wilson38775822019-08-08 12:06:11 +01001169 i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001170
1171 /* Check we can acquire the image of the context state */
Chris Wilson38775822019-08-08 12:06:11 +01001172 vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001173 if (IS_ERR(vaddr)) {
1174 err = PTR_ERR(vaddr);
Chris Wilson38775822019-08-08 12:06:11 +01001175 goto out;
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001176 }
1177
Chris Wilson38775822019-08-08 12:06:11 +01001178 rq->engine->default_state = i915_gem_object_get(state->obj);
1179 i915_gem_object_unpin_map(state->obj);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001180 }
1181
Chris Wilson38775822019-08-08 12:06:11 +01001182out:
Chris Wilsond2b4b972017-11-10 14:26:33 +00001183 /*
1184 * If we have to abandon now, we expect the engines to be idle
Chris Wilson604c37d2019-03-08 09:36:55 +00001185 * and ready to be torn-down. The quickest way we can accomplish
1186 * this is by declaring ourselves wedged.
Chris Wilsond2b4b972017-11-10 14:26:33 +00001187 */
Chris Wilson38775822019-08-08 12:06:11 +01001188 if (err)
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001189 intel_gt_set_wedged(gt);
Chris Wilson38775822019-08-08 12:06:11 +01001190
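	/* Drop the request and context references taken in the first pass. */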
1191 for (id = 0; id < ARRAY_SIZE(requests); id++) {
1192 struct intel_context *ce;
1193 struct i915_request *rq;
1194
1195 rq = requests[id];
1196 if (!rq)
1197 continue;
1198
1199 ce = rq->hw_context;
1200 i915_request_put(rq);
1201 intel_context_put(ce);
1202 }
1203 return err;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001204}
1205
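/*
 * Debug-build (CONFIG_DRM_I915_DEBUG_GEM) sanity check that the workarounds
 * written during init are still present in the hardware.
 */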
Tvrtko Ursulin7f63aa22019-10-22 10:47:20 +01001206static int intel_engines_verify_workarounds(struct intel_gt *gt)
Chris Wilson254e1182019-04-17 08:56:28 +01001207{
1208 struct intel_engine_cs *engine;
1209 enum intel_engine_id id;
1210 int err = 0;
1211
1212 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1213 return 0;
1214
Tvrtko Ursulin7f63aa22019-10-22 10:47:20 +01001215 for_each_engine(engine, gt, id) {
Chris Wilson254e1182019-04-17 08:56:28 +01001216 if (intel_engine_verify_workarounds(engine, "load"))
1217 err = -EIO;
1218 }
1219
1220 return err;
1221}
1222
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001223int i915_gem_init(struct drm_i915_private *dev_priv)
Chris Wilson1070a422012-04-24 15:47:41 +01001224{
Chris Wilson1070a422012-04-24 15:47:41 +01001225 int ret;
1226
Changbin Du52b24162018-05-08 17:07:05 +08001227	/* We need to fall back to 4K pages if the host doesn't support huge GTT. */
1228 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
Matthew Auldda9fe3f32017-10-06 23:18:31 +01001229 mkwrite_device_info(dev_priv)->page_sizes =
1230 I915_GTT_PAGE_SIZE_4K;
1231
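	/* Timeline tracking must be in place before any requests are created. */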
Tvrtko Ursulinf0c02c12019-06-21 08:08:10 +01001232 intel_timelines_init(dev_priv);
Chris Wilson1e345562019-01-28 10:23:56 +00001233
Chris Wilsonee487002017-11-22 17:26:21 +00001234 ret = i915_gem_init_userptr(dev_priv);
1235 if (ret)
1236 return ret;
1237
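	/*
	 * Fetch the GuC/HuC firmware images and work out the WOPCM
	 * partitioning needed to load them, before the GT is brought up.
	 */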
Daniele Ceraolo Spurioca7b2c12019-07-13 11:00:13 +01001238 intel_uc_fetch_firmwares(&dev_priv->gt.uc);
Michal Wajdeczko6bd0fbe2019-08-02 18:40:55 +00001239 intel_wopcm_init(&dev_priv->wopcm);
Michal Wajdeczkof7dc0152018-06-28 14:15:21 +00001240
Chris Wilson5e4f5182015-02-13 14:35:59 +00001241 /* This is just a security blanket to placate dragons.
1242 * On some systems, we very sporadically observe that the first TLBs
1243 * used by the CS may be stale, despite us poking the TLB reset. If
1244	 * we hold the forcewake during initialisation, these problems
1245 * just magically go away.
1246 */
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001247 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson5e4f5182015-02-13 14:35:59 +00001248
Tvrtko Ursulin1d66377a2019-06-21 08:08:05 +01001249 ret = i915_init_ggtt(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001250 if (ret) {
1251 GEM_BUG_ON(ret == -EIO);
1252 goto err_unlock;
1253 }
Jesse Barnesd62b4892013-03-08 10:45:53 -08001254
Andi Shyti42014f62019-09-05 14:14:03 +03001255 intel_gt_init(&dev_priv->gt);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08001256
Tvrtko Ursulin78f60602019-10-22 10:47:18 +01001257 ret = intel_engines_setup(&dev_priv->gt);
Chris Wilson11334c62019-04-26 17:33:33 +01001258 if (ret) {
1259 GEM_BUG_ON(ret == -EIO);
1260 goto err_unlock;
1261 }
1262
Chris Wilsona4e7ccd2019-10-04 14:40:09 +01001263 ret = i915_gem_init_contexts(dev_priv);
Chris Wilson51797492018-12-04 14:15:16 +00001264 if (ret) {
1265 GEM_BUG_ON(ret == -EIO);
1266 goto err_scratch;
1267 }
1268
Tvrtko Ursulin7841fcb2019-10-22 10:47:19 +01001269 ret = intel_engines_init(&dev_priv->gt);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001270 if (ret) {
1271 GEM_BUG_ON(ret == -EIO);
1272 goto err_context;
1273 }
Daniel Vetter53ca26c2012-04-26 23:28:03 +02001274
Michal Wajdeczko0075a202019-08-17 13:11:44 +00001275 intel_uc_init(&dev_priv->gt.uc);
Chris Wilsoncc6a8182017-11-10 14:26:30 +00001276
Tvrtko Ursulin61fa60f2019-09-10 15:38:20 +01001277 ret = intel_gt_init_hw(&dev_priv->gt);
Michał Winiarski61b5c152017-12-13 23:13:48 +01001278 if (ret)
1279 goto err_uc_init;
1280
Chris Wilson092be382019-06-26 16:45:49 +01001281	/* Only when the HW is re-initialised can we replay the requests */
1282 ret = intel_gt_resume(&dev_priv->gt);
1283 if (ret)
1284 goto err_init_hw;
1285
Chris Wilsoncc6a8182017-11-10 14:26:30 +00001286 /*
1287	 * Despite its name, intel_init_clock_gating applies both display
1288	 * clock gating workarounds and GT mmio workarounds, plus the occasional
1289 * GT power context workaround. Worse, sometimes it includes a context
1290 * register workaround which we need to apply before we record the
1291 * default HW state for all contexts.
1292 *
1293 * FIXME: break up the workarounds and apply them at the right time!
1294 */
1295 intel_init_clock_gating(dev_priv);
1296
Tvrtko Ursulin7f63aa22019-10-22 10:47:20 +01001297 ret = intel_engines_verify_workarounds(&dev_priv->gt);
Chris Wilson254e1182019-04-17 08:56:28 +01001298 if (ret)
Chris Wilson092be382019-06-26 16:45:49 +01001299 goto err_gt;
Chris Wilson254e1182019-04-17 08:56:28 +01001300
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001301 ret = __intel_engines_record_defaults(&dev_priv->gt);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001302 if (ret)
Chris Wilson092be382019-06-26 16:45:49 +01001303 goto err_gt;
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001304
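	/*
	 * Fault-injection points (compiled out in production builds) used to
	 * exercise the error-unwind paths below.
	 */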
Janusz Krzysztofikdd6e38d2019-10-29 11:20:35 +01001305 ret = i915_inject_probe_error(dev_priv, -ENODEV);
Michal Wajdeczko50d84412019-08-02 18:40:50 +00001306 if (ret)
Chris Wilson092be382019-06-26 16:45:49 +01001307 goto err_gt;
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001308
Janusz Krzysztofikdd6e38d2019-10-29 11:20:35 +01001309 ret = i915_inject_probe_error(dev_priv, -EIO);
Michal Wajdeczko50d84412019-08-02 18:40:50 +00001310 if (ret)
Chris Wilson092be382019-06-26 16:45:49 +01001311 goto err_gt;
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001312
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001313 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001314
1315 return 0;
1316
1317 /*
1318	 * Unwinding is complicated by the fact that we want -EIO to mean
1319 * disable GPU submission but keep KMS alive. We want to mark the
1320	 * HW as irreversibly wedged, but keep enough state around that the
1321 * driver doesn't explode during runtime.
1322 */
Chris Wilson092be382019-06-26 16:45:49 +01001323err_gt:
Michał Winiarski5311f512019-09-26 14:31:40 +01001324 intel_gt_set_wedged_on_init(&dev_priv->gt);
Chris Wilson5861b012019-03-08 09:36:54 +00001325 i915_gem_suspend(dev_priv);
Chris Wilson8571a052018-06-06 15:54:41 +01001326 i915_gem_suspend_late(dev_priv);
1327
Chris Wilson8bcf9f72018-07-10 10:44:20 +01001328 i915_gem_drain_workqueue(dev_priv);
Chris Wilson092be382019-06-26 16:45:49 +01001329err_init_hw:
Daniele Ceraolo Spurioca7b2c12019-07-13 11:00:13 +01001330 intel_uc_fini_hw(&dev_priv->gt.uc);
Michał Winiarski61b5c152017-12-13 23:13:48 +01001331err_uc_init:
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001332 if (ret != -EIO) {
Michal Wajdeczko0075a202019-08-17 13:11:44 +00001333 intel_uc_fini(&dev_priv->gt.uc);
Tvrtko Ursulinb0258bf2019-10-22 10:47:17 +01001334 intel_engines_cleanup(&dev_priv->gt);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001335 }
1336err_context:
1337 if (ret != -EIO)
Chris Wilsona4e7ccd2019-10-04 14:40:09 +01001338 i915_gem_driver_release__contexts(dev_priv);
Chris Wilson51797492018-12-04 14:15:16 +00001339err_scratch:
Andi Shyti42014f62019-09-05 14:14:03 +03001340 intel_gt_driver_release(&dev_priv->gt);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001341err_unlock:
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001342 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001343
Chris Wilson1e345562019-01-28 10:23:56 +00001344 if (ret != -EIO) {
Michal Wajdeczkoa5f978c2019-08-11 19:51:32 +00001345 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001346 i915_gem_cleanup_userptr(dev_priv);
Tvrtko Ursulinf0c02c12019-06-21 08:08:10 +01001347 intel_timelines_fini(dev_priv);
Chris Wilson1e345562019-01-28 10:23:56 +00001348 }
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001349
Chris Wilson60990322014-04-09 09:19:42 +01001350 if (ret == -EIO) {
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001351 /*
Michal Wajdeczkoa5f978c2019-08-11 19:51:32 +00001352 * Allow engines or uC initialisation to fail by marking the GPU
1353	 * as wedged. But we only want to do this when the GPU is angry;
1354	 * for all other failures, such as an allocation failure, bail.
1355 */
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001356 if (!intel_gt_is_wedged(&dev_priv->gt)) {
Janusz Krzysztofikf2db53f2019-07-12 13:24:27 +02001357 i915_probe_error(dev_priv,
1358 "Failed to initialize GPU, declaring it wedged!\n");
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001359 intel_gt_set_wedged(&dev_priv->gt);
Chris Wilson6f74b362017-10-15 15:37:25 +01001360 }
Chris Wilson7ed43df2018-07-26 09:50:32 +01001361
1362 /* Minimal basic recovery for KMS */
1363 ret = i915_ggtt_enable_hw(dev_priv);
1364 i915_gem_restore_gtt_mappings(dev_priv);
Chris Wilsone9d4c922019-10-16 15:32:33 +01001365 i915_gem_restore_fences(&dev_priv->ggtt);
Chris Wilson7ed43df2018-07-26 09:50:32 +01001366 intel_init_clock_gating(dev_priv);
Chris Wilson1070a422012-04-24 15:47:41 +01001367 }
1368
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001369 i915_gem_drain_freed_objects(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01001370 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01001371}
1372
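/*
 * Registration step run after initialisation has succeeded: expose the
 * shrinker to the VM and publish the engine list for userspace enumeration.
 */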
Chris Wilsonc29579d2019-08-06 13:42:59 +01001373void i915_gem_driver_register(struct drm_i915_private *i915)
1374{
1375 i915_gem_driver_register__shrinker(i915);
Chris Wilson750e76b2019-08-06 13:43:00 +01001376
1377 intel_engines_driver_register(i915);
Chris Wilsonc29579d2019-08-06 13:42:59 +01001378}
1379
1380void i915_gem_driver_unregister(struct drm_i915_private *i915)
1381{
1382 i915_gem_driver_unregister__shrinker(i915);
1383}
1384
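/*
 * Device-removal teardown: quiesce the GPU, detach the GT and release the
 * uC, leaving final freeing of software state to i915_gem_driver_release().
 */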
Janusz Krzysztofik78dae1a2019-07-12 13:24:29 +02001385void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001386{
Chris Wilson0cf289b2019-06-13 08:32:54 +01001387 intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
Chris Wilsonb27e35a2019-05-27 12:51:14 +01001388
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001389 i915_gem_suspend_late(dev_priv);
Andi Shyti42014f62019-09-05 14:14:03 +03001390 intel_gt_driver_remove(&dev_priv->gt);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001391
1392 /* Flush any outstanding unpin_work. */
1393 i915_gem_drain_workqueue(dev_priv);
1394
Daniele Ceraolo Spurioca7b2c12019-07-13 11:00:13 +01001395 intel_uc_fini_hw(&dev_priv->gt.uc);
1396 intel_uc_fini(&dev_priv->gt.uc);
Janusz Krzysztofik47bc28d2019-05-30 15:31:05 +02001397
1398 i915_gem_drain_freed_objects(dev_priv);
1399}
1400
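/*
 * Final software teardown: free the engines, contexts, GT state, workaround
 * lists, firmware blobs and timelines left behind by i915_gem_driver_remove().
 */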
Janusz Krzysztofik3b58a942019-07-12 13:24:28 +02001401void i915_gem_driver_release(struct drm_i915_private *dev_priv)
Janusz Krzysztofik47bc28d2019-05-30 15:31:05 +02001402{
Tvrtko Ursulinb0258bf2019-10-22 10:47:17 +01001403 intel_engines_cleanup(&dev_priv->gt);
Chris Wilsona4e7ccd2019-10-04 14:40:09 +01001404 i915_gem_driver_release__contexts(dev_priv);
Andi Shyti42014f62019-09-05 14:14:03 +03001405 intel_gt_driver_release(&dev_priv->gt);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001406
Tvrtko Ursulin25d140f2018-12-03 13:33:19 +00001407 intel_wa_list_free(&dev_priv->gt_wa_list);
1408
Daniele Ceraolo Spurioca7b2c12019-07-13 11:00:13 +01001409 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001410 i915_gem_cleanup_userptr(dev_priv);
Tvrtko Ursulinf0c02c12019-06-21 08:08:10 +01001411 intel_timelines_fini(dev_priv);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001412
1413 i915_gem_drain_freed_objects(dev_priv);
1414
Chris Wilsona4e7ccd2019-10-04 14:40:09 +01001415 WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001416}
1417
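/*
 * Early init of the object bookkeeping (free list, purge and shrink lists)
 * used by the shrinker and the deferred object-free machinery.
 */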
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001418static void i915_gem_init__mm(struct drm_i915_private *i915)
1419{
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001420 spin_lock_init(&i915->mm.obj_lock);
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001421
1422 init_llist_head(&i915->mm.free_list);
1423
Chris Wilson3b4fa962019-05-30 21:34:59 +01001424 INIT_LIST_HEAD(&i915->mm.purge_list);
Chris Wilsonecab9be2019-06-12 11:57:20 +01001425 INIT_LIST_HEAD(&i915->mm.shrink_list);
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001426
Chris Wilson84753552019-05-28 10:29:45 +01001427 i915_gem_init__objects(i915);
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001428}
1429
Matthew Aulda3f356b2019-09-27 18:33:49 +01001430void i915_gem_init_early(struct drm_i915_private *dev_priv)
Eric Anholt673a3942008-07-30 12:06:12 -07001431{
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001432 i915_gem_init__mm(dev_priv);
Chris Wilsonf2123812017-10-16 12:40:37 +01001433
Chris Wilsonb5add952016-08-04 16:32:36 +01001434 spin_lock_init(&dev_priv->fb_tracking.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001435}
Dave Airlie71acb5e2008-12-30 20:31:46 +10001436
Michal Wajdeczkoa0de9082018-03-23 12:34:49 +00001437void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
Imre Deakd64aa092016-01-19 15:26:29 +02001438{
Chris Wilsonc4d4c1c2017-02-10 16:35:23 +00001439 i915_gem_drain_freed_objects(dev_priv);
Chris Wilsonc9c704712018-02-19 22:06:31 +00001440 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1441 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
Chris Wilsond82b4b22019-05-30 21:35:00 +01001442 WARN_ON(dev_priv->mm.shrink_count);
Imre Deakd64aa092016-01-19 15:26:29 +02001443}
1444
Chris Wilson6a800ea2016-09-21 14:51:07 +01001445int i915_gem_freeze(struct drm_i915_private *dev_priv)
1446{
Chris Wilsond0aa3012017-04-07 11:25:49 +01001447	/* Discard all purgeable objects; let userspace recover those as
1448 * required after resuming.
1449 */
Chris Wilson6a800ea2016-09-21 14:51:07 +01001450 i915_gem_shrink_all(dev_priv);
Chris Wilson6a800ea2016-09-21 14:51:07 +01001451
Chris Wilson6a800ea2016-09-21 14:51:07 +01001452 return 0;
1453}
1454
Chris Wilson95c778d2018-06-01 15:41:25 +01001455int i915_gem_freeze_late(struct drm_i915_private *i915)
Chris Wilson461fb992016-05-14 07:26:33 +01001456{
1457 struct drm_i915_gem_object *obj;
Chris Wilsonecab9be2019-06-12 11:57:20 +01001458 intel_wakeref_t wakeref;
Chris Wilson461fb992016-05-14 07:26:33 +01001459
Chris Wilson95c778d2018-06-01 15:41:25 +01001460 /*
1461 * Called just before we write the hibernation image.
Chris Wilson461fb992016-05-14 07:26:33 +01001462 *
1463 * We need to update the domain tracking to reflect that the CPU
1464 * will be accessing all the pages to create and restore from the
1465 * hibernation, and so upon restoration those pages will be in the
1466 * CPU domain.
1467 *
1468 * To make sure the hibernation image contains the latest state,
1469 * we update that state just before writing out the image.
Chris Wilson7aab2d52016-09-09 20:02:18 +01001470 *
1471 * To try and reduce the hibernation image, we manually shrink
Chris Wilsond0aa3012017-04-07 11:25:49 +01001472 * the objects as well, see i915_gem_freeze()
Chris Wilson461fb992016-05-14 07:26:33 +01001473 */
1474
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001475 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
Chris Wilsonecab9be2019-06-12 11:57:20 +01001476
1477 i915_gem_shrink(i915, -1UL, NULL, ~0);
Chris Wilson95c778d2018-06-01 15:41:25 +01001478 i915_gem_drain_freed_objects(i915);
Chris Wilson461fb992016-05-14 07:26:33 +01001479
Chris Wilsonecab9be2019-06-12 11:57:20 +01001480 list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
1481 i915_gem_object_lock(obj);
1482 WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
1483 i915_gem_object_unlock(obj);
Chris Wilson461fb992016-05-14 07:26:33 +01001484 }
Chris Wilsonecab9be2019-06-12 11:57:20 +01001485
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001486 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
Chris Wilson461fb992016-05-14 07:26:33 +01001487
1488 return 0;
1489}
1490
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001491void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00001492{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001493 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsone61e0f52018-02-21 09:56:36 +00001494 struct i915_request *request;
Eric Anholtb9624422009-06-03 07:27:35 +00001495
1496 /* Clean up our request list when the client is going away, so that
1497 * later retire_requests won't dereference our soon-to-be-gone
1498 * file_priv.
1499 */
Chris Wilson1c255952010-09-26 11:03:27 +01001500 spin_lock(&file_priv->mm.lock);
Chris Wilsonc8659ef2017-03-02 12:25:25 +00001501 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001502 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01001503 spin_unlock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001504}
1505
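/* Per-file (per-client) state, torn down again in i915_gem_release(). */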
Chris Wilson829a0af2017-06-20 12:05:45 +01001506int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001507{
1508 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08001509 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001510
Chris Wilsonc4c29d72016-11-09 10:45:07 +00001511 DRM_DEBUG("\n");
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001512
1513 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1514 if (!file_priv)
1515 return -ENOMEM;
1516
1517 file->driver_priv = file_priv;
Chris Wilson829a0af2017-06-20 12:05:45 +01001518 file_priv->dev_priv = i915;
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001519 file_priv->file = file;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001520
1521 spin_lock_init(&file_priv->mm.lock);
1522 INIT_LIST_HEAD(&file_priv->mm.request_list);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001523
Chris Wilsonc80ff162016-07-27 09:07:27 +01001524 file_priv->bsd_engine = -1;
Mika Kuoppala14921f32018-06-15 13:44:29 +03001525 file_priv->hang_timestamp = jiffies;
Tvrtko Ursulinde1add32016-01-15 15:12:50 +00001526
Chris Wilson829a0af2017-06-20 12:05:45 +01001527 ret = i915_gem_context_open(i915, file);
Ben Widawskye422b882013-12-06 14:10:58 -08001528 if (ret)
1529 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001530
Ben Widawskye422b882013-12-06 14:10:58 -08001531 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001532}
1533
Chris Wilson935a2f72017-02-13 17:15:13 +00001534#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
Chris Wilson66d9cb52017-02-13 17:15:17 +00001535#include "selftests/mock_gem_device.c"
Chris Wilson3f51b7e12018-08-30 14:48:06 +01001536#include "selftests/i915_gem.c"
Chris Wilson935a2f72017-02-13 17:15:13 +00001537#endif