/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_renderstate.h"
#include "gt/intel_rps.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_pm.h"

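/*
 * Helpers for the GGTT pread/pwrite fallback paths below: when an object
 * cannot simply be pinned into the mappable aperture, a single scratch
 * page slot is reserved in the GGTT and each page of the object is mapped
 * through it in turn.
 */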
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->vm.mutex);
	if (err)
		return err;

	memset(node, 0, sizeof(*node));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					  size, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);

	mutex_unlock(&ggtt->vm.mutex);

	return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(node);
	mutex_unlock(&ggtt->vm.mutex);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	if (mutex_lock_interruptible(&ggtt->vm.mutex))
		return -EINTR;

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

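/*
 * Walk the object's VMA list and unbind each VMA, dropping obj->vma.lock
 * while the unbind itself runs. Entries already visited are parked on a
 * local list and spliced back at the end, so the walk terminates even
 * though the lock is released in the middle.
 */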
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		struct i915_address_space *vm = vma->vm;

		ret = -EBUSY;
		if (!i915_vm_tryopen(vm))
			break;

		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
		    !i915_vma_is_active(vma))
			ret = i915_vma_unbind(vma);

		i915_vm_close(vm);
		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}

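/*
 * Illustrative userspace usage of the create ioctl (not part of this file,
 * shown only for orientation): through libdrm this is reached roughly as
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	use(create.handle);
 *
 * where the requested size is rounded up to a whole number of pages by
 * i915_gem_create() above.
 */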
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}

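/*
 * Read through the GGTT io mapping. The atomic WC mapping cannot take
 * faults, so if the inatomic copy does not complete we drop back to a
 * regular mapping and a faultable copy_to_user().
 */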
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

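/*
 * Fallback read via the mappable aperture, used when the shmem path cannot
 * service the pread (e.g. no struct pages, or a CPU mapping fault). The
 * object is either pinned into the mappable region or, failing that, read
 * one page at a time through the scratch slot reserved by
 * insert_mappable_node().
 */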
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	vma = ERR_PTR(-ENODEV);
	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!drm_mm_node_allocated(&node));
	}

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	if (drm_mm_node_allocated(&node)) {
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(ggtt, &node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref)
			return -EFAULT;
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = ERR_PTR(-ENODEV);
	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!drm_mm_node_allocated(&node));
	}

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	if (drm_mm_node_allocated(&node)) {
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(ggtt, &node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

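	/*
	 * Worked example (illustrative): with a 64-byte cacheline,
	 * partial_cacheline_write is 63, so (offset | length) & 63 is
	 * non-zero whenever a copy does not start and end on cacheline
	 * boundaries, and shmem_pwrite() then clflushes before writing.
	 */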
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &i915->ggtt.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

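/*
 * Pin an object into the global GTT, creating (or reusing) the VMA for the
 * requested view. For mappable pins, the heuristics below reject or soften
 * requests that would otherwise thrash the limited mappable aperture.
 */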
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct i915_vma *vma;
	int ret;

	if (i915_gem_object_never_bind_ggtt(obj))
		return ERR_PTR(-ENODEV);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		mutex_lock(&vma->vm->mutex);
		ret = i915_vma_revoke_fence(vma);
		mutex_unlock(&vma->vm->mutex);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

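/*
 * The madvise ioctl lets userspace mark an object's backing storage as
 * DONTNEED (purgeable under memory pressure) or WILLNEED. Purgeable objects
 * move onto the purge list; once the backing store has actually been
 * discarded, the object reports retained == 0 back to the caller.
 */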
Eric Anholt673a3942008-07-30 12:06:12 -0700968int
Chris Wilson3ef94da2009-09-14 16:50:29 +0100969i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
970 struct drm_file *file_priv)
971{
Chris Wilson3b4fa962019-05-30 21:34:59 +0100972 struct drm_i915_private *i915 = to_i915(dev);
Chris Wilson3ef94da2009-09-14 16:50:29 +0100973 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +0000974 struct drm_i915_gem_object *obj;
Chris Wilson1233e2d2016-10-28 13:58:37 +0100975 int err;
Chris Wilson3ef94da2009-09-14 16:50:29 +0100976
977 switch (args->madv) {
978 case I915_MADV_DONTNEED:
979 case I915_MADV_WILLNEED:
980 break;
981 default:
982 return -EINVAL;
983 }
984
Chris Wilson03ac0642016-07-20 13:31:51 +0100985 obj = i915_gem_object_lookup(file_priv, args->handle);
Chris Wilson1233e2d2016-10-28 13:58:37 +0100986 if (!obj)
987 return -ENOENT;
988
989 err = mutex_lock_interruptible(&obj->mm.lock);
990 if (err)
991 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +0100992
Chris Wilsonf1fa4f42017-10-13 21:26:13 +0100993 if (i915_gem_object_has_pages(obj) &&
Chris Wilson3e510a82016-08-05 10:14:23 +0100994 i915_gem_object_is_tiled(obj) &&
Chris Wilson3b4fa962019-05-30 21:34:59 +0100995 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
Chris Wilsonbc0629a2016-11-01 10:03:17 +0000996 if (obj->mm.madv == I915_MADV_WILLNEED) {
997 GEM_BUG_ON(!obj->mm.quirked);
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100998 __i915_gem_object_unpin_pages(obj);
Chris Wilsonbc0629a2016-11-01 10:03:17 +0000999 obj->mm.quirked = false;
1000 }
1001 if (args->madv == I915_MADV_WILLNEED) {
Chris Wilson2c3a3f42016-11-04 10:30:01 +00001002 GEM_BUG_ON(obj->mm.quirked);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01001003 __i915_gem_object_pin_pages(obj);
Chris Wilsonbc0629a2016-11-01 10:03:17 +00001004 obj->mm.quirked = true;
1005 }
Daniel Vetter656bfa32014-11-20 09:26:30 +01001006 }
1007
Chris Wilsona4f5ea62016-10-28 13:58:35 +01001008 if (obj->mm.madv != __I915_MADV_PURGED)
1009 obj->mm.madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001010
Chris Wilson3b4fa962019-05-30 21:34:59 +01001011 if (i915_gem_object_has_pages(obj)) {
1012 struct list_head *list;
1013
Chris Wilsond82b4b22019-05-30 21:35:00 +01001014 if (i915_gem_object_is_shrinkable(obj)) {
Chris Wilsona8cff4c82019-06-10 15:54:30 +01001015 unsigned long flags;
1016
1017 spin_lock_irqsave(&i915->mm.obj_lock, flags);
1018
Chris Wilsond82b4b22019-05-30 21:35:00 +01001019 if (obj->mm.madv != I915_MADV_WILLNEED)
1020 list = &i915->mm.purge_list;
Chris Wilsond82b4b22019-05-30 21:35:00 +01001021 else
Chris Wilsonecab9be2019-06-12 11:57:20 +01001022 list = &i915->mm.shrink_list;
Chris Wilsond82b4b22019-05-30 21:35:00 +01001023 list_move_tail(&obj->mm.link, list);
Chris Wilsona8cff4c82019-06-10 15:54:30 +01001024
1025 spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
Chris Wilsond82b4b22019-05-30 21:35:00 +01001026 }
Chris Wilson3b4fa962019-05-30 21:34:59 +01001027 }
1028
Chris Wilson6c085a72012-08-20 11:40:46 +02001029 /* if the object is no longer attached, discard its backing storage */
Chris Wilsonf1fa4f42017-10-13 21:26:13 +01001030 if (obj->mm.madv == I915_MADV_DONTNEED &&
1031 !i915_gem_object_has_pages(obj))
Chris Wilsonf0334282019-05-28 10:29:46 +01001032 i915_gem_object_truncate(obj);
Chris Wilson2d7ef392009-09-20 23:13:10 +01001033
Chris Wilsona4f5ea62016-10-28 13:58:35 +01001034 args->retained = obj->mm.madv != __I915_MADV_PURGED;
Chris Wilson1233e2d2016-10-28 13:58:37 +01001035 mutex_unlock(&obj->mm.lock);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001036
Chris Wilson1233e2d2016-10-28 13:58:37 +01001037out:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01001038 i915_gem_object_put(obj);
Chris Wilson1233e2d2016-10-28 13:58:37 +01001039 return err;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001040}
1041
Chris Wilson24145512017-01-24 11:01:35 +00001042void i915_gem_sanitize(struct drm_i915_private *i915)
1043{
Chris Wilson538ef962019-01-14 14:21:18 +00001044 intel_wakeref_t wakeref;
1045
Chris Wilsonc3160da2018-05-31 09:22:45 +01001046 GEM_TRACE("\n");
1047
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001048 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001049 intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
Chris Wilsonc3160da2018-05-31 09:22:45 +01001050
1051 /*
1052 * As we have just resumed the machine and woken the device up from
1053 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
1054 * back to defaults, recovering from whatever wedged state we left it
1055 * in and so worth trying to use the device once more.
1056 */
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001057 if (intel_gt_is_wedged(&i915->gt))
1058 intel_gt_unset_wedged(&i915->gt);
Chris Wilsonf36325f2017-08-26 12:09:34 +01001059
Chris Wilson24145512017-01-24 11:01:35 +00001060 /*
1061 * If we inherit context state from the BIOS or earlier occupants
1062 * of the GPU, the GPU may be in an inconsistent state when we
1063 * try to take over. The only way to remove the earlier state
1064 * is by resetting. However, resetting on earlier gen is tricky as
1065 * it may impact the display and we are uncertain about the stability
Joonas Lahtinenea117b82017-04-28 10:53:38 +03001066 * of the reset, so this could be applied to even earlier gen.
Chris Wilson24145512017-01-24 11:01:35 +00001067 */
Chris Wilson0c916212019-06-25 14:01:10 +01001068 intel_gt_sanitize(&i915->gt, false);
Chris Wilsonc3160da2018-05-31 09:22:45 +01001069
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001070 intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001071 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
Chris Wilson24145512017-01-24 11:01:35 +00001072}
1073
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001074static int __intel_engines_record_defaults(struct intel_gt *gt)
Chris Wilsond2b4b972017-11-10 14:26:33 +00001075{
Chris Wilson38775822019-08-08 12:06:11 +01001076 struct i915_request *requests[I915_NUM_ENGINES] = {};
Chris Wilsond2b4b972017-11-10 14:26:33 +00001077 struct intel_engine_cs *engine;
1078 enum intel_engine_id id;
Chris Wilson604c37d2019-03-08 09:36:55 +00001079 int err = 0;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001080
1081 /*
1082 * As we reset the gpu during very early sanitisation, the current
1083 * register state on the GPU should reflect its defaults values.
1084 * We load a context onto the hw (with restore-inhibit), then switch
1085 * over to a second context to save that default register state. We
1086 * can then prime every new context with that state so they all start
1087 * from the same default HW values.
1088 */
1089
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001090 for_each_engine(engine, gt, id) {
Chris Wilson38775822019-08-08 12:06:11 +01001091 struct intel_context *ce;
Chris Wilsone61e0f52018-02-21 09:56:36 +00001092 struct i915_request *rq;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001093
Chris Wilson38775822019-08-08 12:06:11 +01001094 /* We must be able to switch to something! */
1095 GEM_BUG_ON(!engine->kernel_context);
1096 engine->serial++; /* force the kernel context switch */
1097
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001098 ce = intel_context_create(engine->kernel_context->gem_context,
1099 engine);
Chris Wilson38775822019-08-08 12:06:11 +01001100 if (IS_ERR(ce)) {
1101 err = PTR_ERR(ce);
1102 goto out;
1103 }
1104
Chris Wilson5e2a0412019-04-26 17:33:34 +01001105 rq = intel_context_create_request(ce);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001106 if (IS_ERR(rq)) {
1107 err = PTR_ERR(rq);
Chris Wilson38775822019-08-08 12:06:11 +01001108 intel_context_put(ce);
1109 goto out;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001110 }
1111
Chris Wilsona5627722019-07-29 12:37:20 +01001112 err = intel_engine_emit_ctx_wa(rq);
1113 if (err)
1114 goto err_rq;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001115
Chris Wilsona5627722019-07-29 12:37:20 +01001116 err = intel_renderstate_emit(rq);
1117 if (err)
1118 goto err_rq;
1119
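		/*
		 * Even on error the request must be submitted, and it is
		 * recorded so that the teardown loop at the end releases both
		 * the request and its context.
		 */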
1120err_rq:
Chris Wilson38775822019-08-08 12:06:11 +01001121 requests[id] = i915_request_get(rq);
Chris Wilson697b9a82018-06-12 11:51:35 +01001122 i915_request_add(rq);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001123 if (err)
Chris Wilson38775822019-08-08 12:06:11 +01001124 goto out;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001125 }
1126
Chris Wilson604c37d2019-03-08 09:36:55 +00001127 /* Flush the default context image to memory, and enable powersaving. */
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001128 if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
Chris Wilson604c37d2019-03-08 09:36:55 +00001129 err = -EIO;
Chris Wilson38775822019-08-08 12:06:11 +01001130 goto out;
Chris Wilson2621cef2018-07-09 13:20:43 +01001131 }
Chris Wilsond2b4b972017-11-10 14:26:33 +00001132
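	/*
	 * The GT is now idle, so each context image holds its engine's default
	 * register state. Unbind it from the GGTT, move it to the CPU domain
	 * and keep a reference as engine->default_state for priming new
	 * contexts.
	 */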
Chris Wilson38775822019-08-08 12:06:11 +01001133 for (id = 0; id < ARRAY_SIZE(requests); id++) {
1134 struct i915_request *rq;
1135 struct i915_vma *state;
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001136 void *vaddr;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001137
Chris Wilson38775822019-08-08 12:06:11 +01001138 rq = requests[id];
1139 if (!rq)
Chris Wilsond2b4b972017-11-10 14:26:33 +00001140 continue;
1141
Chris Wilson38775822019-08-08 12:06:11 +01001142 /* We want to be able to unbind the state from the GGTT */
1143 GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
1144
1145 state = rq->hw_context->state;
1146 if (!state)
1147 continue;
Chris Wilsonc4d52fe2019-03-08 13:25:19 +00001148
Chris Wilsond2b4b972017-11-10 14:26:33 +00001149 /*
1150 * As we will hold a reference to the logical state, it will
1151 * not be torn down with the context, and importantly the
1152 * object will hold onto its vma (making it possible for a
1153 * stray GTT write to corrupt our defaults). Unmap the vma
1154 * from the GTT to prevent such accidents and reclaim the
1155 * space.
1156 */
1157 err = i915_vma_unbind(state);
1158 if (err)
Chris Wilson38775822019-08-08 12:06:11 +01001159 goto out;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001160
Chris Wilson6951e582019-05-28 10:29:51 +01001161 i915_gem_object_lock(state->obj);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001162 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
Chris Wilson6951e582019-05-28 10:29:51 +01001163 i915_gem_object_unlock(state->obj);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001164 if (err)
Chris Wilson38775822019-08-08 12:06:11 +01001165 goto out;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001166
Chris Wilson38775822019-08-08 12:06:11 +01001167 i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001168
1169 /* Check we can acquire the image of the context state */
Chris Wilson38775822019-08-08 12:06:11 +01001170 vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001171 if (IS_ERR(vaddr)) {
1172 err = PTR_ERR(vaddr);
Chris Wilson38775822019-08-08 12:06:11 +01001173 goto out;
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001174 }
1175
Chris Wilson38775822019-08-08 12:06:11 +01001176 rq->engine->default_state = i915_gem_object_get(state->obj);
1177 i915_gem_object_unpin_map(state->obj);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001178 }
1179
Chris Wilson38775822019-08-08 12:06:11 +01001180out:
Chris Wilsond2b4b972017-11-10 14:26:33 +00001181 /*
1182 * If we have to abandon now, we expect the engines to be idle
Chris Wilson604c37d2019-03-08 09:36:55 +00001183	 * and ready to be torn down. The quickest way we can accomplish
1184 * this is by declaring ourselves wedged.
Chris Wilsond2b4b972017-11-10 14:26:33 +00001185 */
Chris Wilson38775822019-08-08 12:06:11 +01001186 if (err)
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001187 intel_gt_set_wedged(gt);
Chris Wilson38775822019-08-08 12:06:11 +01001188
1189 for (id = 0; id < ARRAY_SIZE(requests); id++) {
1190 struct intel_context *ce;
1191 struct i915_request *rq;
1192
1193 rq = requests[id];
1194 if (!rq)
1195 continue;
1196
1197 ce = rq->hw_context;
1198 i915_request_put(rq);
1199 intel_context_put(ce);
1200 }
1201 return err;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001202}
1203
Tvrtko Ursulin7f63aa22019-10-22 10:47:20 +01001204static int intel_engines_verify_workarounds(struct intel_gt *gt)
Chris Wilson254e1182019-04-17 08:56:28 +01001205{
1206 struct intel_engine_cs *engine;
1207 enum intel_engine_id id;
1208 int err = 0;
1209
1210 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1211 return 0;
1212
Tvrtko Ursulin7f63aa22019-10-22 10:47:20 +01001213 for_each_engine(engine, gt, id) {
Chris Wilson254e1182019-04-17 08:56:28 +01001214 if (intel_engine_verify_workarounds(engine, "load"))
1215 err = -EIO;
1216 }
1217
1218 return err;
1219}
1220
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001221int i915_gem_init(struct drm_i915_private *dev_priv)
Chris Wilson1070a422012-04-24 15:47:41 +01001222{
Chris Wilson1070a422012-04-24 15:47:41 +01001223 int ret;
1224
Changbin Du52b24162018-05-08 17:07:05 +08001225	/* We need to fall back to 4K pages if the host doesn't support huge GTT. */
1226 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
Matthew Auldda9fe3f32017-10-06 23:18:31 +01001227 mkwrite_device_info(dev_priv)->page_sizes =
1228 I915_GTT_PAGE_SIZE_4K;
1229
Tvrtko Ursulinf0c02c12019-06-21 08:08:10 +01001230 intel_timelines_init(dev_priv);
Chris Wilson1e345562019-01-28 10:23:56 +00001231
Chris Wilsonee487002017-11-22 17:26:21 +00001232 ret = i915_gem_init_userptr(dev_priv);
1233 if (ret)
1234 return ret;
1235
Daniele Ceraolo Spurioca7b2c12019-07-13 11:00:13 +01001236 intel_uc_fetch_firmwares(&dev_priv->gt.uc);
Michal Wajdeczko6bd0fbe2019-08-02 18:40:55 +00001237 intel_wopcm_init(&dev_priv->wopcm);
Michal Wajdeczkof7dc0152018-06-28 14:15:21 +00001238
Chris Wilson5e4f5182015-02-13 14:35:59 +00001239 /* This is just a security blanket to placate dragons.
1240 * On some systems, we very sporadically observe that the first TLBs
1241 * used by the CS may be stale, despite us poking the TLB reset. If
1242 * we hold the forcewake during initialisation these problems
1243 * just magically go away.
1244 */
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001245 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson5e4f5182015-02-13 14:35:59 +00001246
Tvrtko Ursulin1d66377a2019-06-21 08:08:05 +01001247 ret = i915_init_ggtt(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001248 if (ret) {
1249 GEM_BUG_ON(ret == -EIO);
1250 goto err_unlock;
1251 }
Jesse Barnesd62b4892013-03-08 10:45:53 -08001252
Andi Shyti42014f62019-09-05 14:14:03 +03001253 intel_gt_init(&dev_priv->gt);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08001254
Tvrtko Ursulin78f60602019-10-22 10:47:18 +01001255 ret = intel_engines_setup(&dev_priv->gt);
Chris Wilson11334c62019-04-26 17:33:33 +01001256 if (ret) {
1257 GEM_BUG_ON(ret == -EIO);
1258 goto err_unlock;
1259 }
1260
Chris Wilsona4e7ccd2019-10-04 14:40:09 +01001261 ret = i915_gem_init_contexts(dev_priv);
Chris Wilson51797492018-12-04 14:15:16 +00001262 if (ret) {
1263 GEM_BUG_ON(ret == -EIO);
1264 goto err_scratch;
1265 }
1266
Tvrtko Ursulin7841fcb2019-10-22 10:47:19 +01001267 ret = intel_engines_init(&dev_priv->gt);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001268 if (ret) {
1269 GEM_BUG_ON(ret == -EIO);
1270 goto err_context;
1271 }
Daniel Vetter53ca26c2012-04-26 23:28:03 +02001272
Michal Wajdeczko0075a202019-08-17 13:11:44 +00001273 intel_uc_init(&dev_priv->gt.uc);
Chris Wilsoncc6a8182017-11-10 14:26:30 +00001274
Tvrtko Ursulin61fa60f2019-09-10 15:38:20 +01001275 ret = intel_gt_init_hw(&dev_priv->gt);
Michał Winiarski61b5c152017-12-13 23:13:48 +01001276 if (ret)
1277 goto err_uc_init;
1278
Chris Wilson092be382019-06-26 16:45:49 +01001279 /* Only when the HW is re-initialised, can we replay the requests */
1280 ret = intel_gt_resume(&dev_priv->gt);
1281 if (ret)
1282 goto err_init_hw;
1283
Chris Wilsoncc6a8182017-11-10 14:26:30 +00001284 /*
	1285	 * Despite its name, intel_init_clock_gating applies not only display
	1286	 * clock gating workarounds but also GT mmio workarounds and the occasional
	1287	 * GT power context workaround. Worse, sometimes it includes a context
1288 * register workaround which we need to apply before we record the
1289 * default HW state for all contexts.
1290 *
1291 * FIXME: break up the workarounds and apply them at the right time!
1292 */
1293 intel_init_clock_gating(dev_priv);
1294
Tvrtko Ursulin7f63aa22019-10-22 10:47:20 +01001295 ret = intel_engines_verify_workarounds(&dev_priv->gt);
Chris Wilson254e1182019-04-17 08:56:28 +01001296 if (ret)
Chris Wilson092be382019-06-26 16:45:49 +01001297 goto err_gt;
Chris Wilson254e1182019-04-17 08:56:28 +01001298
Chris Wilsonae2e28b2019-10-22 15:19:35 +01001299 ret = __intel_engines_record_defaults(&dev_priv->gt);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001300 if (ret)
Chris Wilson092be382019-06-26 16:45:49 +01001301 goto err_gt;
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001302
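	/*
	 * Deliberate failure-injection points (no-ops unless load-failure
	 * injection is enabled in debug builds) so CI can exercise the error
	 * unwind paths below.
	 */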
Michal Wajdeczko50d84412019-08-02 18:40:50 +00001303 ret = i915_inject_load_error(dev_priv, -ENODEV);
1304 if (ret)
Chris Wilson092be382019-06-26 16:45:49 +01001305 goto err_gt;
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001306
Michal Wajdeczko50d84412019-08-02 18:40:50 +00001307 ret = i915_inject_load_error(dev_priv, -EIO);
1308 if (ret)
Chris Wilson092be382019-06-26 16:45:49 +01001309 goto err_gt;
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001310
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001311 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001312
1313 return 0;
1314
1315 /*
	1316	 * Unwinding is complicated by the fact that we want to handle -EIO to mean
	1317	 * disable GPU submission but keep KMS alive. We want to mark the
	1318	 * HW as irrevocably wedged, but keep enough state around that the
1319 * driver doesn't explode during runtime.
1320 */
Chris Wilson092be382019-06-26 16:45:49 +01001321err_gt:
Michał Winiarski5311f512019-09-26 14:31:40 +01001322 intel_gt_set_wedged_on_init(&dev_priv->gt);
Chris Wilson5861b012019-03-08 09:36:54 +00001323 i915_gem_suspend(dev_priv);
Chris Wilson8571a052018-06-06 15:54:41 +01001324 i915_gem_suspend_late(dev_priv);
1325
Chris Wilson8bcf9f72018-07-10 10:44:20 +01001326 i915_gem_drain_workqueue(dev_priv);
Chris Wilson092be382019-06-26 16:45:49 +01001327err_init_hw:
Daniele Ceraolo Spurioca7b2c12019-07-13 11:00:13 +01001328 intel_uc_fini_hw(&dev_priv->gt.uc);
Michał Winiarski61b5c152017-12-13 23:13:48 +01001329err_uc_init:
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001330 if (ret != -EIO) {
Michal Wajdeczko0075a202019-08-17 13:11:44 +00001331 intel_uc_fini(&dev_priv->gt.uc);
Tvrtko Ursulinb0258bf2019-10-22 10:47:17 +01001332 intel_engines_cleanup(&dev_priv->gt);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001333 }
1334err_context:
1335 if (ret != -EIO)
Chris Wilsona4e7ccd2019-10-04 14:40:09 +01001336 i915_gem_driver_release__contexts(dev_priv);
Chris Wilson51797492018-12-04 14:15:16 +00001337err_scratch:
Andi Shyti42014f62019-09-05 14:14:03 +03001338 intel_gt_driver_release(&dev_priv->gt);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001339err_unlock:
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001340 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001341
Chris Wilson1e345562019-01-28 10:23:56 +00001342 if (ret != -EIO) {
Michal Wajdeczkoa5f978c2019-08-11 19:51:32 +00001343 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001344 i915_gem_cleanup_userptr(dev_priv);
Tvrtko Ursulinf0c02c12019-06-21 08:08:10 +01001345 intel_timelines_fini(dev_priv);
Chris Wilson1e345562019-01-28 10:23:56 +00001346 }
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001347
Chris Wilson60990322014-04-09 09:19:42 +01001348 if (ret == -EIO) {
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001349 /*
Michal Wajdeczkoa5f978c2019-08-11 19:51:32 +00001350 * Allow engines or uC initialisation to fail by marking the GPU
	1351	 * as wedged. But we only want to do this when the GPU is angry;
Chris Wilson60990322014-04-09 09:19:42 +01001352	 * for any other failure, such as an allocation failure, we bail.
1353 */
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001354 if (!intel_gt_is_wedged(&dev_priv->gt)) {
Janusz Krzysztofikf2db53f2019-07-12 13:24:27 +02001355 i915_probe_error(dev_priv,
1356 "Failed to initialize GPU, declaring it wedged!\n");
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001357 intel_gt_set_wedged(&dev_priv->gt);
Chris Wilson6f74b362017-10-15 15:37:25 +01001358 }
Chris Wilson7ed43df2018-07-26 09:50:32 +01001359
1360 /* Minimal basic recovery for KMS */
1361 ret = i915_ggtt_enable_hw(dev_priv);
1362 i915_gem_restore_gtt_mappings(dev_priv);
Chris Wilsone9d4c922019-10-16 15:32:33 +01001363 i915_gem_restore_fences(&dev_priv->ggtt);
Chris Wilson7ed43df2018-07-26 09:50:32 +01001364 intel_init_clock_gating(dev_priv);
Chris Wilson1070a422012-04-24 15:47:41 +01001365 }
1366
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001367 i915_gem_drain_freed_objects(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01001368 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01001369}
1370
Chris Wilsonc29579d2019-08-06 13:42:59 +01001371void i915_gem_driver_register(struct drm_i915_private *i915)
1372{
1373 i915_gem_driver_register__shrinker(i915);
Chris Wilson750e76b2019-08-06 13:43:00 +01001374
1375 intel_engines_driver_register(i915);
Chris Wilsonc29579d2019-08-06 13:42:59 +01001376}
1377
1378void i915_gem_driver_unregister(struct drm_i915_private *i915)
1379{
1380 i915_gem_driver_unregister__shrinker(i915);
1381}
1382
Janusz Krzysztofik78dae1a2019-07-12 13:24:29 +02001383void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001384{
Chris Wilson0cf289b2019-06-13 08:32:54 +01001385 intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
Chris Wilsonb27e35a2019-05-27 12:51:14 +01001386
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001387 i915_gem_suspend_late(dev_priv);
Andi Shyti42014f62019-09-05 14:14:03 +03001388 intel_gt_driver_remove(&dev_priv->gt);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001389
1390 /* Flush any outstanding unpin_work. */
1391 i915_gem_drain_workqueue(dev_priv);
1392
Daniele Ceraolo Spurioca7b2c12019-07-13 11:00:13 +01001393 intel_uc_fini_hw(&dev_priv->gt.uc);
1394 intel_uc_fini(&dev_priv->gt.uc);
Janusz Krzysztofik47bc28d2019-05-30 15:31:05 +02001395
1396 i915_gem_drain_freed_objects(dev_priv);
1397}
1398
Janusz Krzysztofik3b58a942019-07-12 13:24:28 +02001399void i915_gem_driver_release(struct drm_i915_private *dev_priv)
Janusz Krzysztofik47bc28d2019-05-30 15:31:05 +02001400{
Tvrtko Ursulinb0258bf2019-10-22 10:47:17 +01001401 intel_engines_cleanup(&dev_priv->gt);
Chris Wilsona4e7ccd2019-10-04 14:40:09 +01001402 i915_gem_driver_release__contexts(dev_priv);
Andi Shyti42014f62019-09-05 14:14:03 +03001403 intel_gt_driver_release(&dev_priv->gt);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001404
Tvrtko Ursulin25d140f2018-12-03 13:33:19 +00001405 intel_wa_list_free(&dev_priv->gt_wa_list);
1406
Daniele Ceraolo Spurioca7b2c12019-07-13 11:00:13 +01001407 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001408 i915_gem_cleanup_userptr(dev_priv);
Tvrtko Ursulinf0c02c12019-06-21 08:08:10 +01001409 intel_timelines_fini(dev_priv);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001410
1411 i915_gem_drain_freed_objects(dev_priv);
1412
Chris Wilsona4e7ccd2019-10-04 14:40:09 +01001413 WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001414}
1415
Chris Wilson24145512017-01-24 11:01:35 +00001416void i915_gem_init_mmio(struct drm_i915_private *i915)
1417{
1418 i915_gem_sanitize(i915);
1419}
1420
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001421static void i915_gem_init__mm(struct drm_i915_private *i915)
1422{
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001423 spin_lock_init(&i915->mm.obj_lock);
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001424
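	/* Objects awaiting deferred freeing are collected on free_list. */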
1425 init_llist_head(&i915->mm.free_list);
1426
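	/*
	 * Shrinker bookkeeping: purge_list tracks purgeable (MADV_DONTNEED)
	 * objects, shrink_list every other object that may be reclaimed.
	 */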
Chris Wilson3b4fa962019-05-30 21:34:59 +01001427 INIT_LIST_HEAD(&i915->mm.purge_list);
Chris Wilsonecab9be2019-06-12 11:57:20 +01001428 INIT_LIST_HEAD(&i915->mm.shrink_list);
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001429
Chris Wilson84753552019-05-28 10:29:45 +01001430 i915_gem_init__objects(i915);
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001431}
1432
Matthew Aulda3f356b2019-09-27 18:33:49 +01001433void i915_gem_init_early(struct drm_i915_private *dev_priv)
Eric Anholt673a3942008-07-30 12:06:12 -07001434{
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001435 i915_gem_init__mm(dev_priv);
Chris Wilsonf2123812017-10-16 12:40:37 +01001436
Chris Wilsonb5add952016-08-04 16:32:36 +01001437 spin_lock_init(&dev_priv->fb_tracking.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001438}
Dave Airlie71acb5e2008-12-30 20:31:46 +10001439
Michal Wajdeczkoa0de9082018-03-23 12:34:49 +00001440void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
Imre Deakd64aa092016-01-19 15:26:29 +02001441{
Chris Wilsonc4d4c1c2017-02-10 16:35:23 +00001442 i915_gem_drain_freed_objects(dev_priv);
Chris Wilsonc9c704712018-02-19 22:06:31 +00001443 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1444 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
Chris Wilsond82b4b22019-05-30 21:35:00 +01001445 WARN_ON(dev_priv->mm.shrink_count);
Imre Deakd64aa092016-01-19 15:26:29 +02001446}
1447
Chris Wilson6a800ea2016-09-21 14:51:07 +01001448int i915_gem_freeze(struct drm_i915_private *dev_priv)
1449{
Chris Wilsond0aa3012017-04-07 11:25:49 +01001450	/* Discard all purgeable objects; let userspace recover those as
1451 * required after resuming.
1452 */
Chris Wilson6a800ea2016-09-21 14:51:07 +01001453 i915_gem_shrink_all(dev_priv);
Chris Wilson6a800ea2016-09-21 14:51:07 +01001454
Chris Wilson6a800ea2016-09-21 14:51:07 +01001455 return 0;
1456}
1457
Chris Wilson95c778d2018-06-01 15:41:25 +01001458int i915_gem_freeze_late(struct drm_i915_private *i915)
Chris Wilson461fb992016-05-14 07:26:33 +01001459{
1460 struct drm_i915_gem_object *obj;
Chris Wilsonecab9be2019-06-12 11:57:20 +01001461 intel_wakeref_t wakeref;
Chris Wilson461fb992016-05-14 07:26:33 +01001462
Chris Wilson95c778d2018-06-01 15:41:25 +01001463 /*
1464 * Called just before we write the hibernation image.
Chris Wilson461fb992016-05-14 07:26:33 +01001465 *
1466 * We need to update the domain tracking to reflect that the CPU
1467 * will be accessing all the pages to create and restore from the
	1468	 * hibernation image, and so upon restoration those pages will be in the
1469 * CPU domain.
1470 *
1471 * To make sure the hibernation image contains the latest state,
1472 * we update that state just before writing out the image.
Chris Wilson7aab2d52016-09-09 20:02:18 +01001473 *
1474 * To try and reduce the hibernation image, we manually shrink
Chris Wilsond0aa3012017-04-07 11:25:49 +01001475 * the objects as well, see i915_gem_freeze()
Chris Wilson461fb992016-05-14 07:26:33 +01001476 */
1477
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001478 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
Chris Wilsonecab9be2019-06-12 11:57:20 +01001479
1480 i915_gem_shrink(i915, -1UL, NULL, ~0);
Chris Wilson95c778d2018-06-01 15:41:25 +01001481 i915_gem_drain_freed_objects(i915);
Chris Wilson461fb992016-05-14 07:26:33 +01001482
Chris Wilsonecab9be2019-06-12 11:57:20 +01001483 list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
1484 i915_gem_object_lock(obj);
1485 WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
1486 i915_gem_object_unlock(obj);
Chris Wilson461fb992016-05-14 07:26:33 +01001487 }
Chris Wilsonecab9be2019-06-12 11:57:20 +01001488
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001489 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
Chris Wilson461fb992016-05-14 07:26:33 +01001490
1491 return 0;
1492}
1493
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001494void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00001495{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001496 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsone61e0f52018-02-21 09:56:36 +00001497 struct i915_request *request;
Eric Anholtb9624422009-06-03 07:27:35 +00001498
1499 /* Clean up our request list when the client is going away, so that
1500 * later retire_requests won't dereference our soon-to-be-gone
1501 * file_priv.
1502 */
Chris Wilson1c255952010-09-26 11:03:27 +01001503 spin_lock(&file_priv->mm.lock);
Chris Wilsonc8659ef2017-03-02 12:25:25 +00001504 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001505 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01001506 spin_unlock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001507}
1508
Chris Wilson829a0af2017-06-20 12:05:45 +01001509int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001510{
1511 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08001512 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001513
Chris Wilsonc4c29d72016-11-09 10:45:07 +00001514 DRM_DEBUG("\n");
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001515
1516 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1517 if (!file_priv)
1518 return -ENOMEM;
1519
1520 file->driver_priv = file_priv;
Chris Wilson829a0af2017-06-20 12:05:45 +01001521 file_priv->dev_priv = i915;
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001522 file_priv->file = file;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001523
1524 spin_lock_init(&file_priv->mm.lock);
1525 INIT_LIST_HEAD(&file_priv->mm.request_list);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001526
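	/*
	 * -1 means no BSD engine has been chosen yet; one is picked on first
	 * use. The creation timestamp feeds the client hang/ban heuristics.
	 */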
Chris Wilsonc80ff162016-07-27 09:07:27 +01001527 file_priv->bsd_engine = -1;
Mika Kuoppala14921f32018-06-15 13:44:29 +03001528 file_priv->hang_timestamp = jiffies;
Tvrtko Ursulinde1add32016-01-15 15:12:50 +00001529
Chris Wilson829a0af2017-06-20 12:05:45 +01001530 ret = i915_gem_context_open(i915, file);
Ben Widawskye422b882013-12-06 14:10:58 -08001531 if (ret)
1532 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001533
Ben Widawskye422b882013-12-06 14:10:58 -08001534 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001535}
1536
Chris Wilson935a2f72017-02-13 17:15:13 +00001537#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
Chris Wilson66d9cb52017-02-13 17:15:17 +00001538#include "selftests/mock_gem_device.c"
Chris Wilson3f51b7e12018-08-30 14:48:06 +01001539#include "selftests/i915_gem.c"
Chris Wilson935a2f72017-02-13 17:15:13 +00001540#endif