/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_drv.h"
#include "intel_pm.h"

static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	mutex_lock(&ggtt->vm.mutex);

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

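/*
 * Example: a minimal userspace sketch of driving the aperture query above,
 * assuming only the uapi definitions from <drm/i915_drm.h> and an open DRM
 * fd; the local names are illustrative, not part of the driver:
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static void print_aperture(int fd)
 *	{
 *		struct drm_i915_gem_get_aperture aperture = {};
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
 *			printf("GGTT: %llu total, %llu available\n",
 *			       (unsigned long long)aperture.aper_size,
 *			       (unsigned long long)aperture.aper_available_size);
 *	}
 */
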
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}

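/*
 * Example: how a KMS client typically exercises the dumb-buffer path above,
 * assuming the generic uapi from <drm/drm.h>; the caller supplies only
 * width/height/bpp (bpp 32 maps to cpp 4, i.e. DRM_FORMAT_XRGB8888 above)
 * and the kernel fills in handle, pitch and size with the alignment rules
 * shown:
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/drm.h>
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920,
 *		.height = 1080,
 *		.bpp = 32,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) == 0)
 *		use_buffer(create.handle, create.pitch, create.size);
 *
 * (use_buffer() is a hypothetical caller-side helper.)
 */
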
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}

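/*
 * Example: minimal userspace sketch of the create ioctl handled above,
 * assuming the uapi in <drm/i915_drm.h>; the kernel rounds the size up to
 * a page multiple and returns it together with the new handle:
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *		return -errno;
 *	// create.handle now names the new shmem-backed object
 */
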
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}

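/*
 * Worked example of the chunking above, assuming 4KiB pages: for
 * args->offset == 0x1801 and args->size == 0x1000, the loop starts at
 * idx = 0x1801 >> PAGE_SHIFT = 1 with offset = offset_in_page(0x1801) =
 * 0x801. The first iteration copies length = min(0x1000, PAGE_SIZE - 0x801)
 * = 0x7ff bytes, then offset resets to 0 and the second iteration copies
 * the remaining 0x801 bytes from the start of page 2.
 */
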
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

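/*
 * Example: reading an object back with the ioctl above, assuming the uapi
 * in <drm/i915_drm.h>; data_ptr carries the userspace pointer as a u64:
 *
 *	#include <errno.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *		return -errno;
 */
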
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}

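/*
 * Worked example of the partial_cacheline_write mask used above: with a
 * 64-byte clflush size the mask is 63 (0x3f). A write of length 0x40 at
 * offset 0x40 gives (0x40 | 0x40) & 0x3f == 0, i.e. only whole cachelines
 * are overwritten and no clflush-before is needed; a write of length 0x20
 * at the same offset gives (0x40 | 0x20) & 0x3f == 0x20, so the straddled
 * cacheline must be flushed first to avoid mixing in stale data.
 */
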
/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

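/*
 * Example: uploading data through the ioctl above, assuming the uapi in
 * <drm/i915_drm.h>; the kernel picks the phys, GTT or shmem path following
 * the fallback chain shown:
 *
 *	#include <errno.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = len,
 *		.data_ptr = (uintptr_t)data,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
 *		return -errno;
 */
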
/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

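/*
 * Example: a minimal userspace sketch of the sw_finish ioctl above,
 * assuming the uapi in <drm/i915_drm.h>, issued after CPU writes to a
 * buffer that may be a scanout target:
 *
 *	struct drm_i915_gem_sw_finish finish = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &finish);
 */
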
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &i915->ggtt.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/*
	 * The fences will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		GEM_TRACE_DUMP();
		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}

static long
wait_for_timelines(struct drm_i915_private *i915,
		   unsigned int flags, long timeout)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline *tl;

	mutex_lock(&gt->mutex);
	list_for_each_entry(tl, &gt->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		mutex_unlock(&gt->mutex);

		/*
		 * "Race-to-idle".
		 *
		 * Switching to the kernel context is often used as a
		 * synchronous step prior to idling, e.g. in suspend for
		 * flushing all current operations to memory before sleeping.
		 * These we want to complete as quickly as possible to avoid
		 * prolonged stalls, so allow the gpu to boost to maximum
		 * clocks.
		 */
		if (flags & I915_WAIT_FOR_IDLE_BOOST)
			gen6_rps_boost(rq);

		timeout = i915_request_wait(rq, flags, timeout);
		i915_request_put(rq);
		if (timeout < 0)
			return timeout;

		/* restart after reacquiring the lock */
		mutex_lock(&gt->mutex);
		tl = list_entry(&gt->active_list, typeof(*tl), link);
	}
	mutex_unlock(&gt->mutex);

	return timeout;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915,
			   unsigned int flags, long timeout)
{
	GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
		  timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
		  yesno(i915->gt.awake));

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	timeout = wait_for_timelines(i915, flags, timeout);
	if (timeout < 0)
		return timeout;

	if (flags & I915_WAIT_LOCKED) {
		int err;

		lockdep_assert_held(&i915->drm.struct_mutex);

		err = wait_for_engines(i915);
		if (err)
			return err;

		i915_retire_requests(i915);
	}

	return 0;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

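/*
 * Example: callers in this file (e.g. i915_gem_gtt_pread() above) use this
 * helper roughly as sketched below, dropping the pin once the aperture
 * access is complete; this is an illustration, not an additional caller:
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 *				       PIN_MAPPABLE | PIN_NONBLOCK);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... access the object through the mappable aperture ...
 *	i915_vma_unpin(vma);
 */
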
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	if (i915_gem_object_has_pages(obj)) {
		struct list_head *list;

		if (i915_gem_object_is_shrinkable(obj)) {
			unsigned long flags;

			spin_lock_irqsave(&i915->mm.obj_lock, flags);

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		}
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}

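/*
 * Example: marking a cached object purgeable and later reclaiming it,
 * assuming the uapi in <drm/i915_drm.h>; retained == 0 after the WILLNEED
 * call means the shrinker already discarded the backing pages
 * (reupload_contents() is a hypothetical caller-side helper):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv); // may now be purged
 *
 *	madv.madv = I915_MADV_WILLNEED;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reupload_contents(fd, handle); // backing store was lost
 */
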
void i915_gem_sanitize(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	GEM_TRACE("\n");

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (i915_terminally_wedged(i915))
		i915_gem_unset_wedged(i915);

	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the
	 * stability of the reset; once that is proven, this could be
	 * applied to even earlier gens.
	 */
	intel_gt_sanitize(i915, false);

	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
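
/*
 * Zero the registers of a ring we never use so that it reads back as idle
 * (head == tail); see the comment at the call site in init_hw() for why
 * leaving such a ring "active" is harmful.
 */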
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (IS_GEN(i915, 2)) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (IS_GEN(i915, 3)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}
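
/*
 * Program the GT into a working state: apply and verify workarounds,
 * restore swizzling, quiesce the unused rings, and bring up the PPGTT,
 * WOPCM, uC firmware and MOCS tables before any requests are allowed
 * to run.
 */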
static int init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume, which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	ret = intel_wopcm_init_hw(&i915->wopcm, gt);
	if (ret) {
		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(i915);
	if (ret) {
		DRM_ERROR("Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init_l3cc_table(gt);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return 0;

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}
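
/*
 * (Re)initialise the hardware, and only once that has succeeded resume
 * the engines so that outstanding requests may be replayed. This runs at
 * first load and again whenever the HW has been reset or woken from
 * suspend.
 */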
int i915_gem_init_hw(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	int ret;

	BUG_ON(!i915->kernel_context);
	ret = i915_terminally_wedged(i915);
	if (ret)
		return ret;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	ret = init_hw(&i915->gt);
	if (ret)
		goto err_init;

	/* Only when the HW is re-initialised can we replay the requests */
	ret = intel_engines_resume(i915);
	if (ret)
		goto err_engines;

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	intel_engines_set_scheduler_caps(i915);

	return 0;

err_engines:
	intel_uc_fini_hw(i915);
err_init:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	intel_engines_set_scheduler_caps(i915);

	return ret;
}

static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the GPU during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	ctx = i915_gem_context_create_kernel(i915, 0);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	e = i915_gem_context_lock_engines(ctx);

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_active;
		}

		err = 0;
		if (rq->engine->init_context)
			err = rq->engine->init_context(rq);

		i915_request_add(rq);
		if (err)
			goto err_active;
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (!i915_gem_load_power_context(i915)) {
		err = -EIO;
		goto err_active;
	}

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_vma *state = ce->state;
		void *vaddr;

		if (!state)
			continue;

		GEM_BUG_ON(intel_context_is_pinned(ce));

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto err_active;

		i915_gem_object_lock(state->obj);
		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		i915_gem_object_unlock(state->obj);
		if (err)
			goto err_active;

		engine->default_state = i915_gem_object_get(state->obj);
		i915_gem_object_set_cache_coherency(engine->default_state,
						    I915_CACHE_LLC);

		/* Check we can acquire the image of the context state */
		vaddr = i915_gem_object_pin_map(engine->default_state,
						I915_MAP_FORCE_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_active;
		}

		i915_gem_object_unpin_map(engine->default_state);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		unsigned int found = intel_engines_has_context_isolation(i915);

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		for_each_engine(engine, i915, id) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((found & bit) != expected) {
				DRM_ERROR("mismatching default context state for class %d on engine %s\n",
					  engine->uabi_class, engine->name);
			}
		}
	}

out_ctx:
	i915_gem_context_unlock_engines(ctx);
	i915_gem_context_set_closed(ctx);
	i915_gem_context_put(ctx);
	return err;

err_active:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	i915_gem_set_wedged(i915);
	goto out_ctx;
}
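
/*
 * Allocate the global GT scratch page, preferring stolen memory with an
 * internal object as the fallback, and keep it pinned high in the GGTT
 * for the lifetime of the driver.
 */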
static int
i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	i915->gt.scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
}
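
/*
 * Debug builds only: verify that the per-engine workarounds are still in
 * effect once the hardware has been brought up.
 */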
static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, i915, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	return err;
}
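
/*
 * Top-level GEM initialisation on driver load. Note the error handling:
 * -EIO is treated as "the GPU is terminally wedged" and leaves just
 * enough state behind to keep KMS alive, while any other error unwinds
 * everything that was set up.
 */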
int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* We need to fall back to 4K pages if host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);

	i915_timelines_init(dev_priv);

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	ret = intel_uc_init_misc(dev_priv);
	if (ret)
		return ret;

	ret = intel_wopcm_init(&dev_priv->wopcm);
	if (ret)
		goto err_uc_misc;

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	ret = i915_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_init_scratch(dev_priv,
				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;
	}

	ret = intel_engines_setup(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_scratch;
	}

	ret = i915_gem_contexts_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_scratch;
	}

	ret = intel_engines_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_context;
	}

	intel_init_gt_powersave(dev_priv);

	ret = intel_uc_init(dev_priv);
	if (ret)
		goto err_pm;

	ret = i915_gem_init_hw(dev_priv);
	if (ret)
		goto err_uc_init;

	/*
	 * Despite its name, intel_init_clock_gating applies display clock
	 * gating workarounds, GT mmio workarounds and the occasional GT
	 * power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = intel_engines_verify_workarounds(dev_priv);
	if (ret)
		goto err_init_hw;

	ret = __intel_engines_record_defaults(dev_priv);
	if (ret)
		goto err_init_hw;

	if (i915_inject_load_failure()) {
		ret = -ENODEV;
		goto err_init_hw;
	}

	if (i915_inject_load_failure()) {
		ret = -EIO;
		goto err_init_hw;
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

	/*
	 * Unwinding is complicated by the fact that we want -EIO to mean
	 * "disable GPU submission but keep KMS alive". We want to mark the
	 * HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_init_hw:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_set_wedged(dev_priv);
	i915_gem_suspend(dev_priv);
	i915_gem_suspend_late(dev_priv);

	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
err_uc_init:
	intel_uc_fini(dev_priv);
err_pm:
	if (ret != -EIO) {
		intel_cleanup_gt_powersave(dev_priv);
		intel_engines_cleanup(dev_priv);
	}
err_context:
	if (ret != -EIO)
		i915_gem_contexts_fini(dev_priv);
err_scratch:
	i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

err_uc_misc:
	intel_uc_fini_misc(dev_priv);

	if (ret != -EIO) {
		i915_gem_cleanup_userptr(dev_priv);
		i915_timelines_fini(dev_priv);
	}

	if (ret == -EIO) {
		mutex_lock(&dev_priv->drm.struct_mutex);

		/*
		 * Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry;
		 * for all other failures, such as an allocation failure, bail.
		 */
		if (!i915_reset_failed(dev_priv)) {
			i915_load_error(dev_priv,
					"Failed to initialize GPU, declaring it wedged!\n");
			i915_gem_set_wedged(dev_priv);
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_gem_restore_gtt_mappings(dev_priv);
		i915_gem_restore_fences(dev_priv);
		intel_init_clock_gating(dev_priv);

		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	i915_gem_drain_freed_objects(dev_priv);
	return ret;
}
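
/*
 * First half of GEM teardown on unload: flush outstanding work and
 * release the hardware-facing state (uC firmware and friends); the
 * remaining software state is torn down in i915_gem_fini().
 */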
void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
{
	GEM_BUG_ON(dev_priv->gt.awake);

	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);

	i915_gem_suspend_late(dev_priv);
	intel_disable_gt_powersave(dev_priv);

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	intel_uc_fini(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_engines_cleanup(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	i915_gem_fini_scratch(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_wa_list_free(&dev_priv->gt_wa_list);

	intel_cleanup_gt_powersave(dev_priv);

	intel_uc_fini_misc(dev_priv);
	i915_gem_cleanup_userptr(dev_priv);
	i915_timelines_fini(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}
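
/* Sanitize leftover GPU state as soon as MMIO access becomes available. */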
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
	i915_gem_sanitize(i915);
}
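
/*
 * Set up the locks and lists used to track objects for the shrinker and
 * for deferred freeing.
 */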
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);
	spin_lock_init(&i915->mm.free_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}

int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	int err;

	i915_gem_init__mm(dev_priv);
	i915_gem_init__pm(dev_priv);

	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
	mutex_init(&dev_priv->gpu_error.wedge_mutex);
	init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	err = i915_gemfs_init(dev_priv);
	if (err)
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);

	return 0;
}
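
/*
 * Mirror of i915_gem_init_early(); by this point every object must
 * already have been freed, which the assertions below check.
 */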
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	WARN_ON(dev_priv->mm.shrink_count);

	cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	i915_gemfs_fini(dev_priv);
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	/* Discard all purgeable objects; let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try to reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze().
	 */

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	i915_gem_shrink(i915, -1UL, NULL, ~0);
	i915_gem_drain_freed_objects(i915);

	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		i915_gem_object_lock(obj);
		WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
		i915_gem_object_unlock(obj);
	}

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}
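
/*
 * Allocate the per-client state that backs each new file handle,
 * including this client's default context.
 */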
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask is guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     BITS_PER_TYPE(atomic_t));

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif