/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_drv.h"
#include "intel_pm.h"

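/*
 * Helpers for the GTT pread/pwrite slow paths: reserve (and later release)
 * a single-page node in the mappable portion of the GGTT when the object
 * itself cannot be pinned into the aperture.
 */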
static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	mutex_lock(&ggtt->vm.mutex);

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

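/*
 * Unbind every VMA belonging to the object, dropping it out of all the
 * address spaces it is currently bound into. Stops and returns on the
 * first error encountered.
 */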
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

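/*
 * pwrite path for objects backed by a physically contiguous allocation
 * (obj->phys_handle): copy straight into the phys mapping and clflush so
 * that the GPU observes the new data.
 */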
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

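/*
 * Common backend for the create and dumb_create ioctls: round the size up
 * to page granularity, allocate a shmem-backed object and return a new
 * handle (plus the rounded size) to userspace.
 */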
static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
		return;

	i915_gem_chipset_flush(dev_priv);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		struct intel_uncore *uncore = &dev_priv->uncore;

		spin_lock_irq(&uncore->lock);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irq(&uncore->lock);
	}
}

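/* Per-page copy function for the shmem pread fastpath.
 * Flushes any stale cachelines over the source range before copying to
 * userspace when needs_clflush is set.
 */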
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

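/* Slow shmem pread path: copy the object out page by page through a kmap,
 * flushing cachelines as required by the caching mode.
 */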
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

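/* GGTT pread path: read through an uncached GGTT mapping of the object,
 * falling back to a single-page mappable-aperture window when the whole
 * object cannot be pinned into the aperture.
 */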
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

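/* Slow shmem pwrite path: copy the user data in page by page through a
 * kmap, clflushing around each copy as required by the caching mode.
 */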
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &i915->ggtt.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

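/* Wait for all engines to idle; if they fail to do so within the timeout,
 * dump state and declare the GPU wedged.
 */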
static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		GEM_TRACE_DUMP();
		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}

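/* Wait upon the last request of every active timeline, dropping the
 * timeline lock around each wait and restarting the walk afterwards.
 */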
static long
wait_for_timelines(struct drm_i915_private *i915,
		   unsigned int flags, long timeout)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline *tl;

	mutex_lock(&gt->mutex);
	list_for_each_entry(tl, &gt->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		mutex_unlock(&gt->mutex);

		/*
		 * "Race-to-idle".
		 *
		 * Switching to the kernel context is often used as a
		 * synchronous step prior to idling, e.g. in suspend for
		 * flushing all current operations to memory before sleeping.
		 * These we want to complete as quickly as possible to avoid
		 * prolonged stalls, so allow the gpu to boost to maximum
		 * clocks.
		 */
		if (flags & I915_WAIT_FOR_IDLE_BOOST)
			gen6_rps_boost(rq);

		timeout = i915_request_wait(rq, flags, timeout);
		i915_request_put(rq);
		if (timeout < 0)
			return timeout;

		/* restart after reacquiring the lock */
		mutex_lock(&gt->mutex);
		tl = list_entry(&gt->active_list, typeof(*tl), link);
	}
	mutex_unlock(&gt->mutex);

	return timeout;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915,
			   unsigned int flags, long timeout)
{
	GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
		  timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
		  yesno(i915->gt.awake));

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	timeout = wait_for_timelines(i915, flags, timeout);
	if (timeout < 0)
		return timeout;

	if (flags & I915_WAIT_LOCKED) {
		int err;

		lockdep_assert_held(&i915->drm.struct_mutex);

		err = wait_for_engines(i915);
		if (err)
			return err;

		i915_retire_requests(i915);
	}

	return 0;
}

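/*
 * Pin an object into the global GTT (optionally through a specific view),
 * unbinding an existing vma first if it is misplaced for the requested
 * size, alignment or flags.
 */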
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

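/*
 * I915_GEM_MADVISE ioctl: mark an object's backing storage as willneed or
 * dontneed so that the shrinker may discard purgeable pages under memory
 * pressure.
 */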
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	if (i915_gem_object_has_pages(obj)) {
		struct list_head *list;

		if (i915_gem_object_is_shrinkable(obj)) {
			unsigned long flags;

			spin_lock_irqsave(&i915->mm.obj_lock, flags);

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		}
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}
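
/*
 * Illustrative sketch, not part of the driver: how userspace typically
 * reaches the handler above through libdrm. "bo_handle" is assumed to come
 * from an earlier GEM create ioctl; the struct and ioctl number live in
 * include/uapi/drm/i915_drm.h.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = bo_handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 * Marking a buffer DONTNEED lets the shrinker discard its backing pages
 * under memory pressure; switching it back to WILLNEED reports in
 * madv.retained whether the old contents survived (0 means the pages were
 * purged and the buffer must be reinitialised before reuse).
 */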
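/*
 * i915_gem_sanitize() brings the GPU back to a known state before we start
 * trusting it again: it clears any terminal wedge left behind by a previous
 * incarnation and resets away whatever context state the BIOS or an earlier
 * occupant may have left, as the comments in the body explain. It is invoked
 * from i915_gem_init_mmio() below and, per the first comment in the body, on
 * the resume path.
 */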
void i915_gem_sanitize(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	GEM_TRACE("\n");

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (i915_terminally_wedged(i915))
		i915_gem_unset_wedged(i915);

	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the stability
	 * of the reset, so this could be applied to even earlier gen.
	 */
	intel_gt_sanitize(i915, false);

	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN(dev_priv, 5))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN(dev_priv, 6))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN(dev_priv, 7))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN(dev_priv, 8))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN(dev_priv, 2)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN(dev_priv, 3)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
	}
}

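/*
 * i915_gem_init_hw() programs the hardware state that does not survive a
 * reset or suspend: GT workarounds, swizzling, the unused legacy rings,
 * PPGTT, WOPCM and the microcontroller firmware, before finally resuming
 * the engines. i915_gem_init() below calls it with the same forcewake
 * "security blanket" held that it takes for the rest of the bring-up.
 */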
int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
	int ret;

	dev_priv->gt.last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(dev_priv);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(dev_priv, "init");

	i915_gem_init_swizzling(dev_priv);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume, which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev_priv);

	BUG_ON(!dev_priv->kernel_context);
	ret = i915_terminally_wedged(dev_priv);
	if (ret)
		goto out;

	ret = i915_ppgtt_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	ret = intel_wopcm_init_hw(&dev_priv->wopcm);
	if (ret) {
		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init_l3cc_table(dev_priv);

	/* Only when the HW is re-initialised can we replay the requests */
	ret = intel_engines_resume(dev_priv);
	if (ret)
		goto cleanup_uc;

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_engines_set_scheduler_caps(dev_priv);
	return 0;

cleanup_uc:
	intel_uc_fini_hw(dev_priv);
out:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	return ret;
}

static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the GPU during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	ctx = i915_gem_context_create_kernel(i915, 0);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	e = i915_gem_context_lock_engines(ctx);

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_active;
		}

		err = 0;
		if (rq->engine->init_context)
			err = rq->engine->init_context(rq);

		i915_request_add(rq);
		if (err)
			goto err_active;
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (!i915_gem_load_power_context(i915)) {
		err = -EIO;
		goto err_active;
	}

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_vma *state = ce->state;
		void *vaddr;

		if (!state)
			continue;

		GEM_BUG_ON(intel_context_is_pinned(ce));

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto err_active;

		i915_gem_object_lock(state->obj);
		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		i915_gem_object_unlock(state->obj);
		if (err)
			goto err_active;

		engine->default_state = i915_gem_object_get(state->obj);
		i915_gem_object_set_cache_coherency(engine->default_state,
						    I915_CACHE_LLC);

		/* Check we can acquire the image of the context state */
		vaddr = i915_gem_object_pin_map(engine->default_state,
						I915_MAP_FORCE_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_active;
		}

		i915_gem_object_unpin_map(engine->default_state);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		unsigned int found = intel_engines_has_context_isolation(i915);

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		for_each_engine(engine, i915, id) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((found & bit) != expected) {
				DRM_ERROR("mismatching default context state for class %d on engine %s\n",
					  engine->uabi_class, engine->name);
			}
		}
	}

out_ctx:
	i915_gem_context_unlock_engines(ctx);
	i915_gem_context_set_closed(ctx);
	i915_gem_context_put(ctx);
	return err;

err_active:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	i915_gem_set_wedged(i915);
	goto out_ctx;
}

static int
i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	i915->gt.scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
}

static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, i915, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	return err;
}

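/*
 * i915_gem_init() is the one-time GEM bring-up at driver load: it sets up
 * the GGTT, scratch page, contexts and engines, loads the microcontrollers,
 * initialises the hardware via i915_gem_init_hw() and records the default
 * context image with __intel_engines_record_defaults(). Most failures
 * unwind completely, but -EIO is special-cased: the GPU is marked wedged so
 * that KMS can keep running without submission (see the comment above the
 * error labels).
 */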
int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* We need to fall back to 4K pages if the host doesn't support huge GTT. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);

	i915_timelines_init(dev_priv);

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	ret = intel_uc_init_misc(dev_priv);
	if (ret)
		return ret;

	ret = intel_wopcm_init(&dev_priv->wopcm);
	if (ret)
		goto err_uc_misc;

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_init_scratch(dev_priv,
				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;
	}

	ret = intel_engines_setup(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_contexts_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_scratch;
	}

	ret = intel_engines_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_context;
	}

	intel_init_gt_powersave(dev_priv);

	ret = intel_uc_init(dev_priv);
	if (ret)
		goto err_pm;

	ret = i915_gem_init_hw(dev_priv);
	if (ret)
		goto err_uc_init;

	/*
	 * Despite its name, intel_init_clock_gating applies not only display
	 * clock gating workarounds but also GT mmio workarounds and the
	 * occasional GT power context workaround. Worse, sometimes it
	 * includes a context register workaround which we need to apply
	 * before we record the default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = intel_engines_verify_workarounds(dev_priv);
	if (ret)
		goto err_init_hw;

	ret = __intel_engines_record_defaults(dev_priv);
	if (ret)
		goto err_init_hw;

	if (i915_inject_load_failure()) {
		ret = -ENODEV;
		goto err_init_hw;
	}

	if (i915_inject_load_failure()) {
		ret = -EIO;
		goto err_init_hw;
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

	/*
	 * Unwinding is complicated by the fact that we want -EIO to mean
	 * "disable GPU submission but keep KMS alive". We want to mark the
	 * HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_init_hw:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_set_wedged(dev_priv);
	i915_gem_suspend(dev_priv);
	i915_gem_suspend_late(dev_priv);

	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
err_uc_init:
	intel_uc_fini(dev_priv);
err_pm:
	if (ret != -EIO) {
		intel_cleanup_gt_powersave(dev_priv);
		intel_engines_cleanup(dev_priv);
	}
err_context:
	if (ret != -EIO)
		i915_gem_contexts_fini(dev_priv);
err_scratch:
	i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

err_uc_misc:
	intel_uc_fini_misc(dev_priv);

	if (ret != -EIO) {
		i915_gem_cleanup_userptr(dev_priv);
		i915_timelines_fini(dev_priv);
	}

	if (ret == -EIO) {
		mutex_lock(&dev_priv->drm.struct_mutex);

		/*
		 * Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this when the GPU is angry;
		 * for all other failures, such as an allocation failure, we
		 * bail.
		 */
		if (!i915_reset_failed(dev_priv)) {
			i915_load_error(dev_priv,
					"Failed to initialize GPU, declaring it wedged!\n");
			i915_gem_set_wedged(dev_priv);
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_gem_restore_gtt_mappings(dev_priv);
		i915_gem_restore_fences(dev_priv);
		intel_init_clock_gating(dev_priv);

		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	i915_gem_drain_freed_objects(dev_priv);
	return ret;
}

void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
{
	GEM_BUG_ON(dev_priv->gt.awake);

	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);

	i915_gem_suspend_late(dev_priv);
	intel_disable_gt_powersave(dev_priv);

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	intel_uc_fini(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_engines_cleanup(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	i915_gem_fini_scratch(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_wa_list_free(&dev_priv->gt_wa_list);

	intel_cleanup_gt_powersave(dev_priv);

	intel_uc_fini_misc(dev_priv);
	i915_gem_cleanup_userptr(dev_priv);
	i915_timelines_fini(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}

void i915_gem_init_mmio(struct drm_i915_private *i915)
{
	i915_gem_sanitize(i915);
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);
	spin_lock_init(&i915->mm.free_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}

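/*
 * i915_gem_init_early() sets up the software-only GEM state (locks, lists,
 * wait queues and the optional private tmpfs mount for huge pages) before
 * any hardware is touched; the hardware-facing half of the setup happens
 * later in i915_gem_init().
 */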
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	int err;

	intel_gt_pm_init(dev_priv);

	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
	spin_lock_init(&dev_priv->gt.closed_lock);

	i915_gem_init__mm(dev_priv);
	i915_gem_init__pm(dev_priv);

	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
	mutex_init(&dev_priv->gpu_error.wedge_mutex);
	init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	err = i915_gemfs_init(dev_priv);
	if (err)
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);

	return 0;
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	WARN_ON(dev_priv->mm.shrink_count);

	cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	i915_gemfs_fini(dev_priv);
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze()
	 */

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	i915_gem_shrink(i915, -1UL, NULL, ~0);
	i915_gem_drain_freed_objects(i915);

	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		i915_gem_object_lock(obj);
		WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
		i915_gem_object_unlock(obj);
	}

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask is guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     BITS_PER_TYPE(atomic_t));

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}
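
/*
 * Illustrative sketch, not part of the driver: a display plane update would
 * move its frontbuffer bits from the old framebuffer object to the new one
 * roughly like so (INTEL_FRONTBUFFER() stands in for whatever bitmask the
 * caller derives for the plane, see intel_frontbuffer.h):
 *
 *	i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
 *			  intel_fb_obj(new_plane_state->fb),
 *			  INTEL_FRONTBUFFER(pipe, plane_id));
 *
 * Passing NULL for either object simply skips that half of the update, as
 * the NULL checks above show.
 */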

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif