/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_display.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"

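/*
 * Reserve a small node low in the mappable range of the GGTT; used by the
 * pread/pwrite GTT fallback paths below, which map individual pages through
 * it and release it again with remove_mappable_node().
 */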
static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

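/*
 * Report the total GGTT size and how much of it is still available to
 * userspace, treating reserved space and pinned vmas as unavailable.
 */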
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	mutex_lock(&ggtt->vm.mutex);

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

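/* Unbind every vma currently bound for this object, stopping on the first error. */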
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

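/*
 * pwrite fallback for objects with a physically contiguous backing store
 * (obj->phys_handle): copy the user data straight into the kernel mapping
 * and clflush it by hand, as if the object were in the GTT domain.
 */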
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

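/*
 * Common creation path shared by the CREATE ioctl and dumb_create: round the
 * requested size up to a whole page, allocate a shmem-backed object and
 * return a new handle for it.
 */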
static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}

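/*
 * Dumb buffers are unaccelerated scanout buffers for generic KMS clients;
 * derive a pixel format from the requested bpp, compute an aligned pitch and
 * hand off to the common creation path.
 */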
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
		return;

	i915_gem_chipset_flush(dev_priv);

	with_intel_runtime_pm(dev_priv, wakeref) {
		spin_lock_irq(&dev_priv->uncore.lock);

		POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));

		spin_unlock_irq(&dev_priv->uncore.lock);
	}
}

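/*
 * Per-page copy function for the shmem pread path; clflushes the source
 * range first when needs_clflush is set.
 */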
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

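/* Copy the requested range out of the object's shmem pages, one page at a time. */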
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}

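/*
 * Copy from a GGTT (iomem) mapping to userspace: try the atomic, non-faulting
 * kmap first and fall back to a full WC mapping if the user page has to be
 * faulted in.
 */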
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

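/*
 * Slow pread fallback through the GGTT: pin the object into the mappable
 * aperture (or rotate its pages through a single reserved GGTT page) and
 * read it back through the uncached window.
 */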
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	wakeref = intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(i915);
		if (!wakeref) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(i915);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(i915, wakeref);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

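/*
 * Write the user data into the object's shmem pages, one page at a time,
 * flushing partially written cachelines before the copy and the written
 * range afterwards as required by needs_clflush.
 */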
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		/* Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

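/* Wait for all engines to idle; if they fail to within the timeout, declare the GPU wedged. */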
static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		GEM_TRACE_DUMP();
		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}

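/*
 * Wait for the last request on every active timeline, dropping the timeline
 * lock around each wait and restarting the walk once it is reacquired.
 */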
static long
wait_for_timelines(struct drm_i915_private *i915,
		   unsigned int flags, long timeout)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline *tl;

	mutex_lock(&gt->mutex);
	list_for_each_entry(tl, &gt->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		mutex_unlock(&gt->mutex);

		/*
		 * "Race-to-idle".
		 *
		 * Switching to the kernel context is often used as a synchronous
		 * step prior to idling, e.g. in suspend for flushing all
		 * current operations to memory before sleeping. These we
		 * want to complete as quickly as possible to avoid prolonged
		 * stalls, so allow the gpu to boost to maximum clocks.
		 */
		if (flags & I915_WAIT_FOR_IDLE_BOOST)
			gen6_rps_boost(rq);

		timeout = i915_request_wait(rq, flags, timeout);
		i915_request_put(rq);
		if (timeout < 0)
			return timeout;

		/* restart after reacquiring the lock */
		mutex_lock(&gt->mutex);
		tl = list_entry(&gt->active_list, typeof(*tl), link);
	}
	mutex_unlock(&gt->mutex);

	return timeout;
}

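/*
 * Wait until the GPU is idle: all outstanding requests on all timelines have
 * completed and, when called with I915_WAIT_LOCKED, the engines report idle
 * and completed requests have been retired.
 */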
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
			   unsigned int flags, long timeout)
{
	GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
		  timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
		  yesno(i915->gt.awake));

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	timeout = wait_for_timelines(i915, flags, timeout);
	if (timeout < 0)
		return timeout;

	if (flags & I915_WAIT_LOCKED) {
		int err;

		lockdep_assert_held(&i915->drm.struct_mutex);

		err = wait_for_engines(i915);
		if (err)
			return err;

		i915_retire_requests(i915);
	}

	return 0;
}

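/*
 * Pin an object into the global GTT for the requested view, unbinding an
 * existing vma first if it is misplaced for the new size/alignment/flags.
 */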
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

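/*
 * I915_GEM_MADVISE: userspace marks an object as needed or unneeded so that
 * the backing storage of DONTNEED objects may be discarded under memory
 * pressure; args->retained reports whether the backing store is still
 * present (i.e. has not been purged).
 */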
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}

Chris Wilson24145512017-01-24 11:01:35 +00001150void i915_gem_sanitize(struct drm_i915_private *i915)
1151{
Chris Wilson538ef962019-01-14 14:21:18 +00001152 intel_wakeref_t wakeref;
1153
Chris Wilsonc3160da2018-05-31 09:22:45 +01001154 GEM_TRACE("\n");
1155
Chris Wilson538ef962019-01-14 14:21:18 +00001156 wakeref = intel_runtime_pm_get(i915);
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001157 intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
Chris Wilsonc3160da2018-05-31 09:22:45 +01001158
1159 /*
1160 * As we have just resumed the machine and woken the device up from
1161 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
1162 * back to defaults, recovering from whatever wedged state we left it
1163	 * in, and so it is worth trying to use the device once more.
1164 */
Chris Wilsonc41166f2019-02-20 14:56:37 +00001165 if (i915_terminally_wedged(i915))
Chris Wilsonf36325f2017-08-26 12:09:34 +01001166 i915_gem_unset_wedged(i915);
Chris Wilsonf36325f2017-08-26 12:09:34 +01001167
Chris Wilson24145512017-01-24 11:01:35 +00001168 /*
1169 * If we inherit context state from the BIOS or earlier occupants
1170 * of the GPU, the GPU may be in an inconsistent state when we
1171 * try to take over. The only way to remove the earlier state
1172 * is by resetting. However, resetting on earlier gen is tricky as
1173 * it may impact the display and we are uncertain about the stability
Joonas Lahtinenea117b82017-04-28 10:53:38 +03001174 * of the reset, so this could be applied to even earlier gen.
Chris Wilson24145512017-01-24 11:01:35 +00001175 */
Chris Wilson79ffac852019-04-24 21:07:17 +01001176 intel_gt_sanitize(i915, false);
Chris Wilsonc3160da2018-05-31 09:22:45 +01001177
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001178 intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
Chris Wilson538ef962019-01-14 14:21:18 +00001179 intel_runtime_pm_put(i915, wakeref);
Chris Wilsonc3160da2018-05-31 09:22:45 +01001180
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001181 mutex_lock(&i915->drm.struct_mutex);
Chris Wilson4dfacb02018-05-31 09:22:43 +01001182 i915_gem_contexts_lost(i915);
1183 mutex_unlock(&i915->drm.struct_mutex);
Chris Wilson24145512017-01-24 11:01:35 +00001184}
1185
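/*
 * Orientation note: "swizzling" here is the hardware address bit-6
 * swizzle applied to tiled surfaces on certain memory configurations.
 * This helper only (re-)enables the swizzle logic, and only on gen5+
 * when a swizzle pattern was detected (see
 * i915_gem_detect_bit_6_swizzle()); it does not choose the pattern.
 */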
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001186void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
Daniel Vetterf691e2f2012-02-02 09:58:12 +01001187{
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001188 if (INTEL_GEN(dev_priv) < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01001189 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
1190 return;
1191
1192 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
1193 DISP_TILE_SURFACE_SWIZZLING);
1194
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001195 if (IS_GEN(dev_priv, 5))
Daniel Vetter11782b02012-01-31 16:47:55 +01001196 return;
1197
Daniel Vetterf691e2f2012-02-02 09:58:12 +01001198 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001199 if (IS_GEN(dev_priv, 6))
Daniel Vetter6b26c862012-04-24 14:04:12 +02001200 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001201 else if (IS_GEN(dev_priv, 7))
Daniel Vetter6b26c862012-04-24 14:04:12 +02001202 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001203 else if (IS_GEN(dev_priv, 8))
Ben Widawsky31a53362013-11-02 21:07:04 -07001204 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08001205 else
1206 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01001207}
Daniel Vettere21af882012-02-09 20:53:27 +01001208
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001209static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03001210{
Ville Syrjälä81e7f202014-08-15 01:21:55 +03001211 I915_WRITE(RING_CTL(base), 0);
1212 I915_WRITE(RING_HEAD(base), 0);
1213 I915_WRITE(RING_TAIL(base), 0);
1214 I915_WRITE(RING_START(base), 0);
1215}
1216
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001217static void init_unused_rings(struct drm_i915_private *dev_priv)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03001218{
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001219 if (IS_I830(dev_priv)) {
1220 init_unused_ring(dev_priv, PRB1_BASE);
1221 init_unused_ring(dev_priv, SRB0_BASE);
1222 init_unused_ring(dev_priv, SRB1_BASE);
1223 init_unused_ring(dev_priv, SRB2_BASE);
1224 init_unused_ring(dev_priv, SRB3_BASE);
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001225 } else if (IS_GEN(dev_priv, 2)) {
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001226 init_unused_ring(dev_priv, SRB0_BASE);
1227 init_unused_ring(dev_priv, SRB1_BASE);
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001228 } else if (IS_GEN(dev_priv, 3)) {
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001229 init_unused_ring(dev_priv, PRB1_BASE);
1230 init_unused_ring(dev_priv, PRB2_BASE);
Ville Syrjälä81e7f202014-08-15 01:21:55 +03001231 }
1232}
1233
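/*
 * Rough outline of the HW (re)initialisation sequence below: take
 * forcewake, apply and verify the GT workarounds, set up swizzling,
 * quiesce the unused legacy rings, bring up PPGTT and WOPCM, load the
 * uC firmware, program the MOCS L3CC table and finally resume the
 * engines so that queued requests can be replayed.
 */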
Chris Wilson20a8a742017-02-08 14:30:31 +00001234int i915_gem_init_hw(struct drm_i915_private *dev_priv)
1235{
Chris Wilsond200cda2016-04-28 09:56:44 +01001236 int ret;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08001237
Chris Wilsonde867c22016-10-25 13:16:02 +01001238 dev_priv->gt.last_init_time = ktime_get();
1239
Chris Wilson5e4f5182015-02-13 14:35:59 +00001240 /* Double layer security blanket, see i915_gem_init() */
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001241 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson5e4f5182015-02-13 14:35:59 +00001242
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00001243 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07001244 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08001245
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01001246 if (IS_HASWELL(dev_priv))
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001247 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
Ville Syrjälä0bf21342013-11-29 14:56:12 +02001248 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03001249
Tvrtko Ursulin094304b2018-12-03 12:50:10 +00001250 /* Apply the GT workarounds... */
Tvrtko Ursulin25d140f2018-12-03 13:33:19 +00001251 intel_gt_apply_workarounds(dev_priv);
Tvrtko Ursulin094304b2018-12-03 12:50:10 +00001252 /* ...and determine whether they are sticking. */
1253 intel_gt_verify_workarounds(dev_priv, "init");
Oscar Mateo59b449d2018-04-10 09:12:47 -07001254
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001255 i915_gem_init_swizzling(dev_priv);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08001256
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01001257 /*
1258 * At least 830 can leave some of the unused rings
1259	 * "active" (i.e. head != tail) after resume, which
1260	 * will prevent c3 entry. Make sure all unused rings
1261 * are totally idle.
1262 */
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001263 init_unused_rings(dev_priv);
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01001264
Dave Gordoned54c1a2016-01-19 19:02:54 +00001265 BUG_ON(!dev_priv->kernel_context);
Chris Wilsonc41166f2019-02-20 14:56:37 +00001266 ret = i915_terminally_wedged(dev_priv);
1267 if (ret)
Chris Wilson6f74b362017-10-15 15:37:25 +01001268 goto out;
John Harrison90638cc2015-05-29 17:43:37 +01001269
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00001270 ret = i915_ppgtt_init_hw(dev_priv);
John Harrison4ad2fd82015-06-18 13:11:20 +01001271 if (ret) {
Chris Wilson8177e112018-02-07 11:15:45 +00001272 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
John Harrison4ad2fd82015-06-18 13:11:20 +01001273 goto out;
1274 }
1275
Jackie Lif08e2032018-03-13 17:32:53 -07001276 ret = intel_wopcm_init_hw(&dev_priv->wopcm);
1277 if (ret) {
1278 DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
1279 goto out;
1280 }
1281
Michał Winiarski9bdc3572017-10-25 18:25:19 +01001282 /* We can't enable contexts until all firmware is loaded */
1283 ret = intel_uc_init_hw(dev_priv);
Chris Wilson8177e112018-02-07 11:15:45 +00001284 if (ret) {
1285 DRM_ERROR("Enabling uc failed (%d)\n", ret);
Michał Winiarski9bdc3572017-10-25 18:25:19 +01001286 goto out;
Chris Wilson8177e112018-02-07 11:15:45 +00001287 }
Michał Winiarski9bdc3572017-10-25 18:25:19 +01001288
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001289 intel_mocs_init_l3cc_table(dev_priv);
Peter Antoine0ccdacf2016-04-13 15:03:25 +01001290
Chris Wilson136109c2017-11-02 13:14:30 +00001291	/* Only when the HW is re-initialised can we replay the requests */
Chris Wilson79ffac852019-04-24 21:07:17 +01001292 ret = intel_engines_resume(dev_priv);
Michal Wajdeczkob96f6eb2018-06-05 12:24:43 +00001293 if (ret)
1294 goto cleanup_uc;
Michał Winiarski60c0a662018-07-12 14:48:10 +02001295
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001296 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Michał Winiarski60c0a662018-07-12 14:48:10 +02001297
Chris Wilson79ffac852019-04-24 21:07:17 +01001298 intel_engines_set_scheduler_caps(dev_priv);
Michał Winiarski60c0a662018-07-12 14:48:10 +02001299 return 0;
Michal Wajdeczkob96f6eb2018-06-05 12:24:43 +00001300
1301cleanup_uc:
1302 intel_uc_fini_hw(dev_priv);
Michał Winiarski60c0a662018-07-12 14:48:10 +02001303out:
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001304 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Michał Winiarski60c0a662018-07-12 14:48:10 +02001305
1306 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001307}
1308
Chris Wilsond2b4b972017-11-10 14:26:33 +00001309static int __intel_engines_record_defaults(struct drm_i915_private *i915)
1310{
Chris Wilsond2b4b972017-11-10 14:26:33 +00001311 struct intel_engine_cs *engine;
Chris Wilson5e2a0412019-04-26 17:33:34 +01001312 struct i915_gem_context *ctx;
1313 struct i915_gem_engines *e;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001314 enum intel_engine_id id;
Chris Wilson604c37d2019-03-08 09:36:55 +00001315 int err = 0;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001316
1317 /*
1318	 * As we reset the GPU during very early sanitisation, the current
1319	 * register state on the GPU should reflect its default values.
1320 * We load a context onto the hw (with restore-inhibit), then switch
1321 * over to a second context to save that default register state. We
1322 * can then prime every new context with that state so they all start
1323 * from the same default HW values.
1324 */
1325
1326 ctx = i915_gem_context_create_kernel(i915, 0);
1327 if (IS_ERR(ctx))
1328 return PTR_ERR(ctx);
1329
Chris Wilson5e2a0412019-04-26 17:33:34 +01001330 e = i915_gem_context_lock_engines(ctx);
1331
Chris Wilsond2b4b972017-11-10 14:26:33 +00001332 for_each_engine(engine, i915, id) {
Chris Wilson5e2a0412019-04-26 17:33:34 +01001333 struct intel_context *ce = e->engines[id];
Chris Wilsone61e0f52018-02-21 09:56:36 +00001334 struct i915_request *rq;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001335
Chris Wilson5e2a0412019-04-26 17:33:34 +01001336 rq = intel_context_create_request(ce);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001337 if (IS_ERR(rq)) {
1338 err = PTR_ERR(rq);
Chris Wilson5e2a0412019-04-26 17:33:34 +01001339 goto err_active;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001340 }
1341
Chris Wilson3fef5cd2017-11-20 10:20:02 +00001342 err = 0;
Chris Wilson5e2a0412019-04-26 17:33:34 +01001343 if (rq->engine->init_context)
1344 err = rq->engine->init_context(rq);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001345
Chris Wilson697b9a82018-06-12 11:51:35 +01001346 i915_request_add(rq);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001347 if (err)
1348 goto err_active;
1349 }
1350
Chris Wilson604c37d2019-03-08 09:36:55 +00001351 /* Flush the default context image to memory, and enable powersaving. */
Chris Wilson23c3c3d2019-04-24 21:07:14 +01001352 if (!i915_gem_load_power_context(i915)) {
Chris Wilson604c37d2019-03-08 09:36:55 +00001353 err = -EIO;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001354 goto err_active;
Chris Wilson2621cef2018-07-09 13:20:43 +01001355 }
Chris Wilsond2b4b972017-11-10 14:26:33 +00001356
Chris Wilsond2b4b972017-11-10 14:26:33 +00001357 for_each_engine(engine, i915, id) {
Chris Wilson5e2a0412019-04-26 17:33:34 +01001358 struct intel_context *ce = e->engines[id];
1359 struct i915_vma *state = ce->state;
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001360 void *vaddr;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001361
Chris Wilsond2b4b972017-11-10 14:26:33 +00001362 if (!state)
1363 continue;
1364
Chris Wilson08819542019-03-08 13:25:22 +00001365 GEM_BUG_ON(intel_context_is_pinned(ce));
Chris Wilsonc4d52fe2019-03-08 13:25:19 +00001366
Chris Wilsond2b4b972017-11-10 14:26:33 +00001367 /*
1368 * As we will hold a reference to the logical state, it will
1369 * not be torn down with the context, and importantly the
1370 * object will hold onto its vma (making it possible for a
1371 * stray GTT write to corrupt our defaults). Unmap the vma
1372 * from the GTT to prevent such accidents and reclaim the
1373 * space.
1374 */
1375 err = i915_vma_unbind(state);
1376 if (err)
1377 goto err_active;
1378
Chris Wilson6951e582019-05-28 10:29:51 +01001379 i915_gem_object_lock(state->obj);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001380 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
Chris Wilson6951e582019-05-28 10:29:51 +01001381 i915_gem_object_unlock(state->obj);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001382 if (err)
1383 goto err_active;
1384
1385 engine->default_state = i915_gem_object_get(state->obj);
Chris Wilsona679f582019-03-21 16:19:07 +00001386 i915_gem_object_set_cache_coherency(engine->default_state,
1387 I915_CACHE_LLC);
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001388
1389 /* Check we can acquire the image of the context state */
1390 vaddr = i915_gem_object_pin_map(engine->default_state,
Chris Wilson666424a2018-09-14 13:35:04 +01001391 I915_MAP_FORCE_WB);
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001392 if (IS_ERR(vaddr)) {
1393 err = PTR_ERR(vaddr);
1394 goto err_active;
1395 }
1396
1397 i915_gem_object_unpin_map(engine->default_state);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001398 }
1399
1400 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1401 unsigned int found = intel_engines_has_context_isolation(i915);
1402
1403 /*
1404 * Make sure that classes with multiple engine instances all
1405 * share the same basic configuration.
1406 */
1407 for_each_engine(engine, i915, id) {
1408 unsigned int bit = BIT(engine->uabi_class);
1409 unsigned int expected = engine->default_state ? bit : 0;
1410
1411 if ((found & bit) != expected) {
1412 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
1413 engine->uabi_class, engine->name);
1414 }
1415 }
1416 }
1417
1418out_ctx:
Chris Wilson5e2a0412019-04-26 17:33:34 +01001419 i915_gem_context_unlock_engines(ctx);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001420 i915_gem_context_set_closed(ctx);
1421 i915_gem_context_put(ctx);
1422 return err;
1423
1424err_active:
1425 /*
1426 * If we have to abandon now, we expect the engines to be idle
Chris Wilson604c37d2019-03-08 09:36:55 +00001427	 * and ready to be torn down. The quickest way we can accomplish
1428 * this is by declaring ourselves wedged.
Chris Wilsond2b4b972017-11-10 14:26:33 +00001429 */
Chris Wilson604c37d2019-03-08 09:36:55 +00001430 i915_gem_set_wedged(i915);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001431 goto out_ctx;
1432}
1433
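/*
 * Broadly, the scratch VMA allocated below is a small, permanently
 * GGTT-pinned buffer (stolen memory when available, otherwise an
 * internal object) that gives the GT a known-good GGTT address to use,
 * e.g. as a harmless target for post-sync writes and similar
 * housekeeping commands.
 */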
Chris Wilson51797492018-12-04 14:15:16 +00001434static int
1435i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
1436{
1437 struct drm_i915_gem_object *obj;
1438 struct i915_vma *vma;
1439 int ret;
1440
1441 obj = i915_gem_object_create_stolen(i915, size);
1442 if (!obj)
1443 obj = i915_gem_object_create_internal(i915, size);
1444 if (IS_ERR(obj)) {
1445 DRM_ERROR("Failed to allocate scratch page\n");
1446 return PTR_ERR(obj);
1447 }
1448
1449 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1450 if (IS_ERR(vma)) {
1451 ret = PTR_ERR(vma);
1452 goto err_unref;
1453 }
1454
1455 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1456 if (ret)
1457 goto err_unref;
1458
1459 i915->gt.scratch = vma;
1460 return 0;
1461
1462err_unref:
1463 i915_gem_object_put(obj);
1464 return ret;
1465}
1466
1467static void i915_gem_fini_scratch(struct drm_i915_private *i915)
1468{
1469 i915_vma_unpin_and_release(&i915->gt.scratch, 0);
1470}
1471
Chris Wilson254e1182019-04-17 08:56:28 +01001472static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
1473{
1474 struct intel_engine_cs *engine;
1475 enum intel_engine_id id;
1476 int err = 0;
1477
1478 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1479 return 0;
1480
1481 for_each_engine(engine, i915, id) {
1482 if (intel_engine_verify_workarounds(engine, "load"))
1483 err = -EIO;
1484 }
1485
1486 return err;
1487}
1488
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001489int i915_gem_init(struct drm_i915_private *dev_priv)
Chris Wilson1070a422012-04-24 15:47:41 +01001490{
Chris Wilson1070a422012-04-24 15:47:41 +01001491 int ret;
1492
Changbin Du52b24162018-05-08 17:07:05 +08001493	/* We need to fall back to 4K pages if the host doesn't support huge GTT. */
1494 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
Matthew Auldda9fe3f32017-10-06 23:18:31 +01001495 mkwrite_device_info(dev_priv)->page_sizes =
1496 I915_GTT_PAGE_SIZE_4K;
1497
Chris Wilson94312822017-05-03 10:39:18 +01001498 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
Chris Wilson57822dc2017-02-22 11:40:48 +00001499
Chris Wilson1e345562019-01-28 10:23:56 +00001500 i915_timelines_init(dev_priv);
1501
Chris Wilsonee487002017-11-22 17:26:21 +00001502 ret = i915_gem_init_userptr(dev_priv);
1503 if (ret)
1504 return ret;
1505
Sagar Arun Kamble70deead2018-01-24 21:16:58 +05301506 ret = intel_uc_init_misc(dev_priv);
Michał Winiarski3176ff42017-12-13 23:13:47 +01001507 if (ret)
1508 return ret;
1509
Michal Wajdeczkof7dc0152018-06-28 14:15:21 +00001510 ret = intel_wopcm_init(&dev_priv->wopcm);
1511 if (ret)
1512 goto err_uc_misc;
1513
Chris Wilson5e4f5182015-02-13 14:35:59 +00001514 /* This is just a security blanket to placate dragons.
1515 * On some systems, we very sporadically observe that the first TLBs
1516 * used by the CS may be stale, despite us poking the TLB reset. If
1517 * we hold the forcewake during initialisation these problems
1518 * just magically go away.
1519 */
Chris Wilsonee487002017-11-22 17:26:21 +00001520 mutex_lock(&dev_priv->drm.struct_mutex);
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001521 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson5e4f5182015-02-13 14:35:59 +00001522
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01001523 ret = i915_gem_init_ggtt(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001524 if (ret) {
1525 GEM_BUG_ON(ret == -EIO);
1526 goto err_unlock;
1527 }
Jesse Barnesd62b4892013-03-08 10:45:53 -08001528
Chris Wilson51797492018-12-04 14:15:16 +00001529 ret = i915_gem_init_scratch(dev_priv,
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001530 IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001531 if (ret) {
1532 GEM_BUG_ON(ret == -EIO);
1533 goto err_ggtt;
1534 }
Ben Widawsky2fa48d82013-12-06 14:11:04 -08001535
Chris Wilson11334c62019-04-26 17:33:33 +01001536 ret = intel_engines_setup(dev_priv);
1537 if (ret) {
1538 GEM_BUG_ON(ret == -EIO);
1539 goto err_unlock;
1540 }
1541
Chris Wilson51797492018-12-04 14:15:16 +00001542 ret = i915_gem_contexts_init(dev_priv);
1543 if (ret) {
1544 GEM_BUG_ON(ret == -EIO);
1545 goto err_scratch;
1546 }
1547
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001548 ret = intel_engines_init(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001549 if (ret) {
1550 GEM_BUG_ON(ret == -EIO);
1551 goto err_context;
1552 }
Daniel Vetter53ca26c2012-04-26 23:28:03 +02001553
Chris Wilsonf58d13d2017-11-10 14:26:29 +00001554 intel_init_gt_powersave(dev_priv);
1555
Michał Winiarski61b5c152017-12-13 23:13:48 +01001556 ret = intel_uc_init(dev_priv);
Chris Wilsoncc6a8182017-11-10 14:26:30 +00001557 if (ret)
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001558 goto err_pm;
Chris Wilsoncc6a8182017-11-10 14:26:30 +00001559
Michał Winiarski61b5c152017-12-13 23:13:48 +01001560 ret = i915_gem_init_hw(dev_priv);
1561 if (ret)
1562 goto err_uc_init;
1563
Chris Wilsoncc6a8182017-11-10 14:26:30 +00001564 /*
1565	 * Despite its name, intel_init_clock_gating applies not only display
1566	 * clock gating workarounds but also GT mmio workarounds and the occasional
1567 * GT power context workaround. Worse, sometimes it includes a context
1568 * register workaround which we need to apply before we record the
1569 * default HW state for all contexts.
1570 *
1571 * FIXME: break up the workarounds and apply them at the right time!
1572 */
1573 intel_init_clock_gating(dev_priv);
1574
Chris Wilson254e1182019-04-17 08:56:28 +01001575 ret = intel_engines_verify_workarounds(dev_priv);
1576 if (ret)
1577 goto err_init_hw;
1578
Chris Wilsond2b4b972017-11-10 14:26:33 +00001579 ret = __intel_engines_record_defaults(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001580 if (ret)
1581 goto err_init_hw;
1582
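	/*
	 * The two back-to-back fault-injection checks below are intentional
	 * rather than a copy-paste slip: i915_inject_load_failure() fires at
	 * most once per driver load (at the point selected by the
	 * inject_load_failure module parameter), so these provide two
	 * distinct injection points, apparently so that both the ordinary
	 * -ENODEV unwind and the wedge-but-keep-KMS-alive -EIO unwind can be
	 * exercised.
	 */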
1583 if (i915_inject_load_failure()) {
1584 ret = -ENODEV;
1585 goto err_init_hw;
1586 }
1587
1588 if (i915_inject_load_failure()) {
1589 ret = -EIO;
1590 goto err_init_hw;
1591 }
1592
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001593 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001594 mutex_unlock(&dev_priv->drm.struct_mutex);
1595
1596 return 0;
1597
1598 /*
1599	 * Unwinding is complicated by the fact that we want to handle -EIO to mean
1600	 * disable GPU submission but keep KMS alive. We want to mark the
1601	 * HW as irreversibly wedged, but keep enough state around that the
1602 * driver doesn't explode during runtime.
1603 */
1604err_init_hw:
Chris Wilson8571a052018-06-06 15:54:41 +01001605 mutex_unlock(&dev_priv->drm.struct_mutex);
1606
Chris Wilson79ffac852019-04-24 21:07:17 +01001607 i915_gem_set_wedged(dev_priv);
Chris Wilson5861b012019-03-08 09:36:54 +00001608 i915_gem_suspend(dev_priv);
Chris Wilson8571a052018-06-06 15:54:41 +01001609 i915_gem_suspend_late(dev_priv);
1610
Chris Wilson8bcf9f72018-07-10 10:44:20 +01001611 i915_gem_drain_workqueue(dev_priv);
1612
Chris Wilson8571a052018-06-06 15:54:41 +01001613 mutex_lock(&dev_priv->drm.struct_mutex);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001614 intel_uc_fini_hw(dev_priv);
Michał Winiarski61b5c152017-12-13 23:13:48 +01001615err_uc_init:
1616 intel_uc_fini(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001617err_pm:
1618 if (ret != -EIO) {
1619 intel_cleanup_gt_powersave(dev_priv);
Chris Wilson45b9c962019-05-01 11:32:04 +01001620 intel_engines_cleanup(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001621 }
1622err_context:
1623 if (ret != -EIO)
1624 i915_gem_contexts_fini(dev_priv);
Chris Wilson51797492018-12-04 14:15:16 +00001625err_scratch:
1626 i915_gem_fini_scratch(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001627err_ggtt:
1628err_unlock:
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001629 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001630 mutex_unlock(&dev_priv->drm.struct_mutex);
1631
Michal Wajdeczkof7dc0152018-06-28 14:15:21 +00001632err_uc_misc:
Sagar Arun Kamble70deead2018-01-24 21:16:58 +05301633 intel_uc_fini_misc(dev_priv);
Sagar Arun Kambleda943b52018-01-10 18:24:16 +05301634
Chris Wilson1e345562019-01-28 10:23:56 +00001635 if (ret != -EIO) {
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001636 i915_gem_cleanup_userptr(dev_priv);
Chris Wilson1e345562019-01-28 10:23:56 +00001637 i915_timelines_fini(dev_priv);
1638 }
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001639
Chris Wilson60990322014-04-09 09:19:42 +01001640 if (ret == -EIO) {
Chris Wilson7ed43df2018-07-26 09:50:32 +01001641 mutex_lock(&dev_priv->drm.struct_mutex);
1642
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001643 /*
1644 * Allow engine initialisation to fail by marking the GPU as
Chris Wilson60990322014-04-09 09:19:42 +01001645 * wedged. But we only want to do this where the GPU is angry,
1646	 * for any other failure, such as an allocation failure, we bail.
1647 */
Chris Wilsonc41166f2019-02-20 14:56:37 +00001648 if (!i915_reset_failed(dev_priv)) {
Chris Wilson51c18bf2018-06-09 12:10:58 +01001649 i915_load_error(dev_priv,
1650 "Failed to initialize GPU, declaring it wedged!\n");
Chris Wilson6f74b362017-10-15 15:37:25 +01001651 i915_gem_set_wedged(dev_priv);
1652 }
Chris Wilson7ed43df2018-07-26 09:50:32 +01001653
1654 /* Minimal basic recovery for KMS */
1655 ret = i915_ggtt_enable_hw(dev_priv);
1656 i915_gem_restore_gtt_mappings(dev_priv);
1657 i915_gem_restore_fences(dev_priv);
1658 intel_init_clock_gating(dev_priv);
1659
1660 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson1070a422012-04-24 15:47:41 +01001661 }
1662
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001663 i915_gem_drain_freed_objects(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01001664 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01001665}
1666
Janusz Krzysztofik47bc28d2019-05-30 15:31:05 +02001667void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001668{
Chris Wilson79ffac852019-04-24 21:07:17 +01001669 GEM_BUG_ON(dev_priv->gt.awake);
1670
Chris Wilsonb27e35a2019-05-27 12:51:14 +01001671 intel_wakeref_auto_fini(&dev_priv->mm.userfault_wakeref);
1672
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001673 i915_gem_suspend_late(dev_priv);
Chris Wilson30b710842018-08-12 23:36:29 +01001674 intel_disable_gt_powersave(dev_priv);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001675
1676 /* Flush any outstanding unpin_work. */
1677 i915_gem_drain_workqueue(dev_priv);
1678
1679 mutex_lock(&dev_priv->drm.struct_mutex);
1680 intel_uc_fini_hw(dev_priv);
1681 intel_uc_fini(dev_priv);
Janusz Krzysztofik47bc28d2019-05-30 15:31:05 +02001682 mutex_unlock(&dev_priv->drm.struct_mutex);
1683
1684 i915_gem_drain_freed_objects(dev_priv);
1685}
1686
1687void i915_gem_fini(struct drm_i915_private *dev_priv)
1688{
1689 mutex_lock(&dev_priv->drm.struct_mutex);
Chris Wilson45b9c962019-05-01 11:32:04 +01001690 intel_engines_cleanup(dev_priv);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001691 i915_gem_contexts_fini(dev_priv);
Chris Wilson51797492018-12-04 14:15:16 +00001692 i915_gem_fini_scratch(dev_priv);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001693 mutex_unlock(&dev_priv->drm.struct_mutex);
1694
Tvrtko Ursulin25d140f2018-12-03 13:33:19 +00001695 intel_wa_list_free(&dev_priv->gt_wa_list);
1696
Chris Wilson30b710842018-08-12 23:36:29 +01001697 intel_cleanup_gt_powersave(dev_priv);
1698
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001699 intel_uc_fini_misc(dev_priv);
1700 i915_gem_cleanup_userptr(dev_priv);
Chris Wilson1e345562019-01-28 10:23:56 +00001701 i915_timelines_fini(dev_priv);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001702
1703 i915_gem_drain_freed_objects(dev_priv);
1704
1705 WARN_ON(!list_empty(&dev_priv->contexts.list));
1706}
1707
Chris Wilson24145512017-01-24 11:01:35 +00001708void i915_gem_init_mmio(struct drm_i915_private *i915)
1709{
1710 i915_gem_sanitize(i915);
1711}
1712
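/*
 * Fence registers are the hardware detilers, primarily used here for CPU
 * access to tiled buffers through the mappable aperture. Their count is
 * fixed per platform (8, 16 or 32), and under a vGPU only the portion
 * assigned to us by the host is usable, hence the vgtif query below.
 */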
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001713void
Imre Deak40ae4e12016-03-16 14:54:03 +02001714i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
1715{
Chris Wilson49ef5292016-08-18 17:17:00 +01001716 int i;
Imre Deak40ae4e12016-03-16 14:54:03 +02001717
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +00001718 if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
Imre Deak40ae4e12016-03-16 14:54:03 +02001719 !IS_CHERRYVIEW(dev_priv))
1720 dev_priv->num_fence_regs = 32;
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +00001721 else if (INTEL_GEN(dev_priv) >= 4 ||
Jani Nikula73f67aa2016-12-07 22:48:09 +02001722 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
1723 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
Imre Deak40ae4e12016-03-16 14:54:03 +02001724 dev_priv->num_fence_regs = 16;
1725 else
1726 dev_priv->num_fence_regs = 8;
1727
Chris Wilsonc0336662016-05-06 15:40:21 +01001728 if (intel_vgpu_active(dev_priv))
Imre Deak40ae4e12016-03-16 14:54:03 +02001729 dev_priv->num_fence_regs =
1730 I915_READ(vgtif_reg(avail_rs.fence_num));
1731
1732 /* Initialize fence registers to zero */
Chris Wilson49ef5292016-08-18 17:17:00 +01001733 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1734 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
1735
1736 fence->i915 = dev_priv;
1737 fence->id = i;
1738 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
1739 }
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00001740 i915_gem_restore_fences(dev_priv);
Imre Deak40ae4e12016-03-16 14:54:03 +02001741
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00001742 i915_gem_detect_bit_6_swizzle(dev_priv);
Imre Deak40ae4e12016-03-16 14:54:03 +02001743}
1744
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001745static void i915_gem_init__mm(struct drm_i915_private *i915)
1746{
1747 spin_lock_init(&i915->mm.object_stat_lock);
1748 spin_lock_init(&i915->mm.obj_lock);
1749 spin_lock_init(&i915->mm.free_lock);
1750
1751 init_llist_head(&i915->mm.free_list);
1752
1753 INIT_LIST_HEAD(&i915->mm.unbound_list);
1754 INIT_LIST_HEAD(&i915->mm.bound_list);
1755 INIT_LIST_HEAD(&i915->mm.fence_list);
Chris Wilsonb27e35a2019-05-27 12:51:14 +01001756
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001757 INIT_LIST_HEAD(&i915->mm.userfault_list);
Chris Wilsonb27e35a2019-05-27 12:51:14 +01001758 intel_wakeref_auto_init(&i915->mm.userfault_wakeref, i915);
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001759
Chris Wilson84753552019-05-28 10:29:45 +01001760 i915_gem_init__objects(i915);
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001761}
1762
Michal Wajdeczkoa0de9082018-03-23 12:34:49 +00001763int i915_gem_init_early(struct drm_i915_private *dev_priv)
Eric Anholt673a3942008-07-30 12:06:12 -07001764{
Chris Wilson13f1bfd2019-02-28 10:20:34 +00001765 int err;
Chris Wilsond1b48c12017-08-16 09:52:08 +01001766
Chris Wilson79ffac852019-04-24 21:07:17 +01001767 intel_gt_pm_init(dev_priv);
1768
Chris Wilson643b4502018-04-30 14:15:03 +01001769 INIT_LIST_HEAD(&dev_priv->gt.active_rings);
Chris Wilson3365e222018-05-03 20:51:14 +01001770 INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
Chris Wilson643b4502018-04-30 14:15:03 +01001771
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001772 i915_gem_init__mm(dev_priv);
Chris Wilson23c3c3d2019-04-24 21:07:14 +01001773 i915_gem_init__pm(dev_priv);
Chris Wilsonf2123812017-10-16 12:40:37 +01001774
Chris Wilson1f15b762016-07-01 17:23:14 +01001775 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001776 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson18bb2bc2019-01-14 21:04:01 +00001777 mutex_init(&dev_priv->gpu_error.wedge_mutex);
Chris Wilson2caffbf2019-02-08 15:37:03 +00001778 init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
Chris Wilson31169712009-09-14 16:50:28 +01001779
Joonas Lahtinen6f633402016-09-01 14:58:21 +03001780 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
1781
Chris Wilsonb5add952016-08-04 16:32:36 +01001782 spin_lock_init(&dev_priv->fb_tracking.lock);
Chris Wilson73cb9702016-10-28 13:58:46 +01001783
Matthew Auld465c4032017-10-06 23:18:14 +01001784 err = i915_gemfs_init(dev_priv);
1785 if (err)
1786 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
1787
Chris Wilson73cb9702016-10-28 13:58:46 +01001788 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001789}
Dave Airlie71acb5e2008-12-30 20:31:46 +10001790
Michal Wajdeczkoa0de9082018-03-23 12:34:49 +00001791void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
Imre Deakd64aa092016-01-19 15:26:29 +02001792{
Chris Wilsonc4d4c1c2017-02-10 16:35:23 +00001793 i915_gem_drain_freed_objects(dev_priv);
Chris Wilsonc9c704712018-02-19 22:06:31 +00001794 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1795 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
Chris Wilsonc4d4c1c2017-02-10 16:35:23 +00001796 WARN_ON(dev_priv->mm.object_count);
Matthew Auldea84aa72016-11-17 21:04:11 +00001797
Chris Wilson2caffbf2019-02-08 15:37:03 +00001798 cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
1799
Matthew Auld465c4032017-10-06 23:18:14 +01001800 i915_gemfs_fini(dev_priv);
Imre Deakd64aa092016-01-19 15:26:29 +02001801}
1802
Chris Wilson6a800ea2016-09-21 14:51:07 +01001803int i915_gem_freeze(struct drm_i915_private *dev_priv)
1804{
Chris Wilsond0aa3012017-04-07 11:25:49 +01001805	/* Discard all purgeable objects; let userspace recover those as
1806 * required after resuming.
1807 */
Chris Wilson6a800ea2016-09-21 14:51:07 +01001808 i915_gem_shrink_all(dev_priv);
Chris Wilson6a800ea2016-09-21 14:51:07 +01001809
Chris Wilson6a800ea2016-09-21 14:51:07 +01001810 return 0;
1811}
1812
Chris Wilson95c778d2018-06-01 15:41:25 +01001813int i915_gem_freeze_late(struct drm_i915_private *i915)
Chris Wilson461fb992016-05-14 07:26:33 +01001814{
1815 struct drm_i915_gem_object *obj;
Chris Wilson7aab2d52016-09-09 20:02:18 +01001816 struct list_head *phases[] = {
Chris Wilson95c778d2018-06-01 15:41:25 +01001817 &i915->mm.unbound_list,
1818 &i915->mm.bound_list,
Chris Wilson7aab2d52016-09-09 20:02:18 +01001819 NULL
Chris Wilson95c778d2018-06-01 15:41:25 +01001820 }, **phase;
Chris Wilson461fb992016-05-14 07:26:33 +01001821
Chris Wilson95c778d2018-06-01 15:41:25 +01001822 /*
1823 * Called just before we write the hibernation image.
Chris Wilson461fb992016-05-14 07:26:33 +01001824 *
1825 * We need to update the domain tracking to reflect that the CPU
1826 * will be accessing all the pages to create and restore from the
1827 * hibernation, and so upon restoration those pages will be in the
1828 * CPU domain.
1829 *
1830 * To make sure the hibernation image contains the latest state,
1831 * we update that state just before writing out the image.
Chris Wilson7aab2d52016-09-09 20:02:18 +01001832 *
1833	 * To try to reduce the hibernation image, we manually shrink
Chris Wilsond0aa3012017-04-07 11:25:49 +01001834	 * the objects as well; see i915_gem_freeze().
Chris Wilson461fb992016-05-14 07:26:33 +01001835 */
1836
Chris Wilson95c778d2018-06-01 15:41:25 +01001837 i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
1838 i915_gem_drain_freed_objects(i915);
Chris Wilson461fb992016-05-14 07:26:33 +01001839
Chris Wilson95c778d2018-06-01 15:41:25 +01001840 for (phase = phases; *phase; phase++) {
Chris Wilson6951e582019-05-28 10:29:51 +01001841 list_for_each_entry(obj, *phase, mm.link) {
1842 i915_gem_object_lock(obj);
Chris Wilson95c778d2018-06-01 15:41:25 +01001843 WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
Chris Wilson6951e582019-05-28 10:29:51 +01001844 i915_gem_object_unlock(obj);
1845 }
Chris Wilson461fb992016-05-14 07:26:33 +01001846 }
1847
1848 return 0;
1849}
1850
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001851void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00001852{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001853 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsone61e0f52018-02-21 09:56:36 +00001854 struct i915_request *request;
Eric Anholtb9624422009-06-03 07:27:35 +00001855
1856 /* Clean up our request list when the client is going away, so that
1857 * later retire_requests won't dereference our soon-to-be-gone
1858 * file_priv.
1859 */
Chris Wilson1c255952010-09-26 11:03:27 +01001860 spin_lock(&file_priv->mm.lock);
Chris Wilsonc8659ef2017-03-02 12:25:25 +00001861 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001862 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01001863 spin_unlock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001864}
1865
Chris Wilson829a0af2017-06-20 12:05:45 +01001866int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001867{
1868 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08001869 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001870
Chris Wilsonc4c29d72016-11-09 10:45:07 +00001871 DRM_DEBUG("\n");
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001872
1873 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1874 if (!file_priv)
1875 return -ENOMEM;
1876
1877 file->driver_priv = file_priv;
Chris Wilson829a0af2017-06-20 12:05:45 +01001878 file_priv->dev_priv = i915;
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001879 file_priv->file = file;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001880
1881 spin_lock_init(&file_priv->mm.lock);
1882 INIT_LIST_HEAD(&file_priv->mm.request_list);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001883
Chris Wilsonc80ff162016-07-27 09:07:27 +01001884 file_priv->bsd_engine = -1;
Mika Kuoppala14921f32018-06-15 13:44:29 +03001885 file_priv->hang_timestamp = jiffies;
Tvrtko Ursulinde1add32016-01-15 15:12:50 +00001886
Chris Wilson829a0af2017-06-20 12:05:45 +01001887 ret = i915_gem_context_open(i915, file);
Ben Widawskye422b882013-12-06 14:10:58 -08001888 if (ret)
1889 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001890
Ben Widawskye422b882013-12-06 14:10:58 -08001891 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001892}
1893
Daniel Vetterb680c372014-09-19 18:27:27 +02001894/**
1895 * i915_gem_track_fb - update frontbuffer tracking
Geliang Tangd9072a32015-09-15 05:58:44 -07001896 * @old: current GEM buffer for the frontbuffer slots
1897 * @new: new GEM buffer for the frontbuffer slots
1898 * @frontbuffer_bits: bitmask of frontbuffer slots
Daniel Vetterb680c372014-09-19 18:27:27 +02001899 *
1900 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
1901 * from @old and setting them in @new. Both @old and @new can be NULL.
1902 */
Daniel Vettera071fa02014-06-18 23:28:09 +02001903void i915_gem_track_fb(struct drm_i915_gem_object *old,
1904 struct drm_i915_gem_object *new,
1905 unsigned frontbuffer_bits)
1906{
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01001907	/* Control of individual bits within the mask is guarded by
1908 * the owning plane->mutex, i.e. we can never see concurrent
1909 * manipulation of individual bits. But since the bitfield as a whole
1910 * is updated using RMW, we need to use atomics in order to update
1911 * the bits.
1912 */
1913 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
Chris Wilson74f6e182018-09-26 11:47:07 +01001914 BITS_PER_TYPE(atomic_t));
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01001915
Daniel Vettera071fa02014-06-18 23:28:09 +02001916 if (old) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01001917 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
1918 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02001919 }
1920
1921 if (new) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01001922 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
1923 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02001924 }
1925}
1926
Chris Wilson935a2f72017-02-13 17:15:13 +00001927#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
Chris Wilson66d9cb52017-02-13 17:15:17 +00001928#include "selftests/mock_gem_device.c"
Chris Wilson3f51b7e12018-08-30 14:48:06 +01001929#include "selftests/i915_gem.c"
Chris Wilson935a2f72017-02-13 17:15:13 +00001930#endif