/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_drv.h"
#include "intel_pm.h"

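/*
 * Reserve a node in the mappable range of the GGTT. The GGTT pread/pwrite
 * slow paths below use such a node as a temporary window into the aperture
 * when the object itself cannot be pinned there.
 */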
static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	mutex_lock(&ggtt->vm.mutex);

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

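/*
 * Unbind every VMA belonging to this object. Unless
 * I915_GEM_OBJECT_UNBIND_ACTIVE is given, the walk stops at the first
 * still-active VMA and -EBUSY is returned.
 */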
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		ret = -EBUSY;
		if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
		    !i915_vma_is_active(vma))
			ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

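/*
 * Write user data directly into the kernel mapping of a phys_handle backed
 * object, then flush the CPU caches and the chipset so the GPU observes the
 * new contents.
 */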
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

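/*
 * Common backend for the create and dumb_create ioctls: round the requested
 * size up to whole pages, allocate a shmem-backed object and return both the
 * new handle and the final size to the caller.
 */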
static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}

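/* Per-page copy function for the shmem pread fastpath, clflushing the
 * source range first when the read must be coherent with the GPU.
 */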
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}

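/* Copy from the GGTT aperture to userspace: first through an atomic WC
 * mapping, and if that copy faults, again through a regular WC mapping that
 * may sleep. Returns non-zero if any bytes remain uncopied.
 */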
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

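/*
 * pread via the GGTT: pin the object into the mappable aperture (or fall
 * back to a single-page scratch node) and copy it out page by page with
 * gtt_user_read(). Used when the shmem path cannot service the read.
 */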
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

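/*
 * Write user data into a shmem-backed object page by page, clflushing
 * around partially written cachelines as dictated by the object's
 * CLFLUSH_BEFORE/CLFLUSH_AFTER requirements.
 */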
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &i915->ggtt.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

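/* If the engines fail to settle into idle in time, declare the GPU wedged. */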
static int wait_for_engines(struct intel_gt *gt)
{
	if (wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(gt->i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		GEM_TRACE_DUMP();
		intel_gt_set_wedged(gt);
		return -EIO;
	}

	return 0;
}

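/*
 * Wait for the last request on every active timeline to complete, dropping
 * and retaking the timeline lock around each blocking wait.
 */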
static long
wait_for_timelines(struct drm_i915_private *i915,
		   unsigned int flags, long timeout)
{
	struct intel_gt_timelines *gt = &i915->gt.timelines;
	struct intel_timeline *tl;

	mutex_lock(&gt->mutex);
	list_for_each_entry(tl, &gt->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		mutex_unlock(&gt->mutex);

		/*
		 * "Race-to-idle".
		 *
		 * Switching to the kernel context is often used as a
		 * synchronous step prior to idling, e.g. in suspend for
		 * flushing all current operations to memory before sleeping.
		 * These we want to complete as quickly as possible to avoid
		 * prolonged stalls, so allow the gpu to boost to maximum
		 * clocks.
		 */
		if (flags & I915_WAIT_FOR_IDLE_BOOST)
			gen6_rps_boost(rq);

		timeout = i915_request_wait(rq, flags, timeout);
		i915_request_put(rq);
		if (timeout < 0)
			return timeout;

		/* restart after reacquiring the lock */
		mutex_lock(&gt->mutex);
		tl = list_entry(&gt->active_list, typeof(*tl), link);
	}
	mutex_unlock(&gt->mutex);

	return timeout;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915,
			   unsigned int flags, long timeout)
{
	GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
		  timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
		  yesno(i915->gt.awake));

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	timeout = wait_for_timelines(i915, flags, timeout);
	if (timeout < 0)
		return timeout;

	if (flags & I915_WAIT_LOCKED) {
		int err;

		lockdep_assert_held(&i915->drm.struct_mutex);

		err = wait_for_engines(&i915->gt);
		if (err)
			return err;

		i915_retire_requests(i915);
	}

	return 0;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	if (i915_gem_object_has_pages(obj)) {
		struct list_head *list;

		if (i915_gem_object_is_shrinkable(obj)) {
			unsigned long flags;

			spin_lock_irqsave(&i915->mm.obj_lock, flags);

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		}
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}

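/*
 * Bring the GPU back to a known state after resume or load: clear any stale
 * wedged status and scrub whatever state the BIOS or a previous occupant
 * left behind.
 */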
Chris Wilson24145512017-01-24 11:01:35 +00001137void i915_gem_sanitize(struct drm_i915_private *i915)
1138{
Chris Wilson538ef962019-01-14 14:21:18 +00001139 intel_wakeref_t wakeref;
1140
Chris Wilsonc3160da2018-05-31 09:22:45 +01001141 GEM_TRACE("\n");
1142
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001143 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001144 intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
Chris Wilsonc3160da2018-05-31 09:22:45 +01001145
1146 /*
1147 * As we have just resumed the machine and woken the device up from
1148 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
1149 * back to defaults, recovering from whatever wedged state we left it
 1150 * in, and so it is worth trying to use the device once more.
1151 */
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001152 if (intel_gt_is_wedged(&i915->gt))
1153 intel_gt_unset_wedged(&i915->gt);
Chris Wilsonf36325f2017-08-26 12:09:34 +01001154
Chris Wilson24145512017-01-24 11:01:35 +00001155 /*
1156 * If we inherit context state from the BIOS or earlier occupants
1157 * of the GPU, the GPU may be in an inconsistent state when we
1158 * try to take over. The only way to remove the earlier state
1159 * is by resetting. However, resetting on earlier gen is tricky as
1160 * it may impact the display and we are uncertain about the stability
Joonas Lahtinenea117b82017-04-28 10:53:38 +03001161 * of the reset, though this could in principle be applied to even earlier gens.
Chris Wilson24145512017-01-24 11:01:35 +00001162 */
Chris Wilson0c916212019-06-25 14:01:10 +01001163 intel_gt_sanitize(&i915->gt, false);
Chris Wilsonc3160da2018-05-31 09:22:45 +01001164
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001165 intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001166 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
Chris Wilson24145512017-01-24 11:01:35 +00001167}
1168
Tvrtko Ursulincf6844b2019-06-21 08:07:47 +01001169static void init_unused_ring(struct intel_gt *gt, u32 base)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03001170{
Tvrtko Ursulincf6844b2019-06-21 08:07:47 +01001171 struct intel_uncore *uncore = gt->uncore;
1172
1173 intel_uncore_write(uncore, RING_CTL(base), 0);
1174 intel_uncore_write(uncore, RING_HEAD(base), 0);
1175 intel_uncore_write(uncore, RING_TAIL(base), 0);
1176 intel_uncore_write(uncore, RING_START(base), 0);
Ville Syrjälä81e7f202014-08-15 01:21:55 +03001177}
1178
Tvrtko Ursulincf6844b2019-06-21 08:07:47 +01001179static void init_unused_rings(struct intel_gt *gt)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03001180{
Tvrtko Ursulincf6844b2019-06-21 08:07:47 +01001181 struct drm_i915_private *i915 = gt->i915;
1182
1183 if (IS_I830(i915)) {
1184 init_unused_ring(gt, PRB1_BASE);
1185 init_unused_ring(gt, SRB0_BASE);
1186 init_unused_ring(gt, SRB1_BASE);
1187 init_unused_ring(gt, SRB2_BASE);
1188 init_unused_ring(gt, SRB3_BASE);
1189 } else if (IS_GEN(i915, 2)) {
1190 init_unused_ring(gt, SRB0_BASE);
1191 init_unused_ring(gt, SRB1_BASE);
1192 } else if (IS_GEN(i915, 3)) {
1193 init_unused_ring(gt, PRB1_BASE);
1194 init_unused_ring(gt, PRB2_BASE);
Ville Syrjälä81e7f202014-08-15 01:21:55 +03001195 }
1196}
1197
Chris Wilson092be382019-06-26 16:45:49 +01001198int i915_gem_init_hw(struct drm_i915_private *i915)
Chris Wilson20a8a742017-02-08 14:30:31 +00001199{
Chris Wilson092be382019-06-26 16:45:49 +01001200 struct intel_uncore *uncore = &i915->uncore;
1201 struct intel_gt *gt = &i915->gt;
Chris Wilsond200cda2016-04-28 09:56:44 +01001202 int ret;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08001203
Chris Wilson092be382019-06-26 16:45:49 +01001204 BUG_ON(!i915->kernel_context);
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001205 ret = intel_gt_terminally_wedged(gt);
Chris Wilson092be382019-06-26 16:45:49 +01001206 if (ret)
1207 return ret;
1208
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001209 gt->last_init_time = ktime_get();
Chris Wilsonde867c22016-10-25 13:16:02 +01001210
Chris Wilson5e4f5182015-02-13 14:35:59 +00001211 /* Double layer security blanket, see i915_gem_init() */
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001212 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
Chris Wilson5e4f5182015-02-13 14:35:59 +00001213
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001214 if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
1215 intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08001216
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001217 if (IS_HASWELL(i915))
1218 intel_uncore_write(uncore,
1219 MI_PREDICATE_RESULT_2,
1220 IS_HSW_GT3(i915) ?
1221 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03001222
Tvrtko Ursulin094304b2018-12-03 12:50:10 +00001223 /* Apply the GT workarounds... */
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001224 intel_gt_apply_workarounds(gt);
Tvrtko Ursulin094304b2018-12-03 12:50:10 +00001225 /* ...and determine whether they are sticking. */
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001226 intel_gt_verify_workarounds(gt, "init");
Oscar Mateo59b449d2018-04-10 09:12:47 -07001227
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001228 intel_gt_init_swizzling(gt);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08001229
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01001230 /*
1231 * At least 830 can leave some of the unused rings
1232 * "active" (ie. head != tail) after resume which
1233 * will prevent c3 entry. Makes sure all unused rings
1234 * are totally idle.
1235 */
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001236 init_unused_rings(gt);
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01001237
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001238 ret = i915_ppgtt_init_hw(gt);
John Harrison4ad2fd82015-06-18 13:11:20 +01001239 if (ret) {
Chris Wilson8177e112018-02-07 11:15:45 +00001240 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
John Harrison4ad2fd82015-06-18 13:11:20 +01001241 goto out;
1242 }
1243
Tvrtko Ursulin6b0a8df2019-06-21 08:07:55 +01001244 ret = intel_wopcm_init_hw(&i915->wopcm, gt);
Jackie Lif08e2032018-03-13 17:32:53 -07001245 if (ret) {
1246 DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
1247 goto out;
1248 }
1249
Michał Winiarski9bdc3572017-10-25 18:25:19 +01001250 /* We can't enable contexts until all firmware is loaded */
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001251 ret = intel_uc_init_hw(i915);
Chris Wilson8177e112018-02-07 11:15:45 +00001252 if (ret) {
1253 DRM_ERROR("Enabling uc failed (%d)\n", ret);
Michał Winiarski9bdc3572017-10-25 18:25:19 +01001254 goto out;
Chris Wilson8177e112018-02-07 11:15:45 +00001255 }
Michał Winiarski9bdc3572017-10-25 18:25:19 +01001256
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001257 intel_mocs_init_l3cc_table(gt);
Peter Antoine0ccdacf2016-04-13 15:03:25 +01001258
Chris Wilson092be382019-06-26 16:45:49 +01001259 intel_engines_set_scheduler_caps(i915);
Michal Wajdeczkob96f6eb2018-06-05 12:24:43 +00001260
Michał Winiarski60c0a662018-07-12 14:48:10 +02001261out:
Tvrtko Ursulinabc584f2019-06-21 08:07:53 +01001262 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
Michał Winiarski60c0a662018-07-12 14:48:10 +02001263 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001264}
1265
Chris Wilsond2b4b972017-11-10 14:26:33 +00001266static int __intel_engines_record_defaults(struct drm_i915_private *i915)
1267{
Chris Wilsond2b4b972017-11-10 14:26:33 +00001268 struct intel_engine_cs *engine;
Chris Wilson5e2a0412019-04-26 17:33:34 +01001269 struct i915_gem_context *ctx;
1270 struct i915_gem_engines *e;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001271 enum intel_engine_id id;
Chris Wilson604c37d2019-03-08 09:36:55 +00001272 int err = 0;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001273
1274 /*
1275 * As we reset the gpu during very early sanitisation, the current
 1276 * register state on the GPU should reflect its default values.
1277 * We load a context onto the hw (with restore-inhibit), then switch
1278 * over to a second context to save that default register state. We
1279 * can then prime every new context with that state so they all start
1280 * from the same default HW values.
1281 */
1282
1283 ctx = i915_gem_context_create_kernel(i915, 0);
1284 if (IS_ERR(ctx))
1285 return PTR_ERR(ctx);
1286
Chris Wilson5e2a0412019-04-26 17:33:34 +01001287 e = i915_gem_context_lock_engines(ctx);
1288
Chris Wilsond2b4b972017-11-10 14:26:33 +00001289 for_each_engine(engine, i915, id) {
Chris Wilson5e2a0412019-04-26 17:33:34 +01001290 struct intel_context *ce = e->engines[id];
Chris Wilsone61e0f52018-02-21 09:56:36 +00001291 struct i915_request *rq;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001292
Chris Wilson5e2a0412019-04-26 17:33:34 +01001293 rq = intel_context_create_request(ce);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001294 if (IS_ERR(rq)) {
1295 err = PTR_ERR(rq);
Chris Wilson5e2a0412019-04-26 17:33:34 +01001296 goto err_active;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001297 }
1298
Chris Wilson3fef5cd2017-11-20 10:20:02 +00001299 err = 0;
Chris Wilson5e2a0412019-04-26 17:33:34 +01001300 if (rq->engine->init_context)
1301 err = rq->engine->init_context(rq);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001302
Chris Wilson697b9a82018-06-12 11:51:35 +01001303 i915_request_add(rq);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001304 if (err)
1305 goto err_active;
1306 }
1307
Chris Wilson604c37d2019-03-08 09:36:55 +00001308 /* Flush the default context image to memory, and enable powersaving. */
Chris Wilson23c3c3d2019-04-24 21:07:14 +01001309 if (!i915_gem_load_power_context(i915)) {
Chris Wilson604c37d2019-03-08 09:36:55 +00001310 err = -EIO;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001311 goto err_active;
Chris Wilson2621cef2018-07-09 13:20:43 +01001312 }
Chris Wilsond2b4b972017-11-10 14:26:33 +00001313
Chris Wilsond2b4b972017-11-10 14:26:33 +00001314 for_each_engine(engine, i915, id) {
Chris Wilson5e2a0412019-04-26 17:33:34 +01001315 struct intel_context *ce = e->engines[id];
1316 struct i915_vma *state = ce->state;
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001317 void *vaddr;
Chris Wilsond2b4b972017-11-10 14:26:33 +00001318
Chris Wilsond2b4b972017-11-10 14:26:33 +00001319 if (!state)
1320 continue;
1321
Chris Wilson08819542019-03-08 13:25:22 +00001322 GEM_BUG_ON(intel_context_is_pinned(ce));
Chris Wilsonc4d52fe2019-03-08 13:25:19 +00001323
Chris Wilsond2b4b972017-11-10 14:26:33 +00001324 /*
1325 * As we will hold a reference to the logical state, it will
1326 * not be torn down with the context, and importantly the
1327 * object will hold onto its vma (making it possible for a
1328 * stray GTT write to corrupt our defaults). Unmap the vma
1329 * from the GTT to prevent such accidents and reclaim the
1330 * space.
1331 */
1332 err = i915_vma_unbind(state);
1333 if (err)
1334 goto err_active;
1335
Chris Wilson6951e582019-05-28 10:29:51 +01001336 i915_gem_object_lock(state->obj);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001337 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
Chris Wilson6951e582019-05-28 10:29:51 +01001338 i915_gem_object_unlock(state->obj);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001339 if (err)
1340 goto err_active;
1341
1342 engine->default_state = i915_gem_object_get(state->obj);
Chris Wilsona679f582019-03-21 16:19:07 +00001343 i915_gem_object_set_cache_coherency(engine->default_state,
1344 I915_CACHE_LLC);
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001345
1346 /* Check we can acquire the image of the context state */
1347 vaddr = i915_gem_object_pin_map(engine->default_state,
Chris Wilson666424a2018-09-14 13:35:04 +01001348 I915_MAP_FORCE_WB);
Chris Wilson37d7c9c2018-09-14 13:35:03 +01001349 if (IS_ERR(vaddr)) {
1350 err = PTR_ERR(vaddr);
1351 goto err_active;
1352 }
1353
1354 i915_gem_object_unpin_map(engine->default_state);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001355 }
1356
1357 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1358 unsigned int found = intel_engines_has_context_isolation(i915);
1359
1360 /*
1361 * Make sure that classes with multiple engine instances all
1362 * share the same basic configuration.
1363 */
1364 for_each_engine(engine, i915, id) {
1365 unsigned int bit = BIT(engine->uabi_class);
1366 unsigned int expected = engine->default_state ? bit : 0;
1367
1368 if ((found & bit) != expected) {
1369 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
1370 engine->uabi_class, engine->name);
1371 }
1372 }
1373 }
1374
1375out_ctx:
Chris Wilson5e2a0412019-04-26 17:33:34 +01001376 i915_gem_context_unlock_engines(ctx);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001377 i915_gem_context_set_closed(ctx);
1378 i915_gem_context_put(ctx);
1379 return err;
1380
1381err_active:
1382 /*
1383 * If we have to abandon now, we expect the engines to be idle
Chris Wilson604c37d2019-03-08 09:36:55 +00001384 * and ready to be torn down. The quickest way we can accomplish
1385 * this is by declaring ourselves wedged.
Chris Wilsond2b4b972017-11-10 14:26:33 +00001386 */
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001387 intel_gt_set_wedged(&i915->gt);
Chris Wilsond2b4b972017-11-10 14:26:33 +00001388 goto out_ctx;
1389}
1390
Chris Wilson51797492018-12-04 14:15:16 +00001391static int
1392i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
1393{
Tvrtko Ursulindb56f972019-06-21 08:08:11 +01001394 return intel_gt_init_scratch(&i915->gt, size);
Chris Wilson51797492018-12-04 14:15:16 +00001395}
1396
1397static void i915_gem_fini_scratch(struct drm_i915_private *i915)
1398{
Tvrtko Ursulindb56f972019-06-21 08:08:11 +01001399 intel_gt_fini_scratch(&i915->gt);
Chris Wilson51797492018-12-04 14:15:16 +00001400}
1401
Chris Wilson254e1182019-04-17 08:56:28 +01001402static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
1403{
1404 struct intel_engine_cs *engine;
1405 enum intel_engine_id id;
1406 int err = 0;
1407
1408 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1409 return 0;
1410
1411 for_each_engine(engine, i915, id) {
1412 if (intel_engine_verify_workarounds(engine, "load"))
1413 err = -EIO;
1414 }
1415
1416 return err;
1417}
1418
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001419int i915_gem_init(struct drm_i915_private *dev_priv)
Chris Wilson1070a422012-04-24 15:47:41 +01001420{
Chris Wilson1070a422012-04-24 15:47:41 +01001421 int ret;
1422
Changbin Du52b24162018-05-08 17:07:05 +08001423 /* We need to fall back to 4K pages if the host doesn't support huge gtt. */
1424 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
Matthew Auldda9fe3f32017-10-06 23:18:31 +01001425 mkwrite_device_info(dev_priv)->page_sizes =
1426 I915_GTT_PAGE_SIZE_4K;
1427
Chris Wilson94312822017-05-03 10:39:18 +01001428 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
Chris Wilson57822dc2017-02-22 11:40:48 +00001429
Tvrtko Ursulinf0c02c12019-06-21 08:08:10 +01001430 intel_timelines_init(dev_priv);
Chris Wilson1e345562019-01-28 10:23:56 +00001431
Chris Wilsonee487002017-11-22 17:26:21 +00001432 ret = i915_gem_init_userptr(dev_priv);
1433 if (ret)
1434 return ret;
1435
Sagar Arun Kamble70deead2018-01-24 21:16:58 +05301436 ret = intel_uc_init_misc(dev_priv);
Michał Winiarski3176ff42017-12-13 23:13:47 +01001437 if (ret)
1438 return ret;
1439
Michal Wajdeczkof7dc0152018-06-28 14:15:21 +00001440 ret = intel_wopcm_init(&dev_priv->wopcm);
1441 if (ret)
1442 goto err_uc_misc;
1443
Chris Wilson5e4f5182015-02-13 14:35:59 +00001444 /* This is just a security blanket to placate dragons.
1445 * On some systems, we very sporadically observe that the first TLBs
1446 * used by the CS may be stale, despite us poking the TLB reset. If
1447 * we hold the forcewake during initialisation these problems
1448 * just magically go away.
1449 */
Chris Wilsonee487002017-11-22 17:26:21 +00001450 mutex_lock(&dev_priv->drm.struct_mutex);
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001451 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson5e4f5182015-02-13 14:35:59 +00001452
Tvrtko Ursulin1d66377a2019-06-21 08:08:05 +01001453 ret = i915_init_ggtt(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001454 if (ret) {
1455 GEM_BUG_ON(ret == -EIO);
1456 goto err_unlock;
1457 }
Jesse Barnesd62b4892013-03-08 10:45:53 -08001458
Chris Wilson51797492018-12-04 14:15:16 +00001459 ret = i915_gem_init_scratch(dev_priv,
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001460 IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001461 if (ret) {
1462 GEM_BUG_ON(ret == -EIO);
1463 goto err_ggtt;
1464 }
Ben Widawsky2fa48d82013-12-06 14:11:04 -08001465
Chris Wilson11334c62019-04-26 17:33:33 +01001466 ret = intel_engines_setup(dev_priv);
1467 if (ret) {
1468 GEM_BUG_ON(ret == -EIO);
1469 goto err_unlock;
1470 }
1471
Chris Wilson51797492018-12-04 14:15:16 +00001472 ret = i915_gem_contexts_init(dev_priv);
1473 if (ret) {
1474 GEM_BUG_ON(ret == -EIO);
1475 goto err_scratch;
1476 }
1477
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001478 ret = intel_engines_init(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001479 if (ret) {
1480 GEM_BUG_ON(ret == -EIO);
1481 goto err_context;
1482 }
Daniel Vetter53ca26c2012-04-26 23:28:03 +02001483
Chris Wilsonf58d13d2017-11-10 14:26:29 +00001484 intel_init_gt_powersave(dev_priv);
1485
Michał Winiarski61b5c152017-12-13 23:13:48 +01001486 ret = intel_uc_init(dev_priv);
Chris Wilsoncc6a8182017-11-10 14:26:30 +00001487 if (ret)
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001488 goto err_pm;
Chris Wilsoncc6a8182017-11-10 14:26:30 +00001489
Michał Winiarski61b5c152017-12-13 23:13:48 +01001490 ret = i915_gem_init_hw(dev_priv);
1491 if (ret)
1492 goto err_uc_init;
1493
Chris Wilson092be382019-06-26 16:45:49 +01001494 /* Only when the HW is re-initialised, can we replay the requests */
1495 ret = intel_gt_resume(&dev_priv->gt);
1496 if (ret)
1497 goto err_init_hw;
1498
Chris Wilsoncc6a8182017-11-10 14:26:30 +00001499 /*
 1500 * Despite its name, intel_init_clock_gating applies display
 1501 * clock gating workarounds, GT mmio workarounds and the occasional
1502 * GT power context workaround. Worse, sometimes it includes a context
1503 * register workaround which we need to apply before we record the
1504 * default HW state for all contexts.
1505 *
1506 * FIXME: break up the workarounds and apply them at the right time!
1507 */
1508 intel_init_clock_gating(dev_priv);
1509
Chris Wilson254e1182019-04-17 08:56:28 +01001510 ret = intel_engines_verify_workarounds(dev_priv);
1511 if (ret)
Chris Wilson092be382019-06-26 16:45:49 +01001512 goto err_gt;
Chris Wilson254e1182019-04-17 08:56:28 +01001513
Chris Wilsond2b4b972017-11-10 14:26:33 +00001514 ret = __intel_engines_record_defaults(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001515 if (ret)
Chris Wilson092be382019-06-26 16:45:49 +01001516 goto err_gt;
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001517
Janusz Krzysztofikf2db53f2019-07-12 13:24:27 +02001518 if (i915_inject_probe_failure()) {
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001519 ret = -ENODEV;
Chris Wilson092be382019-06-26 16:45:49 +01001520 goto err_gt;
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001521 }
1522
Janusz Krzysztofikf2db53f2019-07-12 13:24:27 +02001523 if (i915_inject_probe_failure()) {
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001524 ret = -EIO;
Chris Wilson092be382019-06-26 16:45:49 +01001525 goto err_gt;
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001526 }
1527
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001528 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001529 mutex_unlock(&dev_priv->drm.struct_mutex);
1530
1531 return 0;
1532
1533 /*
 1534 * Unwinding is complicated by the fact that we want to handle -EIO to mean
1535 * disable GPU submission but keep KMS alive. We want to mark the
 1536 * HW as irreversibly wedged, but keep enough state around that the
1537 * driver doesn't explode during runtime.
1538 */
Chris Wilson092be382019-06-26 16:45:49 +01001539err_gt:
Chris Wilson8571a052018-06-06 15:54:41 +01001540 mutex_unlock(&dev_priv->drm.struct_mutex);
1541
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001542 intel_gt_set_wedged(&dev_priv->gt);
Chris Wilson5861b012019-03-08 09:36:54 +00001543 i915_gem_suspend(dev_priv);
Chris Wilson8571a052018-06-06 15:54:41 +01001544 i915_gem_suspend_late(dev_priv);
1545
Chris Wilson8bcf9f72018-07-10 10:44:20 +01001546 i915_gem_drain_workqueue(dev_priv);
1547
Chris Wilson8571a052018-06-06 15:54:41 +01001548 mutex_lock(&dev_priv->drm.struct_mutex);
Chris Wilson092be382019-06-26 16:45:49 +01001549err_init_hw:
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001550 intel_uc_fini_hw(dev_priv);
Michał Winiarski61b5c152017-12-13 23:13:48 +01001551err_uc_init:
1552 intel_uc_fini(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001553err_pm:
1554 if (ret != -EIO) {
1555 intel_cleanup_gt_powersave(dev_priv);
Chris Wilson45b9c962019-05-01 11:32:04 +01001556 intel_engines_cleanup(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001557 }
1558err_context:
1559 if (ret != -EIO)
1560 i915_gem_contexts_fini(dev_priv);
Chris Wilson51797492018-12-04 14:15:16 +00001561err_scratch:
1562 i915_gem_fini_scratch(dev_priv);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001563err_ggtt:
1564err_unlock:
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001565 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001566 mutex_unlock(&dev_priv->drm.struct_mutex);
1567
Michal Wajdeczkof7dc0152018-06-28 14:15:21 +00001568err_uc_misc:
Sagar Arun Kamble70deead2018-01-24 21:16:58 +05301569 intel_uc_fini_misc(dev_priv);
Sagar Arun Kambleda943b52018-01-10 18:24:16 +05301570
Chris Wilson1e345562019-01-28 10:23:56 +00001571 if (ret != -EIO) {
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001572 i915_gem_cleanup_userptr(dev_priv);
Tvrtko Ursulinf0c02c12019-06-21 08:08:10 +01001573 intel_timelines_fini(dev_priv);
Chris Wilson1e345562019-01-28 10:23:56 +00001574 }
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001575
Chris Wilson60990322014-04-09 09:19:42 +01001576 if (ret == -EIO) {
Chris Wilson7ed43df2018-07-26 09:50:32 +01001577 mutex_lock(&dev_priv->drm.struct_mutex);
1578
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001579 /*
1580 * Allow engine initialisation to fail by marking the GPU as
Chris Wilson60990322014-04-09 09:19:42 +01001581 * wedged. But we only want to do this where the GPU is angry;
 1582 * for all other failures, such as an allocation failure, bail.
1583 */
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001584 if (!intel_gt_is_wedged(&dev_priv->gt)) {
Janusz Krzysztofikf2db53f2019-07-12 13:24:27 +02001585 i915_probe_error(dev_priv,
1586 "Failed to initialize GPU, declaring it wedged!\n");
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001587 intel_gt_set_wedged(&dev_priv->gt);
Chris Wilson6f74b362017-10-15 15:37:25 +01001588 }
Chris Wilson7ed43df2018-07-26 09:50:32 +01001589
1590 /* Minimal basic recovery for KMS */
1591 ret = i915_ggtt_enable_hw(dev_priv);
1592 i915_gem_restore_gtt_mappings(dev_priv);
1593 i915_gem_restore_fences(dev_priv);
1594 intel_init_clock_gating(dev_priv);
1595
1596 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson1070a422012-04-24 15:47:41 +01001597 }
1598
Chris Wilson6ca9a2b2017-12-13 13:43:47 +00001599 i915_gem_drain_freed_objects(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01001600 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01001601}
1602
Janusz Krzysztofik78dae1a2019-07-12 13:24:29 +02001603void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001604{
Chris Wilson79ffac852019-04-24 21:07:17 +01001605 GEM_BUG_ON(dev_priv->gt.awake);
1606
Chris Wilson0cf289b2019-06-13 08:32:54 +01001607 intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
Chris Wilsonb27e35a2019-05-27 12:51:14 +01001608
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001609 i915_gem_suspend_late(dev_priv);
Chris Wilson30b710842018-08-12 23:36:29 +01001610 intel_disable_gt_powersave(dev_priv);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001611
1612 /* Flush any outstanding unpin_work. */
1613 i915_gem_drain_workqueue(dev_priv);
1614
1615 mutex_lock(&dev_priv->drm.struct_mutex);
1616 intel_uc_fini_hw(dev_priv);
1617 intel_uc_fini(dev_priv);
Janusz Krzysztofik47bc28d2019-05-30 15:31:05 +02001618 mutex_unlock(&dev_priv->drm.struct_mutex);
1619
1620 i915_gem_drain_freed_objects(dev_priv);
1621}
1622
Janusz Krzysztofik3b58a942019-07-12 13:24:28 +02001623void i915_gem_driver_release(struct drm_i915_private *dev_priv)
Janusz Krzysztofik47bc28d2019-05-30 15:31:05 +02001624{
1625 mutex_lock(&dev_priv->drm.struct_mutex);
Chris Wilson45b9c962019-05-01 11:32:04 +01001626 intel_engines_cleanup(dev_priv);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001627 i915_gem_contexts_fini(dev_priv);
Chris Wilson51797492018-12-04 14:15:16 +00001628 i915_gem_fini_scratch(dev_priv);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001629 mutex_unlock(&dev_priv->drm.struct_mutex);
1630
Tvrtko Ursulin25d140f2018-12-03 13:33:19 +00001631 intel_wa_list_free(&dev_priv->gt_wa_list);
1632
Chris Wilson30b710842018-08-12 23:36:29 +01001633 intel_cleanup_gt_powersave(dev_priv);
1634
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001635 intel_uc_fini_misc(dev_priv);
1636 i915_gem_cleanup_userptr(dev_priv);
Tvrtko Ursulinf0c02c12019-06-21 08:08:10 +01001637 intel_timelines_fini(dev_priv);
Michal Wajdeczko8979187a2018-06-04 09:00:32 +00001638
1639 i915_gem_drain_freed_objects(dev_priv);
1640
1641 WARN_ON(!list_empty(&dev_priv->contexts.list));
1642}
1643
Chris Wilson24145512017-01-24 11:01:35 +00001644void i915_gem_init_mmio(struct drm_i915_private *i915)
1645{
1646 i915_gem_sanitize(i915);
1647}
1648
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001649static void i915_gem_init__mm(struct drm_i915_private *i915)
1650{
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001651 spin_lock_init(&i915->mm.obj_lock);
1652 spin_lock_init(&i915->mm.free_lock);
1653
1654 init_llist_head(&i915->mm.free_list);
1655
Chris Wilson3b4fa962019-05-30 21:34:59 +01001656 INIT_LIST_HEAD(&i915->mm.purge_list);
Chris Wilsonecab9be2019-06-12 11:57:20 +01001657 INIT_LIST_HEAD(&i915->mm.shrink_list);
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001658
Chris Wilson84753552019-05-28 10:29:45 +01001659 i915_gem_init__objects(i915);
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001660}
1661
Michal Wajdeczkoa0de9082018-03-23 12:34:49 +00001662int i915_gem_init_early(struct drm_i915_private *dev_priv)
Eric Anholt673a3942008-07-30 12:06:12 -07001663{
Chris Wilson13f1bfd2019-02-28 10:20:34 +00001664 int err;
Chris Wilsond1b48c12017-08-16 09:52:08 +01001665
Chris Wilson9c52d1c2017-11-10 23:24:47 +00001666 i915_gem_init__mm(dev_priv);
Chris Wilson23c3c3d2019-04-24 21:07:14 +01001667 i915_gem_init__pm(dev_priv);
Chris Wilsonf2123812017-10-16 12:40:37 +01001668
Joonas Lahtinen6f633402016-09-01 14:58:21 +03001669 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
1670
Chris Wilsonb5add952016-08-04 16:32:36 +01001671 spin_lock_init(&dev_priv->fb_tracking.lock);
Chris Wilson73cb9702016-10-28 13:58:46 +01001672
Matthew Auld465c4032017-10-06 23:18:14 +01001673 err = i915_gemfs_init(dev_priv);
1674 if (err)
1675 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
1676
Chris Wilson73cb9702016-10-28 13:58:46 +01001677 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001678}
Dave Airlie71acb5e2008-12-30 20:31:46 +10001679
Michal Wajdeczkoa0de9082018-03-23 12:34:49 +00001680void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
Imre Deakd64aa092016-01-19 15:26:29 +02001681{
Chris Wilsonc4d4c1c2017-02-10 16:35:23 +00001682 i915_gem_drain_freed_objects(dev_priv);
Chris Wilsonc9c704712018-02-19 22:06:31 +00001683 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1684 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
Chris Wilsond82b4b22019-05-30 21:35:00 +01001685 WARN_ON(dev_priv->mm.shrink_count);
Matthew Auldea84aa72016-11-17 21:04:11 +00001686
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001687 intel_gt_cleanup_early(&dev_priv->gt);
Chris Wilson2caffbf2019-02-08 15:37:03 +00001688
Matthew Auld465c4032017-10-06 23:18:14 +01001689 i915_gemfs_fini(dev_priv);
Imre Deakd64aa092016-01-19 15:26:29 +02001690}
1691
Chris Wilson6a800ea2016-09-21 14:51:07 +01001692int i915_gem_freeze(struct drm_i915_private *dev_priv)
1693{
Chris Wilsond0aa3012017-04-07 11:25:49 +01001694 /* Discard all purgeable objects, let userspace recover those as
1695 * required after resuming.
1696 */
Chris Wilson6a800ea2016-09-21 14:51:07 +01001697 i915_gem_shrink_all(dev_priv);
Chris Wilson6a800ea2016-09-21 14:51:07 +01001698
Chris Wilson6a800ea2016-09-21 14:51:07 +01001699 return 0;
1700}
1701
Chris Wilson95c778d2018-06-01 15:41:25 +01001702int i915_gem_freeze_late(struct drm_i915_private *i915)
Chris Wilson461fb992016-05-14 07:26:33 +01001703{
1704 struct drm_i915_gem_object *obj;
Chris Wilsonecab9be2019-06-12 11:57:20 +01001705 intel_wakeref_t wakeref;
Chris Wilson461fb992016-05-14 07:26:33 +01001706
Chris Wilson95c778d2018-06-01 15:41:25 +01001707 /*
1708 * Called just before we write the hibernation image.
Chris Wilson461fb992016-05-14 07:26:33 +01001709 *
1710 * We need to update the domain tracking to reflect that the CPU
1711 * will be accessing all the pages to create and restore from the
 1712 * hibernation image, and so upon restoration those pages will be in the
1713 * CPU domain.
1714 *
1715 * To make sure the hibernation image contains the latest state,
1716 * we update that state just before writing out the image.
Chris Wilson7aab2d52016-09-09 20:02:18 +01001717 *
1718 * To try and reduce the hibernation image, we manually shrink
Chris Wilsond0aa3012017-04-07 11:25:49 +01001719 * the objects as well, see i915_gem_freeze()
Chris Wilson461fb992016-05-14 07:26:33 +01001720 */
1721
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001722 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
Chris Wilsonecab9be2019-06-12 11:57:20 +01001723
1724 i915_gem_shrink(i915, -1UL, NULL, ~0);
Chris Wilson95c778d2018-06-01 15:41:25 +01001725 i915_gem_drain_freed_objects(i915);
Chris Wilson461fb992016-05-14 07:26:33 +01001726
Chris Wilsonecab9be2019-06-12 11:57:20 +01001727 list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
1728 i915_gem_object_lock(obj);
1729 WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
1730 i915_gem_object_unlock(obj);
Chris Wilson461fb992016-05-14 07:26:33 +01001731 }
Chris Wilsonecab9be2019-06-12 11:57:20 +01001732
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001733 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
Chris Wilson461fb992016-05-14 07:26:33 +01001734
1735 return 0;
1736}
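/*
 * Rough call-order sketch (assumed wiring via the driver's dev_pm_ops in
 * i915_drv.c; shown only for orientation): during hibernation the PM core is
 * expected to invoke the freeze hooks before the image is written, roughly as
 *
 *	i915_gem_freeze(i915);		// discard purgeable objects
 *	i915_gem_freeze_late(i915);	// move remaining shrinkable objects
 *					// to the CPU domain
 *	... hibernation image is written ...
 */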
1737
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001738void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00001739{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001740 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsone61e0f52018-02-21 09:56:36 +00001741 struct i915_request *request;
Eric Anholtb9624422009-06-03 07:27:35 +00001742
1743 /* Clean up our request list when the client is going away, so that
1744 * later retire_requests won't dereference our soon-to-be-gone
1745 * file_priv.
1746 */
Chris Wilson1c255952010-09-26 11:03:27 +01001747 spin_lock(&file_priv->mm.lock);
Chris Wilsonc8659ef2017-03-02 12:25:25 +00001748 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001749 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01001750 spin_unlock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001751}
1752
Chris Wilson829a0af2017-06-20 12:05:45 +01001753int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001754{
1755 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08001756 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001757
Chris Wilsonc4c29d72016-11-09 10:45:07 +00001758 DRM_DEBUG("\n");
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001759
1760 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1761 if (!file_priv)
1762 return -ENOMEM;
1763
1764 file->driver_priv = file_priv;
Chris Wilson829a0af2017-06-20 12:05:45 +01001765 file_priv->dev_priv = i915;
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001766 file_priv->file = file;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001767
1768 spin_lock_init(&file_priv->mm.lock);
1769 INIT_LIST_HEAD(&file_priv->mm.request_list);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001770
Chris Wilsonc80ff162016-07-27 09:07:27 +01001771 file_priv->bsd_engine = -1;
Mika Kuoppala14921f32018-06-15 13:44:29 +03001772 file_priv->hang_timestamp = jiffies;
Tvrtko Ursulinde1add32016-01-15 15:12:50 +00001773
Chris Wilson829a0af2017-06-20 12:05:45 +01001774 ret = i915_gem_context_open(i915, file);
Ben Widawskye422b882013-12-06 14:10:58 -08001775 if (ret)
1776 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001777
Ben Widawskye422b882013-12-06 14:10:58 -08001778 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001779}
1780
Daniel Vetterb680c372014-09-19 18:27:27 +02001781/**
1782 * i915_gem_track_fb - update frontbuffer tracking
Geliang Tangd9072a32015-09-15 05:58:44 -07001783 * @old: current GEM buffer for the frontbuffer slots
1784 * @new: new GEM buffer for the frontbuffer slots
1785 * @frontbuffer_bits: bitmask of frontbuffer slots
Daniel Vetterb680c372014-09-19 18:27:27 +02001786 *
1787 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
1788 * from @old and setting them in @new. Both @old and @new can be NULL.
1789 */
Daniel Vettera071fa02014-06-18 23:28:09 +02001790void i915_gem_track_fb(struct drm_i915_gem_object *old,
1791 struct drm_i915_gem_object *new,
1792 unsigned frontbuffer_bits)
1793{
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01001794 /* Control of individual bits within the mask is guarded by
1795 * the owning plane->mutex, i.e. we can never see concurrent
1796 * manipulation of individual bits. But since the bitfield as a whole
1797 * is updated using RMW, we need to use atomics in order to update
1798 * the bits.
1799 */
1800 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
Chris Wilson74f6e182018-09-26 11:47:07 +01001801 BITS_PER_TYPE(atomic_t));
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01001802
Daniel Vettera071fa02014-06-18 23:28:09 +02001803 if (old) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01001804 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
1805 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02001806 }
1807
1808 if (new) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01001809 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
1810 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02001811 }
1812}
1813
Chris Wilson935a2f72017-02-13 17:15:13 +00001814#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
Chris Wilson66d9cb52017-02-13 17:15:17 +00001815#include "selftests/mock_gem_device.c"
Chris Wilson3f51b7e12018-08-30 14:48:06 +01001816#include "selftests/i915_gem.c"
Chris Wilson935a2f72017-02-13 17:15:13 +00001817#endif