/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static int i915_gem_shrinker_oom(struct notifier_block *nb,
				 unsigned long event,
				 void *ptr);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

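/*
 * Informational note on the two helpers above (editor's sketch, not part
 * of the original file): on LLC platforms the CPU and GPU share the
 * last-level cache, so every cache level is coherent; without an LLC only
 * snooped objects (anything other than I915_CACHE_NONE) are. Writes to
 * pinned-for-display objects still need a clflush even when cached, since
 * the display engine does not snoop the CPU caches.
 */
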
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

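/*
 * Worked example for the dumb-buffer sizing above (editor's note): for
 * width 1366 at 32 bpp, the raw row is 1366 * 4 = 5464 bytes, which
 * ALIGN() rounds up to 5504, the next multiple of 64; the object size is
 * then 5504 * height bytes, rounded up to a page by i915_gem_create().
 */
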
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

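/*
 * Worked example of the bit17 swizzle arithmetic in the two helpers above
 * (editor's note): gpu_offset ^ 64 swaps the two 64-byte halves of each
 * 128-byte pair, so a copy starting at gpu_offset 0 of length 192 runs as
 *
 *	chunk 1: gpu   0..63  <-> swizzled  64..127 (64 bytes)
 *	chunk 2: gpu  64..127 <-> swizzled   0..63  (64 bytes)
 *	chunk 3: gpu 128..191 <-> swizzled 192..255 (64 bytes)
 *
 * ALIGN(gpu_offset + 1, 64) clamps each chunk to a single cacheline, so
 * the XOR stays constant within a chunk.
 */
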
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!obj->base.filp)
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourselves into the
		 * gtt read domain and manually flush cachelines (if required).
		 * This optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

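/*
 * Typical pairing for the helper above (editor's sketch; the real caller
 * is i915_gem_shmem_pread() below):
 *
 *	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 *	if (ret == 0) {
 *		... copy out, clflushing first when needs_clflush is set ...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 *
 * The pages stay pinned until the matching unpin, so the copy loop may
 * safely drop struct_mutex in its slow path.
 */
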
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

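/*
 * Worked example for the swizzled flush above (editor's note): for
 * addr 0x1070 and length 32, start rounds down to 0x1000 and end rounds
 * up to 0x1100, so both 64-byte halves of each affected 128-byte pair
 * are flushed regardless of which memory channel the data actually
 * lives in after swizzling.
 */
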
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

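/*
 * Note on the pread fallback above (editor's summary): the fast path runs
 * under struct_mutex with kmap_atomic() and therefore must not fault, so
 * on the first miss the lock is dropped, the user buffer is prefaulted
 * once, and shmem_pread_slow() retries the same page with kmap() and
 * faultable copies before the lock is retaken.
 */
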
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

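/*
 * Note on the pin flags above (editor's reading): PIN_MAPPABLE requests
 * space in the CPU-visible part of the aperture so fast_user_write() can
 * go through the io_mapping, and PIN_NONBLOCK makes the pin fail (e.g.
 * with -ENOSPC) rather than stall evicting other objects;
 * i915_gem_pwrite_ioctl() then falls back to the shmem path.
 */
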
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourselves into the
		 * gtt write domain and manually flush cachelines (if required).
		 * This optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire range being
		 * written. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

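/*
 * Summary of the clflush decisions above (editor's note):
 *
 *	needs_clflush_after  - object is not in the CPU write domain, so
 *			       the written cachelines must be flushed out
 *			       for the GPU to see them.
 *	needs_clflush_before - object is incoherent and a cacheline is
 *			       only partially overwritten, so stale data
 *			       must not be pulled in around the write.
 *
 * Worked example: a 13-byte write at page offset 100 with 64-byte
 * cachelines touches bytes 100..112 of the 64..127 line, so
 * (100 | 13) & 63 != 0 and the partial-write flush triggers.
 */
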
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fall back to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

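/*
 * Write-path selection in the ioctl above, in order (editor's summary):
 *
 *	1. phys objects                  -> i915_gem_phys_pwrite()
 *	2. untiled, clflush-needing      -> i915_gem_gtt_pwrite_fast()
 *	3. -EFAULT or -ENOSPC from (2),
 *	   or no path taken (ret = -EFAULT) -> i915_gem_shmem_pwrite()
 */
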
int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

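/*
 * Decision table for the wedge check above (editor's note):
 *
 *	reset in progress, !interruptible    -> -EIO
 *	reset in progress, terminally wedged -> -EIO
 *	reset in progress, recoverable       -> -EAGAIN
 *	no reset pending                     -> 0
 */
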
/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

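/*
 * Exit conditions of the wait loop above, for reference (editor's note):
 *
 *	seqno passed                      -> 0
 *	gpu reset since caller sampled
 *	reset_counter                     -> -EAGAIN (or -EIO if wedged)
 *	signal while interruptible        -> -ERESTARTSYS
 *	timeout expired (timeout != NULL) -> -ETIME
 *
 * The fake_irq timer only arms when a timeout is requested or the ring
 * has previously missed interrupts, so an unbounded wait with working
 * irqs sleeps until a real interrupt wakes the queue.
 */
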
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_engine_cs *ring)
{
	if (!obj->active)
		return 0;

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_engine_cs *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

Chris Wilson3236f572012-08-24 09:35:09 +01001204/* A nonblocking variant of the above wait. This is a highly dangerous routine
1205 * as the object state may change during this call.
1206 */
1207static __must_check int
1208i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
Chris Wilson6e4930f2014-02-07 18:37:06 -02001209 struct drm_i915_file_private *file_priv,
Chris Wilson3236f572012-08-24 09:35:09 +01001210 bool readonly)
1211{
1212 struct drm_device *dev = obj->base.dev;
1213 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01001214 struct intel_engine_cs *ring = obj->ring;
Daniel Vetterf69061b2012-12-06 09:01:42 +01001215 unsigned reset_counter;
Chris Wilson3236f572012-08-24 09:35:09 +01001216 u32 seqno;
1217 int ret;
1218
1219 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1220 BUG_ON(!dev_priv->mm.interruptible);
1221
1222 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1223 if (seqno == 0)
1224 return 0;
1225
Daniel Vetter33196de2012-11-14 17:14:05 +01001226 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
Chris Wilson3236f572012-08-24 09:35:09 +01001227 if (ret)
1228 return ret;
1229
1230 ret = i915_gem_check_olr(ring, seqno);
1231 if (ret)
1232 return ret;
1233
Daniel Vetterf69061b2012-12-06 09:01:42 +01001234 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson3236f572012-08-24 09:35:09 +01001235 mutex_unlock(&dev->struct_mutex);
Chris Wilson6e4930f2014-02-07 18:37:06 -02001236 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
Chris Wilson3236f572012-08-24 09:35:09 +01001237 mutex_lock(&dev->struct_mutex);
Chris Wilsond26e3af2013-06-29 22:05:26 +01001238 if (ret)
1239 return ret;
Chris Wilson3236f572012-08-24 09:35:09 +01001240
Chris Wilsond26e3af2013-06-29 22:05:26 +01001241 return i915_gem_object_wait_rendering__tail(obj, ring);
Chris Wilson3236f572012-08-24 09:35:09 +01001242}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  file->driver_priv,
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
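
/*
 * An illustrative userspace sketch of the set-domain flow (not part of this
 * file, error handling omitted): moving a buffer into the GTT domain for
 * writes before touching it through a GTT mapping looks roughly like
 *
 *	struct drm_i915_gem_set_domain arg = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
 */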

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj, true);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Now bind it into the GTT if needed */
	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	obj->fault_mappable = true;

	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
	pfn >>= PAGE_SHIFT;
	pfn += page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unpin:
	i915_gem_object_ggtt_unpin(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}

void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct i915_vma *vma;

	/*
	 * Only the global gtt is relevant for gtt memory mappings, so restrict
	 * list traversal to objects bound into the global address space. Note
	 * that the active list should be empty, but better safe than sorry.
	 */
	WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
	list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
		i915_gem_release_mmap(vma->obj);
	list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
		i915_gem_release_mmap(vma->obj);
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);
	obj->fault_mappable = false;
}

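/*
 * Fence regions on pre-gen4 hardware must be a power of two: at least 1MiB
 * on gen3 and 512KiB on gen2, doubled until the object fits. For example, a
 * 700KiB tiled object on gen3 occupies a 1MiB fence region, whereas on gen4+
 * (or for untiled objects) the object size is used unchanged.
 */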
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can do is to release the
	 * offsets on purgeable objects by truncating them and marking them
	 * purged, which prevents userspace from ever using those objects
	 * again.
	 */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

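/*
 * Look up the object for @handle, reject objects that are purgeable or too
 * large for the mappable aperture, lazily create the fake mmap offset, and
 * report that offset back to the caller.
 */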
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->gtt.mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
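
/*
 * An illustrative userspace sketch (not part of this file, error handling
 * omitted): the fake offset returned by the ioctl is passed straight back
 * to mmap() on the DRM fd, and the first access then faults into
 * i915_gem_fault() above.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */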

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
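		/* fall through: a truncated object has nothing left to invalidate */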
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = file_inode(obj->base.filp)->i_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

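/*
 * Release the shmemfs pages backing @obj: move the object out of any GPU
 * domains, preserve the bit17 swizzle state if required, write back dirty
 * pages, and drop the page references taken when the pages were gathered.
 */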
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	BUG_ON(i915_gem_obj_bound_any(obj));

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}

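/*
 * Walk the unbound and then the bound object lists, dropping backing pages
 * until at least @target pages have been released or both lists are
 * exhausted. With @purgeable_only set, only objects marked madv DONTNEED
 * are considered. Returns the number of pages actually released.
 */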
static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
{
	struct list_head still_in_list;
	struct drm_i915_gem_object *obj;
	unsigned long count = 0;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	INIT_LIST_HEAD(&still_in_list);
	while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
		obj = list_first_entry(&dev_priv->mm.unbound_list,
				       typeof(*obj), global_list);
		list_move_tail(&obj->global_list, &still_in_list);

		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
			continue;

		drm_gem_object_reference(&obj->base);

		if (i915_gem_object_put_pages(obj) == 0)
			count += obj->base.size >> PAGE_SHIFT;

		drm_gem_object_unreference(&obj->base);
	}
	list_splice(&still_in_list, &dev_priv->mm.unbound_list);

	INIT_LIST_HEAD(&still_in_list);
	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
		struct i915_vma *vma, *v;

		obj = list_first_entry(&dev_priv->mm.bound_list,
				       typeof(*obj), global_list);
		list_move_tail(&obj->global_list, &still_in_list);

		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
			continue;

		drm_gem_object_reference(&obj->base);

		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
			if (i915_vma_unbind(vma))
				break;

		if (i915_gem_object_put_pages(obj) == 0)
			count += obj->base.size >> PAGE_SHIFT;

		drm_gem_object_unreference(&obj->base);
	}
	list_splice(&still_in_list, &dev_priv->mm.bound_list);

	return count;
}

static unsigned long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
	return __i915_gem_shrink(dev_priv, target, true);
}

static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	i915_gem_evict_everything(dev_priv->dev);
	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter sg_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_mask(mapping);
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_purge(dev_priv, page_count);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
			gfp |= __GFP_IO | __GFP_WAIT;

			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (IS_ERR(page))
				goto err_pages;

			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
			gfp &= ~(__GFP_IO | __GFP_WAIT);
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		page_cache_release(sg_page_iter_page(&sg_iter));
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (PTR_ERR(page) == -ENOSPC)
		return -ENOMEM;
	else
		return PTR_ERR(page);
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
	return 0;
}

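/*
 * Mark @obj as busy on @ring: take an active reference the first time the
 * object enters an active list, and record the seqnos that retirement will
 * later compare against the ring's completed seqno.
 */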
static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_engine_cs *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = intel_ring_get_seqno(ring);

	BUG_ON(ring == NULL);
	if (obj->ring != ring && obj->last_write_seqno) {
		/* Keep the seqno relative to the current ring */
		obj->last_write_seqno = seqno;
	}
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_read_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;

		/* Bump MRU to take account of the delayed flush */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg;

			reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}
	}
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_engine_cs *ring)
{
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	return i915_gem_object_move_to_active(vma->obj, ring);
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_address_space *vm;
	struct i915_vma *vma;

	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
	BUG_ON(!obj->active);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		vma = i915_gem_obj_to_vma(obj, vm);
		if (vma && !list_empty(&vma->mm_list))
			list_move_tail(&vma->mm_list, &vm->inactive_list);
	}

	list_del_init(&obj->ring_list);
	obj->ring = NULL;

	obj->last_read_seqno = 0;
	obj->last_write_seqno = 0;
	obj->base.write_domain = 0;

	obj->last_fenced_seqno = 0;
	obj->fenced_gpu_access = false;

	obj->active = 0;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

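/*
 * Opportunistically move @obj to the inactive list once the hardware
 * reports that its last read request has completed.
 */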
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *ring = obj->ring;

	if (ring == NULL)
		return;

	if (i915_seqno_passed(ring->get_seqno(ring, true),
			      obj->last_read_seqno))
		i915_gem_object_move_to_inactive(obj);
}

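/*
 * Quiesce every ring and retire all outstanding requests, then program
 * @seqno as the next value each ring will report and clear the inter-ring
 * semaphore bookkeeping to match.
 */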
static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, j;

	/* Carefully retire all requests without writing to the rings */
	for_each_ring(ring, dev_priv, i) {
		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev);

	/* Finally reset hw state */
	for_each_ring(ring, dev_priv, i) {
		intel_ring_init_seqno(ring, seqno);

		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
			ring->semaphore.sync_seqno[j] = 0;
	}

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* The HWS page needs to be set to less than what we
	 * will inject to the ring
	 */
	ret = i915_gem_init_seqno(dev, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}

int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_init_seqno(dev, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}

int __i915_add_request(struct intel_engine_cs *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *obj,
		       u32 *out_seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_request *request;
	u32 request_ring_position, request_start;
	int ret;

	request_start = intel_ring_get_tail(ring);
	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	request = ring->preallocated_lazy_request;
	if (WARN_ON(request == NULL))
		return -ENOMEM;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request_ring_position = intel_ring_get_tail(ring);

	ret = ring->add_request(ring);
	if (ret)
		return ret;

	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->head = request_start;
	request->tail = request_ring_position;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	request->batch_obj = obj;

	/* Hold a reference to the current context so that we can inspect
	 * it later in case a hangcheck error event fires.
	 */
	request->ctx = ring->last_context;
	if (request->ctx)
		i915_gem_context_reference(request->ctx);

	request->emitted_jiffies = jiffies;
	list_add_tail(&request->list, &ring->request_list);
	request->file_priv = NULL;

	if (file) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	trace_i915_gem_request_add(ring, request->seqno);
	ring->outstanding_lazy_seqno = 0;
	ring->preallocated_lazy_request = NULL;

	if (!dev_priv->ums.mm_suspended) {
		i915_queue_hangcheck(ring->dev);

		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
		intel_mark_busy(dev_priv->dev);
	}

	if (out_seqno)
		*out_seqno = request->seqno;
	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

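/*
 * A context is banned if it has already been flagged as such, or if it is
 * caught hanging the GPU again within DRM_I915_CTX_BAN_PERIOD seconds of
 * its previous guilty hang (the default context is only banned when
 * i915_stop_ring_allow_ban() says so).
 */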
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
				   const struct intel_context *ctx)
{
	unsigned long elapsed;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;

	if (ctx->hang_stats.banned)
		return true;

	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
		if (!i915_gem_context_is_default(ctx)) {
			DRM_DEBUG("context hanging too fast, banning!\n");
			return true;
		} else if (i915_stop_ring_allow_ban(dev_priv)) {
			if (i915_stop_ring_allow_warn(dev_priv))
				DRM_ERROR("gpu hanging too fast, banning!\n");
			return true;
		}
	}

	return false;
}

static void i915_set_reset_status(struct drm_i915_private *dev_priv,
				  struct intel_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs;

	if (WARN_ON(!ctx))
		return;

	hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(dev_priv, ctx);
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}
2349
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002350static void i915_gem_free_request(struct drm_i915_gem_request *request)
2351{
2352 list_del(&request->list);
2353 i915_gem_request_remove_from_client(request);
2354
2355 if (request->ctx)
2356 i915_gem_context_unreference(request->ctx);
2357
2358 kfree(request);
2359}
2360
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002361struct drm_i915_gem_request *
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002362i915_gem_find_active_request(struct intel_engine_cs *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002363{
Chris Wilson4db080f2013-12-04 11:37:09 +00002364 struct drm_i915_gem_request *request;
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002365 u32 completed_seqno;
2366
2367 completed_seqno = ring->get_seqno(ring, false);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002368
Chris Wilson4db080f2013-12-04 11:37:09 +00002369 list_for_each_entry(request, &ring->request_list, list) {
2370 if (i915_seqno_passed(completed_seqno, request->seqno))
2371 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002372
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002373 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002374 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002375
2376 return NULL;
2377}
2378
2379static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002380 struct intel_engine_cs *ring)
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002381{
2382 struct drm_i915_gem_request *request;
2383 bool ring_hung;
2384
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002385 request = i915_gem_find_active_request(ring);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002386
2387 if (request == NULL)
2388 return;
2389
2390 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2391
Mika Kuoppala939fd762014-01-30 19:04:44 +02002392 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002393
2394 list_for_each_entry_continue(request, &ring->request_list, list)
Mika Kuoppala939fd762014-01-30 19:04:44 +02002395 i915_set_reset_status(dev_priv, request->ctx, false);
Chris Wilson4db080f2013-12-04 11:37:09 +00002396}
2397
2398static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002399 struct intel_engine_cs *ring)
Chris Wilson4db080f2013-12-04 11:37:09 +00002400{
Chris Wilsondfaae392010-09-22 10:31:52 +01002401 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002402 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07002403
Chris Wilson05394f32010-11-08 19:18:58 +00002404 obj = list_first_entry(&ring->active_list,
2405 struct drm_i915_gem_object,
2406 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002407
Chris Wilson05394f32010-11-08 19:18:58 +00002408 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002409 }
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002410
2411 /*
2412 * We must free the requests after all the corresponding objects have
 2413 * been moved off active lists, which is the same order the normal
 2414 * retire_requests function uses. This is important if objects hold
 2415 * implicit references on things such as ppgtt address spaces through
2416 * the request.
2417 */
2418 while (!list_empty(&ring->request_list)) {
2419 struct drm_i915_gem_request *request;
2420
2421 request = list_first_entry(&ring->request_list,
2422 struct drm_i915_gem_request,
2423 list);
2424
2425 i915_gem_free_request(request);
2426 }
Chris Wilsone3efda42014-04-09 09:19:41 +01002427
 2428 /* These may not have been flushed before the reset, do so now */
2429 kfree(ring->preallocated_lazy_request);
2430 ring->preallocated_lazy_request = NULL;
2431 ring->outstanding_lazy_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002432}
2433
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002434void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002435{
2436 struct drm_i915_private *dev_priv = dev->dev_private;
2437 int i;
2438
Daniel Vetter4b9de732011-10-09 21:52:02 +02002439 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002440 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002441
Daniel Vetter94a335d2013-07-17 14:51:28 +02002442 /*
2443 * Commit delayed tiling changes if we have an object still
2444 * attached to the fence, otherwise just clear the fence.
2445 */
2446 if (reg->obj) {
2447 i915_gem_object_update_fence(reg->obj, reg,
2448 reg->obj->tiling_mode);
2449 } else {
2450 i915_gem_write_fence(dev, i, NULL);
2451 }
Chris Wilson312817a2010-11-22 11:50:11 +00002452 }
2453}
2454
Chris Wilson069efc12010-09-30 16:53:18 +01002455void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002456{
Chris Wilsondfaae392010-09-22 10:31:52 +01002457 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002458 struct intel_engine_cs *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002459 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002460
Chris Wilson4db080f2013-12-04 11:37:09 +00002461 /*
2462 * Before we free the objects from the requests, we need to inspect
2463 * them for finding the guilty party. As the requests only borrow
2464 * their reference to the objects, the inspection must be done first.
2465 */
Chris Wilsonb4519512012-05-11 14:29:30 +01002466 for_each_ring(ring, dev_priv, i)
Chris Wilson4db080f2013-12-04 11:37:09 +00002467 i915_gem_reset_ring_status(dev_priv, ring);
2468
2469 for_each_ring(ring, dev_priv, i)
2470 i915_gem_reset_ring_cleanup(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002471
Ben Widawskyacce9ff2013-12-06 14:11:03 -08002472 i915_gem_context_reset(dev);
2473
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002474 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002475}
2476
2477/**
2478 * This function clears the request list as sequence numbers are passed.
2479 */
Chris Wilson1cf0ba12014-05-05 09:07:33 +01002480void
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002481i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002482{
Eric Anholt673a3942008-07-30 12:06:12 -07002483 uint32_t seqno;
2484
Chris Wilsondb53a302011-02-03 11:57:46 +00002485 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002486 return;
2487
Chris Wilsondb53a302011-02-03 11:57:46 +00002488 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002489
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002490 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002491
Chris Wilsone9103032014-01-07 11:45:14 +00002492 /* Move any buffers on the active list that are no longer referenced
2493 * by the ringbuffer to the flushing/inactive lists as appropriate,
2494 * before we free the context associated with the requests.
2495 */
2496 while (!list_empty(&ring->active_list)) {
2497 struct drm_i915_gem_object *obj;
2498
2499 obj = list_first_entry(&ring->active_list,
2500 struct drm_i915_gem_object,
2501 ring_list);
2502
2503 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2504 break;
2505
2506 i915_gem_object_move_to_inactive(obj);
2507 }
 2508
Zou Nan hai852835f2010-05-21 09:08:56 +08002510 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002511 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07002512
Zou Nan hai852835f2010-05-21 09:08:56 +08002513 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002514 struct drm_i915_gem_request,
2515 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002516
Chris Wilsondfaae392010-09-22 10:31:52 +01002517 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002518 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002519
Chris Wilsondb53a302011-02-03 11:57:46 +00002520 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002521 /* We know the GPU must have read the request to have
2522 * sent us the seqno + interrupt, so use the position
 2523 * of the tail of the request to update the last known position
2524 * of the GPU head.
2525 */
Oscar Mateoee1b1e52014-05-22 14:13:35 +01002526 ring->buffer->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002527
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002528 i915_gem_free_request(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002529 }
2530
Chris Wilsondb53a302011-02-03 11:57:46 +00002531 if (unlikely(ring->trace_irq_seqno &&
2532 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002533 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002534 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002535 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002536
Chris Wilsondb53a302011-02-03 11:57:46 +00002537 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002538}
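/*
 * Note on the seqno comparisons above (sketch; assumes the usual
 * signed-delta definition of i915_seqno_passed() from i915_drv.h):
 *
 *	i915_seqno_passed(2, 0xfffffffe)
 *		== ((int32_t)(2 - 0xfffffffe) >= 0)
 *		== ((int32_t)4 >= 0) == true
 *
 * so retirement keeps working across the u32 seqno wrap, which is why
 * the loops use it rather than a plain '>='.
 */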
2539
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002540bool
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002541i915_gem_retire_requests(struct drm_device *dev)
2542{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002543 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002544 struct intel_engine_cs *ring;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002545 bool idle = true;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002546 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002547
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002548 for_each_ring(ring, dev_priv, i) {
Chris Wilsonb4519512012-05-11 14:29:30 +01002549 i915_gem_retire_requests_ring(ring);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002550 idle &= list_empty(&ring->request_list);
2551 }
2552
2553 if (idle)
2554 mod_delayed_work(dev_priv->wq,
2555 &dev_priv->mm.idle_work,
2556 msecs_to_jiffies(100));
2557
2558 return idle;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002559}
2560
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002561static void
Eric Anholt673a3942008-07-30 12:06:12 -07002562i915_gem_retire_work_handler(struct work_struct *work)
2563{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002564 struct drm_i915_private *dev_priv =
2565 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2566 struct drm_device *dev = dev_priv->dev;
Chris Wilson0a587052011-01-09 21:05:44 +00002567 bool idle;
Eric Anholt673a3942008-07-30 12:06:12 -07002568
Chris Wilson891b48c2010-09-29 12:26:37 +01002569 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002570 idle = false;
2571 if (mutex_trylock(&dev->struct_mutex)) {
2572 idle = i915_gem_retire_requests(dev);
2573 mutex_unlock(&dev->struct_mutex);
2574 }
2575 if (!idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002576 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2577 round_jiffies_up_relative(HZ));
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002578}
Chris Wilson891b48c2010-09-29 12:26:37 +01002579
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002580static void
2581i915_gem_idle_work_handler(struct work_struct *work)
2582{
2583 struct drm_i915_private *dev_priv =
2584 container_of(work, typeof(*dev_priv), mm.idle_work.work);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002585
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002586 intel_mark_idle(dev_priv->dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002587}
2588
Ben Widawsky5816d642012-04-11 11:18:19 -07002589/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002590 * Ensures that an object will eventually get non-busy by flushing any required
 2591 * write domains, emitting any outstanding lazy request and retiring any
 2592 * completed requests.
2593 */
2594static int
2595i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2596{
2597 int ret;
2598
2599 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002600 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002601 if (ret)
2602 return ret;
2603
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002604 i915_gem_retire_requests_ring(obj->ring);
2605 }
2606
2607 return 0;
2608}
2609
2610/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002611 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2612 * @DRM_IOCTL_ARGS: standard ioctl arguments
2613 *
2614 * Returns 0 if successful, else an error is returned with the remaining time in
2615 * the timeout parameter.
2616 * -ETIME: object is still busy after timeout
2617 * -ERESTARTSYS: signal interrupted the wait
 2618 * -ENOENT: object doesn't exist
2619 * Also possible, but rare:
2620 * -EAGAIN: GPU wedged
2621 * -ENOMEM: damn
2622 * -ENODEV: Internal IRQ fail
2623 * -E?: The add request failed
2624 *
2625 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2626 * non-zero timeout parameter the wait ioctl will wait for the given number of
2627 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2628 * without holding struct_mutex the object may become re-busied before this
 2629 * function completes. A similar but shorter race condition exists in the busy
 2630 * ioctl.
2631 */
2632int
2633i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2634{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002635 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002636 struct drm_i915_gem_wait *args = data;
2637 struct drm_i915_gem_object *obj;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002638 struct intel_engine_cs *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002639 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002640 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002641 u32 seqno = 0;
2642 int ret = 0;
2643
Ben Widawskyeac1f142012-06-05 15:24:24 -07002644 if (args->timeout_ns >= 0) {
2645 timeout_stack = ns_to_timespec(args->timeout_ns);
2646 timeout = &timeout_stack;
2647 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002648
2649 ret = i915_mutex_lock_interruptible(dev);
2650 if (ret)
2651 return ret;
2652
2653 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2654 if (&obj->base == NULL) {
2655 mutex_unlock(&dev->struct_mutex);
2656 return -ENOENT;
2657 }
2658
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002659 /* Need to make sure the object gets inactive eventually. */
2660 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002661 if (ret)
2662 goto out;
2663
2664 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002665 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002666 ring = obj->ring;
2667 }
2668
2669 if (seqno == 0)
2670 goto out;
2671
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002672 /* Do this after OLR check to make sure we make forward progress polling
2673 * on this IOCTL with a 0 timeout (like busy ioctl)
2674 */
2675 if (!args->timeout_ns) {
2676 ret = -ETIME;
2677 goto out;
2678 }
2679
2680 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002681 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002682 mutex_unlock(&dev->struct_mutex);
2683
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002684 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002685 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002686 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002687 return ret;
2688
2689out:
2690 drm_gem_object_unreference(&obj->base);
2691 mutex_unlock(&dev->struct_mutex);
2692 return ret;
2693}
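/*
 * Userspace view (illustrative sketch, not part of the driver; assumes
 * a libdrm-style device fd and a GEM handle):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,	// poll: busy-ioctl semantics
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == -1 &&
 *	    errno == ETIME)
 *		handle_still_busy();	// hypothetical helper
 *
 * With a positive timeout_ns the remaining time is written back into
 * wait.timeout_ns on return, as documented above.
 */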
2694
2695/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002696 * i915_gem_object_sync - sync an object to a ring.
2697 *
2698 * @obj: object which may be in use on another ring.
2699 * @to: ring we wish to use the object on. May be NULL.
2700 *
2701 * This code is meant to abstract object synchronization with the GPU.
2702 * Calling with NULL implies synchronizing the object with the CPU
2703 * rather than a particular GPU ring.
2704 *
2705 * Returns 0 if successful, else propagates up the lower layer error.
2706 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002707int
2708i915_gem_object_sync(struct drm_i915_gem_object *obj,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002709 struct intel_engine_cs *to)
Ben Widawsky2911a352012-04-05 14:47:36 -07002710{
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002711 struct intel_engine_cs *from = obj->ring;
Ben Widawsky2911a352012-04-05 14:47:36 -07002712 u32 seqno;
2713 int ret, idx;
2714
2715 if (from == NULL || to == from)
2716 return 0;
2717
Ben Widawsky5816d642012-04-11 11:18:19 -07002718 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002719 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002720
2721 idx = intel_ring_sync_index(from, to);
2722
Chris Wilson0201f1e2012-07-20 12:41:01 +01002723 seqno = obj->last_read_seqno;
Ben Widawskyebc348b2014-04-29 14:52:28 -07002724 if (seqno <= from->semaphore.sync_seqno[idx])
Ben Widawsky2911a352012-04-05 14:47:36 -07002725 return 0;
2726
Ben Widawskyb4aca012012-04-25 20:50:12 -07002727 ret = i915_gem_check_olr(obj->ring, seqno);
2728 if (ret)
2729 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002730
Chris Wilsonb52b89d2013-09-25 11:43:28 +01002731 trace_i915_gem_ring_sync_to(from, to, seqno);
Ben Widawskyebc348b2014-04-29 14:52:28 -07002732 ret = to->semaphore.sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002733 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002734 /* We use last_read_seqno because sync_to()
2735 * might have just caused seqno wrap under
2736 * the radar.
2737 */
Ben Widawskyebc348b2014-04-29 14:52:28 -07002738 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002739
Ben Widawskye3a5a222012-04-11 11:18:20 -07002740 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002741}
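/*
 * Typical use (sketch): the execbuffer path calls this for each object
 * about to be used on the target ring, so e.g. a blitter-ring read of a
 * render-ring write becomes a ring-to-ring semaphore wait, or a CPU
 * wait when semaphores are disabled, instead of an unconditional stall.
 */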
2742
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002743static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2744{
2745 u32 old_write_domain, old_read_domains;
2746
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002747 /* Force a pagefault for domain tracking on next user access */
2748 i915_gem_release_mmap(obj);
2749
Keith Packardb97c3d92011-06-24 21:02:59 -07002750 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2751 return;
2752
Chris Wilson97c809fd2012-10-09 19:24:38 +01002753 /* Wait for any direct GTT access to complete */
2754 mb();
2755
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002756 old_read_domains = obj->base.read_domains;
2757 old_write_domain = obj->base.write_domain;
2758
2759 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2760 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2761
2762 trace_i915_gem_object_change_domain(obj,
2763 old_read_domains,
2764 old_write_domain);
2765}
2766
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002767int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002768{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002769 struct drm_i915_gem_object *obj = vma->obj;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002770 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002771 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002772
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002773 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002774 return 0;
2775
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002776 if (!drm_mm_node_allocated(&vma->node)) {
2777 i915_gem_vma_destroy(vma);
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002778 return 0;
2779 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002780
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002781 if (vma->pin_count)
Chris Wilson31d8d652012-05-24 19:11:20 +01002782 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002783
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002784 BUG_ON(obj->pages == NULL);
2785
Chris Wilsona8198ee2011-04-13 22:04:09 +01002786 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002787 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002788 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002789 /* Continue on if we fail due to EIO, the GPU is hung so we
 2790 * should be safe and we need to clean up or else we might
2791 * cause memory corruption through use-after-free.
2792 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002793
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002794 if (i915_is_ggtt(vma->vm)) {
2795 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002796
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002797 /* release the fence reg _after_ flushing */
2798 ret = i915_gem_object_put_fence(obj);
2799 if (ret)
2800 return ret;
2801 }
Daniel Vetter96b47b62009-12-15 17:50:00 +01002802
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002803 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002804
Ben Widawsky6f65e292013-12-06 14:10:56 -08002805 vma->unbind_vma(vma);
2806
Daniel Vetter74163902012-02-15 23:50:21 +01002807 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002808
Chris Wilson64bf9302014-02-25 14:23:28 +00002809 list_del_init(&vma->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002810 /* Avoid an unnecessary call to unbind on rebind. */
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002811 if (i915_is_ggtt(vma->vm))
2812 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002813
Ben Widawsky2f633152013-07-17 12:19:03 -07002814 drm_mm_remove_node(&vma->node);
2815 i915_gem_vma_destroy(vma);
2816
2817 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002818 * no more VMAs exist. */
Ben Widawsky2f633152013-07-17 12:19:03 -07002819 if (list_empty(&obj->vma_list))
2820 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002821
Chris Wilson70903c32013-12-04 09:59:09 +00002822 /* And finally now the object is completely decoupled from this vma,
2823 * we can drop its hold on the backing storage and allow it to be
2824 * reaped by the shrinker.
2825 */
2826 i915_gem_object_unpin_pages(obj);
2827
Chris Wilson88241782011-01-07 17:09:48 +00002828 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002829}
2830
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002831int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002832{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002833 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002834 struct intel_engine_cs *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002835 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002836
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002837 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002838 for_each_ring(ring, dev_priv, i) {
Chris Wilson691e6412014-04-09 09:07:36 +01002839 ret = i915_switch_context(ring, ring->default_context);
Ben Widawskyb6c74882012-08-14 14:35:14 -07002840 if (ret)
2841 return ret;
2842
Chris Wilson3e960502012-11-27 16:22:54 +00002843 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002844 if (ret)
2845 return ret;
2846 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002847
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002848 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002849}
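/*
 * Intent, sketched (callers elided): switch every ring back to its
 * default context and wait for it to drain, so that suspend and
 * evict-everything style paths see fully idle rings before touching
 * global GTT state.
 */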
2850
Chris Wilson9ce079e2012-04-17 15:31:30 +01002851static void i965_write_fence_reg(struct drm_device *dev, int reg,
2852 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002853{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002854 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002855 int fence_reg;
2856 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002857
Imre Deak56c844e2013-01-07 21:47:34 +02002858 if (INTEL_INFO(dev)->gen >= 6) {
2859 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2860 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2861 } else {
2862 fence_reg = FENCE_REG_965_0;
2863 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2864 }
2865
Chris Wilsond18b9612013-07-10 13:36:23 +01002866 fence_reg += reg * 8;
2867
2868 /* To w/a incoherency with non-atomic 64-bit register updates,
2869 * we split the 64-bit update into two 32-bit writes. In order
2870 * for a partial fence not to be evaluated between writes, we
2871 * precede the update with write to turn off the fence register,
2872 * and only enable the fence as the last step.
2873 *
2874 * For extra levels of paranoia, we make sure each step lands
2875 * before applying the next step.
2876 */
2877 I915_WRITE(fence_reg, 0);
2878 POSTING_READ(fence_reg);
2879
Chris Wilson9ce079e2012-04-17 15:31:30 +01002880 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002881 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01002882 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002883
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002884 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01002885 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002886 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02002887 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002888 if (obj->tiling_mode == I915_TILING_Y)
2889 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2890 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00002891
Chris Wilsond18b9612013-07-10 13:36:23 +01002892 I915_WRITE(fence_reg + 4, val >> 32);
2893 POSTING_READ(fence_reg + 4);
2894
2895 I915_WRITE(fence_reg + 0, val);
2896 POSTING_READ(fence_reg);
2897 } else {
2898 I915_WRITE(fence_reg + 4, 0);
2899 POSTING_READ(fence_reg + 4);
2900 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002901}
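/*
 * The hazard being avoided, sketched: a single 64-bit update such as
 *
 *	I915_WRITE64(fence_reg, val);
 *
 * may be issued as two 32-bit cycles, so a concurrent GTT access could
 * sample a fence stitched together from the old low dword and the new
 * high dword. Disabling the fence first, then writing high before low,
 * with posting reads in between, keeps every intermediate state either
 * "fence off" or fully consistent.
 */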
2902
Chris Wilson9ce079e2012-04-17 15:31:30 +01002903static void i915_write_fence_reg(struct drm_device *dev, int reg,
2904 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002905{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002906 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002907 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002908
Chris Wilson9ce079e2012-04-17 15:31:30 +01002909 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002910 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002911 int pitch_val;
2912 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002913
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002914 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002915 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002916 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2917 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2918 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002919
2920 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2921 tile_width = 128;
2922 else
2923 tile_width = 512;
2924
2925 /* Note: pitch better be a power of two tile widths */
2926 pitch_val = obj->stride / tile_width;
2927 pitch_val = ffs(pitch_val) - 1;
2928
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002929 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002930 if (obj->tiling_mode == I915_TILING_Y)
2931 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2932 val |= I915_FENCE_SIZE_BITS(size);
2933 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2934 val |= I830_FENCE_REG_VALID;
2935 } else
2936 val = 0;
2937
2938 if (reg < 8)
2939 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002940 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002941 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002942
Chris Wilson9ce079e2012-04-17 15:31:30 +01002943 I915_WRITE(reg, val);
2944 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002945}
2946
Chris Wilson9ce079e2012-04-17 15:31:30 +01002947static void i830_write_fence_reg(struct drm_device *dev, int reg,
2948 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002949{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002950 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002951 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002952
Chris Wilson9ce079e2012-04-17 15:31:30 +01002953 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002954 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002955 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002956
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002957 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002958 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002959 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2960 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2961 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002962
Chris Wilson9ce079e2012-04-17 15:31:30 +01002963 pitch_val = obj->stride / 128;
2964 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002965
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002966 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002967 if (obj->tiling_mode == I915_TILING_Y)
2968 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2969 val |= I830_FENCE_SIZE_BITS(size);
2970 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2971 val |= I830_FENCE_REG_VALID;
2972 } else
2973 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002974
Chris Wilson9ce079e2012-04-17 15:31:30 +01002975 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2976 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2977}
2978
Chris Wilsond0a57782012-10-09 19:24:37 +01002979static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2980{
2981 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2982}
2983
Chris Wilson9ce079e2012-04-17 15:31:30 +01002984static void i915_gem_write_fence(struct drm_device *dev, int reg,
2985 struct drm_i915_gem_object *obj)
2986{
Chris Wilsond0a57782012-10-09 19:24:37 +01002987 struct drm_i915_private *dev_priv = dev->dev_private;
2988
2989 /* Ensure that all CPU reads are completed before installing a fence
2990 * and all writes before removing the fence.
2991 */
2992 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2993 mb();
2994
Daniel Vetter94a335d2013-07-17 14:51:28 +02002995 WARN(obj && (!obj->stride || !obj->tiling_mode),
2996 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2997 obj->stride, obj->tiling_mode);
2998
Chris Wilson9ce079e2012-04-17 15:31:30 +01002999 switch (INTEL_INFO(dev)->gen) {
Ben Widawsky5ab31332013-11-02 21:07:03 -07003000 case 8:
Chris Wilson9ce079e2012-04-17 15:31:30 +01003001 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02003002 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01003003 case 5:
3004 case 4: i965_write_fence_reg(dev, reg, obj); break;
3005 case 3: i915_write_fence_reg(dev, reg, obj); break;
3006 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08003007 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01003008 }
Chris Wilsond0a57782012-10-09 19:24:37 +01003009
3010 /* And similarly be paranoid that no direct access to this region
3011 * is reordered to before the fence is installed.
3012 */
3013 if (i915_gem_object_needs_mb(obj))
3014 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003015}
3016
Chris Wilson61050802012-04-17 15:31:31 +01003017static inline int fence_number(struct drm_i915_private *dev_priv,
3018 struct drm_i915_fence_reg *fence)
3019{
3020 return fence - dev_priv->fence_regs;
3021}
3022
3023static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3024 struct drm_i915_fence_reg *fence,
3025 bool enable)
3026{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01003027 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01003028 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01003029
Chris Wilson46a0b632013-07-10 13:36:24 +01003030 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01003031
3032 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01003033 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01003034 fence->obj = obj;
3035 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3036 } else {
3037 obj->fence_reg = I915_FENCE_REG_NONE;
3038 fence->obj = NULL;
3039 list_del_init(&fence->lru_list);
3040 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02003041 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01003042}
3043
Chris Wilsond9e86c02010-11-10 16:40:20 +00003044static int
Chris Wilsond0a57782012-10-09 19:24:37 +01003045i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003046{
Chris Wilson1c293ea2012-04-17 15:31:27 +01003047 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01003048 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01003049 if (ret)
3050 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003051
3052 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003053 }
3054
Chris Wilson86d5bc32012-07-20 12:41:04 +01003055 obj->fenced_gpu_access = false;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003056 return 0;
3057}
3058
3059int
3060i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3061{
Chris Wilson61050802012-04-17 15:31:31 +01003062 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003063 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003064 int ret;
3065
Chris Wilsond0a57782012-10-09 19:24:37 +01003066 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003067 if (ret)
3068 return ret;
3069
Chris Wilson61050802012-04-17 15:31:31 +01003070 if (obj->fence_reg == I915_FENCE_REG_NONE)
3071 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01003072
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003073 fence = &dev_priv->fence_regs[obj->fence_reg];
3074
Daniel Vetteraff10b302014-02-14 14:06:05 +01003075 if (WARN_ON(fence->pin_count))
3076 return -EBUSY;
3077
Chris Wilson61050802012-04-17 15:31:31 +01003078 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003079 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003080
3081 return 0;
3082}
3083
3084static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01003085i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01003086{
Daniel Vetterae3db242010-02-19 11:51:58 +01003087 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01003088 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003089 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01003090
3091 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003092 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003093 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3094 reg = &dev_priv->fence_regs[i];
3095 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003096 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003097
Chris Wilson1690e1e2011-12-14 13:57:08 +01003098 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003099 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003100 }
3101
Chris Wilsond9e86c02010-11-10 16:40:20 +00003102 if (avail == NULL)
Chris Wilson5dce5b932014-01-20 10:17:36 +00003103 goto deadlock;
Daniel Vetterae3db242010-02-19 11:51:58 +01003104
3105 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003106 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01003107 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01003108 continue;
3109
Chris Wilson8fe301a2012-04-17 15:31:28 +01003110 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003111 }
3112
Chris Wilson5dce5b932014-01-20 10:17:36 +00003113deadlock:
3114 /* Wait for completion of pending flips which consume fences */
3115 if (intel_has_pending_fb_unpin(dev))
3116 return ERR_PTR(-EAGAIN);
3117
3118 return ERR_PTR(-EDEADLK);
Daniel Vetterae3db242010-02-19 11:51:58 +01003119}
3120
Jesse Barnesde151cf2008-11-12 10:03:55 -08003121/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003122 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08003123 * @obj: object to map through a fence reg
3124 *
3125 * When mapping objects through the GTT, userspace wants to be able to write
3126 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003127 * This function walks the fence regs looking for a free one for @obj,
3128 * stealing one if it can't find any.
3129 *
3130 * It then sets up the reg based on the object's properties: address, pitch
3131 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003132 *
3133 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003134 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003135int
Chris Wilson06d98132012-04-17 15:31:24 +01003136i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003137{
Chris Wilson05394f32010-11-08 19:18:58 +00003138 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08003139 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01003140 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003141 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003142 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003143
Chris Wilson14415742012-04-17 15:31:33 +01003144 /* Have we updated the tiling parameters upon the object and so
3145 * will need to serialise the write to the associated fence register?
3146 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003147 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01003148 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01003149 if (ret)
3150 return ret;
3151 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003152
Chris Wilsond9e86c02010-11-10 16:40:20 +00003153 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003154 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3155 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003156 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003157 list_move_tail(&reg->lru_list,
3158 &dev_priv->mm.fence_list);
3159 return 0;
3160 }
3161 } else if (enable) {
3162 reg = i915_find_fence_reg(dev);
Chris Wilson5dce5b932014-01-20 10:17:36 +00003163 if (IS_ERR(reg))
3164 return PTR_ERR(reg);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003165
Chris Wilson14415742012-04-17 15:31:33 +01003166 if (reg->obj) {
3167 struct drm_i915_gem_object *old = reg->obj;
3168
Chris Wilsond0a57782012-10-09 19:24:37 +01003169 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003170 if (ret)
3171 return ret;
3172
Chris Wilson14415742012-04-17 15:31:33 +01003173 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003174 }
Chris Wilson14415742012-04-17 15:31:33 +01003175 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003176 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003177
Chris Wilson14415742012-04-17 15:31:33 +01003178 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003179
Chris Wilson9ce079e2012-04-17 15:31:30 +01003180 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003181}
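/*
 * Caller pattern (illustrative sketch; argument lists elided): bind the
 * object into the mappable GTT first, then
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret == 0 && i915_gem_object_pin_fence(obj)) {
 *		... access through the fenced GTT range ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 *
 * so the register cannot be stolen for the duration of the access.
 */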
3182
Chris Wilson42d6ab42012-07-26 11:49:32 +01003183static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3184 struct drm_mm_node *gtt_space,
3185 unsigned long cache_level)
3186{
3187 struct drm_mm_node *other;
3188
3189 /* On non-LLC machines we have to be careful when putting differing
3190 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003191 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003192 */
3193 if (HAS_LLC(dev))
3194 return true;
3195
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003196 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003197 return true;
3198
3199 if (list_empty(&gtt_space->node_list))
3200 return true;
3201
3202 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3203 if (other->allocated && !other->hole_follows && other->color != cache_level)
3204 return false;
3205
3206 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3207 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3208 return false;
3209
3210 return true;
3211}
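/*
 * Illustrative layouts (hypothetical nodes on a non-LLC machine):
 *
 *	| prev (uncached) || node (snooped) |		rejected: adjacent
 *							nodes, differing color
 *	| prev (uncached) | hole | node (snooped) |	allowed
 *
 * The guard hole keeps the prefetcher from running off one node into a
 * neighbour with a different caching mode.
 */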
3212
3213static void i915_gem_verify_gtt(struct drm_device *dev)
3214{
3215#if WATCH_GTT
3216 struct drm_i915_private *dev_priv = dev->dev_private;
3217 struct drm_i915_gem_object *obj;
3218 int err = 0;
3219
Ben Widawsky35c20a62013-05-31 11:28:48 -07003220 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003221 if (obj->gtt_space == NULL) {
3222 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3223 err++;
3224 continue;
3225 }
3226
3227 if (obj->cache_level != obj->gtt_space->color) {
3228 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003229 i915_gem_obj_ggtt_offset(obj),
3230 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003231 obj->cache_level,
3232 obj->gtt_space->color);
3233 err++;
3234 continue;
3235 }
3236
3237 if (!i915_gem_valid_gtt_space(dev,
3238 obj->gtt_space,
3239 obj->cache_level)) {
3240 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003241 i915_gem_obj_ggtt_offset(obj),
3242 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003243 obj->cache_level);
3244 err++;
3245 continue;
3246 }
3247 }
3248
3249 WARN_ON(err);
3250#endif
3251}
3252
Jesse Barnesde151cf2008-11-12 10:03:55 -08003253/**
Eric Anholt673a3942008-07-30 12:06:12 -07003254 * Finds free space in the GTT aperture and binds the object there.
3255 */
Daniel Vetter262de142014-02-14 14:01:20 +01003256static struct i915_vma *
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003257i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3258 struct i915_address_space *vm,
3259 unsigned alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003260 unsigned flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003261{
Chris Wilson05394f32010-11-08 19:18:58 +00003262 struct drm_device *dev = obj->base.dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03003263 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003264 u32 size, fence_size, fence_alignment, unfenced_alignment;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003265 size_t gtt_max =
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003266 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003267 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003268 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003269
Chris Wilsone28f8712011-07-18 13:11:49 -07003270 fence_size = i915_gem_get_gtt_size(dev,
3271 obj->base.size,
3272 obj->tiling_mode);
3273 fence_alignment = i915_gem_get_gtt_alignment(dev,
3274 obj->base.size,
Imre Deakd865110c2013-01-07 21:47:33 +02003275 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003276 unfenced_alignment =
Imre Deakd865110c2013-01-07 21:47:33 +02003277 i915_gem_get_gtt_alignment(dev,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003278 obj->base.size,
3279 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003280
Eric Anholt673a3942008-07-30 12:06:12 -07003281 if (alignment == 0)
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003282 alignment = flags & PIN_MAPPABLE ? fence_alignment :
Daniel Vetter5e783302010-11-14 22:32:36 +01003283 unfenced_alignment;
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003284 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003285 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
Daniel Vetter262de142014-02-14 14:01:20 +01003286 return ERR_PTR(-EINVAL);
Eric Anholt673a3942008-07-30 12:06:12 -07003287 }
3288
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003289 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003290
Chris Wilson654fc602010-05-27 13:18:21 +01003291 /* If the object is bigger than the entire aperture, reject it early
3292 * before evicting everything in a vain attempt to find space.
3293 */
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003294 if (obj->base.size > gtt_max) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003295 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003296 obj->base.size,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003297 flags & PIN_MAPPABLE ? "mappable" : "total",
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003298 gtt_max);
Daniel Vetter262de142014-02-14 14:01:20 +01003299 return ERR_PTR(-E2BIG);
Chris Wilson654fc602010-05-27 13:18:21 +01003300 }
3301
Chris Wilson37e680a2012-06-07 15:38:42 +01003302 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003303 if (ret)
Daniel Vetter262de142014-02-14 14:01:20 +01003304 return ERR_PTR(ret);
Chris Wilson6c085a72012-08-20 11:40:46 +02003305
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003306 i915_gem_object_pin_pages(obj);
3307
Ben Widawskyaccfef22013-08-14 11:38:35 +02003308 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Daniel Vetter262de142014-02-14 14:01:20 +01003309 if (IS_ERR(vma))
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003310 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003311
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003312search_free:
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003313 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003314 size, alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +02003315 obj->cache_level, 0, gtt_max,
Lauri Kasanen62347f92014-04-02 20:03:57 +03003316 DRM_MM_SEARCH_DEFAULT,
3317 DRM_MM_CREATE_DEFAULT);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003318 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003319 ret = i915_gem_evict_something(dev, vm, size, alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003320 obj->cache_level, flags);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003321 if (ret == 0)
3322 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003323
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003324 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003325 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003326 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003327 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003328 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003329 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003330 }
3331
Daniel Vetter74163902012-02-15 23:50:21 +01003332 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003333 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003334 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003335
Ben Widawsky35c20a62013-05-31 11:28:48 -07003336 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003337 list_add_tail(&vma->mm_list, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003338
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003339 if (i915_is_ggtt(vm)) {
3340 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003341
Daniel Vetter49987092013-08-14 10:21:23 +02003342 fenceable = (vma->node.size == fence_size &&
3343 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003344
Daniel Vetter49987092013-08-14 10:21:23 +02003345 mappable = (vma->node.start + obj->base.size <=
3346 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003347
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003348 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003349 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003350
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003351 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003352
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003353 trace_i915_vma_bind(vma, flags);
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003354 vma->bind_vma(vma, obj->cache_level,
3355 flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3356
Chris Wilson42d6ab42012-07-26 11:49:32 +01003357 i915_gem_verify_gtt(dev);
Daniel Vetter262de142014-02-14 14:01:20 +01003358 return vma;
Ben Widawsky2f633152013-07-17 12:19:03 -07003359
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003360err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003361 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003362err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003363 i915_gem_vma_destroy(vma);
Daniel Vetter262de142014-02-14 14:01:20 +01003364 vma = ERR_PTR(ret);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003365err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003366 i915_gem_object_unpin_pages(obj);
Daniel Vetter262de142014-02-14 14:01:20 +01003367 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003368}
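/*
 * Sizing example (hypothetical object; pre-gen4 rules as implemented by
 * i915_gem_get_gtt_size()): a 300KiB X-tiled object on gen3 has its
 * fence_size rounded up to the 1MiB power-of-two minimum, so a
 * PIN_MAPPABLE bind reserves 1MiB at fence_alignment even though only
 * 300KiB is backed; without PIN_MAPPABLE the plain object size and
 * unfenced alignment suffice.
 */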
3369
Chris Wilson000433b2013-08-08 14:41:09 +01003370bool
Chris Wilson2c225692013-08-09 12:26:45 +01003371i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3372 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003373{
Eric Anholt673a3942008-07-30 12:06:12 -07003374 /* If we don't have a page list set up, then we're not pinned
3375 * to GPU, and we can ignore the cache flush because it'll happen
3376 * again at bind time.
3377 */
Chris Wilson05394f32010-11-08 19:18:58 +00003378 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003379 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003380
Imre Deak769ce462013-02-13 21:56:05 +02003381 /*
3382 * Stolen memory is always coherent with the GPU as it is explicitly
3383 * marked as wc by the system, or the system is cache-coherent.
3384 */
3385 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003386 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003387
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003388 /* If the GPU is snooping the contents of the CPU cache,
3389 * we do not need to manually clear the CPU cache lines. However,
3390 * the caches are only snooped when the render cache is
3391 * flushed/invalidated. As we always have to emit invalidations
3392 * and flushes when moving into and out of the RENDER domain, correct
3393 * snooping behaviour occurs naturally as the result of our domain
3394 * tracking.
3395 */
Chris Wilson2c225692013-08-09 12:26:45 +01003396 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003397 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003398
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003399 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003400 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003401
3402 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003403}
3404
3405/** Flushes the GTT write domain for the object if it's dirty. */
3406static void
Chris Wilson05394f32010-11-08 19:18:58 +00003407i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003408{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003409 uint32_t old_write_domain;
3410
Chris Wilson05394f32010-11-08 19:18:58 +00003411 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003412 return;
3413
Chris Wilson63256ec2011-01-04 18:42:07 +00003414 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003415 * to it immediately go to main memory as far as we know, so there's
3416 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003417 *
3418 * However, we do have to enforce the order so that all writes through
3419 * the GTT land before any writes to the device, such as updates to
3420 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003421 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003422 wmb();
3423
Chris Wilson05394f32010-11-08 19:18:58 +00003424 old_write_domain = obj->base.write_domain;
3425 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003426
3427 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003428 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003429 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003430}
3431
3432/** Flushes the CPU write domain for the object if it's dirty. */
3433static void
Chris Wilson2c225692013-08-09 12:26:45 +01003434i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3435 bool force)
Eric Anholte47c68e2008-11-14 13:35:19 -08003436{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003437 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08003438
Chris Wilson05394f32010-11-08 19:18:58 +00003439 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003440 return;
3441
Chris Wilson000433b2013-08-08 14:41:09 +01003442 if (i915_gem_clflush_object(obj, force))
3443 i915_gem_chipset_flush(obj->base.dev);
3444
Chris Wilson05394f32010-11-08 19:18:58 +00003445 old_write_domain = obj->base.write_domain;
3446 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003447
3448 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003449 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003450 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003451}
3452
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003453/**
3454 * Moves a single object to the GTT read, and possibly write domain.
3455 *
3456 * This function returns when the move is complete, including waiting on
3457 * flushes to occur.
3458 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003459int
Chris Wilson20217462010-11-23 15:26:33 +00003460i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003461{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03003462 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003463 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003464 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003465
Eric Anholt02354392008-11-26 13:58:13 -08003466 /* Not valid to be called on unbound objects. */
Ben Widawsky98438772013-07-31 17:00:12 -07003467 if (!i915_gem_obj_bound_any(obj))
Eric Anholt02354392008-11-26 13:58:13 -08003468 return -EINVAL;
3469
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003470 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3471 return 0;
3472
Chris Wilson0201f1e2012-07-20 12:41:01 +01003473 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003474 if (ret)
3475 return ret;
3476
Chris Wilsonc8725f32014-03-17 12:21:55 +00003477 i915_gem_object_retire(obj);
Chris Wilson2c225692013-08-09 12:26:45 +01003478 i915_gem_object_flush_cpu_write_domain(obj, false);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003479
Chris Wilsond0a57782012-10-09 19:24:37 +01003480 /* Serialise direct access to this object with the barriers for
3481 * coherent writes from the GPU, by effectively invalidating the
3482 * GTT domain upon first access.
3483 */
3484 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3485 mb();
3486
Chris Wilson05394f32010-11-08 19:18:58 +00003487 old_write_domain = obj->base.write_domain;
3488 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003489
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003490 /* It should now be out of any other write domains, and we can update
3491 * the domain values for our changes.
3492 */
Chris Wilson05394f32010-11-08 19:18:58 +00003493 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3494 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003495 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003496 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3497 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3498 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003499 }
3500
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003501 trace_i915_gem_object_change_domain(obj,
3502 old_read_domains,
3503 old_write_domain);
3504
Chris Wilson8325a092012-04-24 15:52:35 +01003505 /* And bump the LRU for this access */
Ben Widawskyca191b12013-07-31 17:00:14 -07003506 if (i915_gem_object_is_inactive(obj)) {
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07003507 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Ben Widawskyca191b12013-07-31 17:00:14 -07003508 if (vma)
3509 list_move_tail(&vma->mm_list,
3510 &dev_priv->gtt.base.inactive_list);
 3512 	}
Chris Wilson8325a092012-04-24 15:52:35 +01003513
Eric Anholte47c68e2008-11-14 13:35:19 -08003514 return 0;
3515}
3516
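/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * path that wants coherent access through the aperture pins the object
 * in the GGTT first, since i915_gem_object_set_to_gtt_domain() rejects
 * unbound objects with -EINVAL:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret) {
 *		i915_gem_object_ggtt_unpin(obj);
 *		return ret;
 *	}
 */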
Chris Wilsone4ffd172011-04-04 09:44:39 +01003517int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3518 enum i915_cache_level cache_level)
3519{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003520 struct drm_device *dev = obj->base.dev;
Chris Wilsondf6f7832014-03-21 07:40:56 +00003521 struct i915_vma *vma, *next;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003522 int ret;
3523
3524 if (obj->cache_level == cache_level)
3525 return 0;
3526
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003527 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003528 		DRM_DEBUG("cannot change the cache level of pinned objects\n");
3529 return -EBUSY;
3530 }
3531
Chris Wilsondf6f7832014-03-21 07:40:56 +00003532 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003533 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003534 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003535 if (ret)
3536 return ret;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003537 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003538 }
3539
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003540 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003541 ret = i915_gem_object_finish_gpu(obj);
3542 if (ret)
3543 return ret;
3544
3545 i915_gem_object_finish_gtt(obj);
3546
3547 /* Before SandyBridge, you could not use tiling or fence
3548 * registers with snooped memory, so relinquish any fences
3549 * currently pointing to our region in the aperture.
3550 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003551 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003552 ret = i915_gem_object_put_fence(obj);
3553 if (ret)
3554 return ret;
3555 }
3556
Ben Widawsky6f65e292013-12-06 14:10:56 -08003557 list_for_each_entry(vma, &obj->vma_list, vma_link)
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003558 if (drm_mm_node_allocated(&vma->node))
3559 vma->bind_vma(vma, cache_level,
3560 obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003561 }
3562
Chris Wilson2c225692013-08-09 12:26:45 +01003563 list_for_each_entry(vma, &obj->vma_list, vma_link)
3564 vma->node.color = cache_level;
3565 obj->cache_level = cache_level;
3566
3567 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003568 u32 old_read_domains, old_write_domain;
3569
3570 /* If we're coming from LLC cached, then we haven't
3571 * actually been tracking whether the data is in the
3572 * CPU cache or not, since we only allow one bit set
3573 * in obj->write_domain and have been skipping the clflushes.
3574 * Just set it to the CPU cache for now.
3575 */
Chris Wilsonc8725f32014-03-17 12:21:55 +00003576 i915_gem_object_retire(obj);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003577 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003578
3579 old_read_domains = obj->base.read_domains;
3580 old_write_domain = obj->base.write_domain;
3581
3582 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3583 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3584
3585 trace_i915_gem_object_change_domain(obj,
3586 old_read_domains,
3587 old_write_domain);
3588 }
3589
Chris Wilson42d6ab42012-07-26 11:49:32 +01003590 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003591 return 0;
3592}
3593
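/*
 * Note on i915_gem_object_set_cache_level() above: every VMA whose node
 * would be invalid at the new cache level is unbound first, and any VMA
 * that stays bound is rebound so its PTEs pick up the new cacheability
 * attributes; only then are obj->cache_level and each vma->node.color
 * (which the GTT colouring check consults) updated.
 */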
Ben Widawsky199adf42012-09-21 17:01:20 -07003594int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3595 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003596{
Ben Widawsky199adf42012-09-21 17:01:20 -07003597 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003598 struct drm_i915_gem_object *obj;
3599 int ret;
3600
3601 ret = i915_mutex_lock_interruptible(dev);
3602 if (ret)
3603 return ret;
3604
3605 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3606 if (&obj->base == NULL) {
3607 ret = -ENOENT;
3608 goto unlock;
3609 }
3610
Chris Wilson651d7942013-08-08 14:41:10 +01003611 switch (obj->cache_level) {
3612 case I915_CACHE_LLC:
3613 case I915_CACHE_L3_LLC:
3614 args->caching = I915_CACHING_CACHED;
3615 break;
3616
Chris Wilson4257d3b2013-08-08 14:41:11 +01003617 case I915_CACHE_WT:
3618 args->caching = I915_CACHING_DISPLAY;
3619 break;
3620
Chris Wilson651d7942013-08-08 14:41:10 +01003621 default:
3622 args->caching = I915_CACHING_NONE;
3623 break;
3624 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003625
3626 drm_gem_object_unreference(&obj->base);
3627unlock:
3628 mutex_unlock(&dev->struct_mutex);
3629 return ret;
3630}
3631
Ben Widawsky199adf42012-09-21 17:01:20 -07003632int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3633 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003634{
Ben Widawsky199adf42012-09-21 17:01:20 -07003635 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003636 struct drm_i915_gem_object *obj;
3637 enum i915_cache_level level;
3638 int ret;
3639
Ben Widawsky199adf42012-09-21 17:01:20 -07003640 switch (args->caching) {
3641 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003642 level = I915_CACHE_NONE;
3643 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003644 case I915_CACHING_CACHED:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003645 level = I915_CACHE_LLC;
3646 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003647 case I915_CACHING_DISPLAY:
3648 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3649 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003650 default:
3651 return -EINVAL;
3652 }
3653
Ben Widawsky3bc29132012-09-26 16:15:20 -07003654 ret = i915_mutex_lock_interruptible(dev);
3655 if (ret)
3656 return ret;
3657
Chris Wilsone6994ae2012-07-10 10:27:08 +01003658 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3659 if (&obj->base == NULL) {
3660 ret = -ENOENT;
3661 goto unlock;
3662 }
3663
3664 ret = i915_gem_object_set_cache_level(obj, level);
3665
3666 drm_gem_object_unreference(&obj->base);
3667unlock:
3668 mutex_unlock(&dev->struct_mutex);
3669 return ret;
3670}
3671
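/*
 * Illustrative userspace sketch (hypothetical, not part of this file):
 * the two ioctls above are reached through DRM_IOCTL_I915_GEM_GET_CACHING
 * and DRM_IOCTL_I915_GEM_SET_CACHING, e.g.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * Unknown modes fail with -EINVAL; pinned objects fail with -EBUSY via
 * i915_gem_object_set_cache_level().
 */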
Chris Wilsoncc98b412013-08-09 12:25:09 +01003672static bool is_pin_display(struct drm_i915_gem_object *obj)
3673{
Oscar Mateo19656432014-05-16 14:20:43 +01003674 struct i915_vma *vma;
3675
3676 if (list_empty(&obj->vma_list))
3677 return false;
3678
3679 vma = i915_gem_obj_to_ggtt(obj);
3680 if (!vma)
3681 return false;
3682
Chris Wilsoncc98b412013-08-09 12:25:09 +01003683 /* There are 3 sources that pin objects:
3684 * 1. The display engine (scanouts, sprites, cursors);
3685 * 2. Reservations for execbuffer;
3686 * 3. The user.
3687 *
3688 * We can ignore reservations as we hold the struct_mutex and
3689 * are only called outside of the reservation path. The user
3690 * can only increment pin_count once, and so if after
3691 * subtracting the potential reference by the user, any pin_count
3692 * remains, it must be due to another use by the display engine.
3693 */
Oscar Mateo19656432014-05-16 14:20:43 +01003694 return vma->pin_count - !!obj->user_pin_count;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003695}
3696
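/*
 * Note on is_pin_display() above: the user contributes at most one pin
 * (tracked via obj->user_pin_count, hence the !! to clamp it to 0 or 1),
 * so after subtracting it any remaining vma->pin_count must belong to
 * the display engine.
 */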
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003697/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003698 * Prepare buffer for display plane (scanout, cursors, etc).
3699 * Can be called from an uninterruptible phase (modesetting) and allows
3700 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003701 */
3702int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003703i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3704 u32 alignment,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01003705 struct intel_engine_cs *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003706{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003707 u32 old_read_domains, old_write_domain;
Oscar Mateo19656432014-05-16 14:20:43 +01003708 bool was_pin_display;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003709 int ret;
3710
Chris Wilson0be73282010-12-06 14:36:27 +00003711 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003712 ret = i915_gem_object_sync(obj, pipelined);
3713 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003714 return ret;
3715 }
3716
Chris Wilsoncc98b412013-08-09 12:25:09 +01003717 /* Mark the pin_display early so that we account for the
3718 * display coherency whilst setting up the cache domains.
3719 */
Oscar Mateo19656432014-05-16 14:20:43 +01003720 was_pin_display = obj->pin_display;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003721 obj->pin_display = true;
3722
Eric Anholta7ef0642011-03-29 16:59:54 -07003723 /* The display engine is not coherent with the LLC cache on gen6. As
3724 * a result, we make sure that the pinning that is about to occur is
3725 * done with uncached PTEs. This is lowest common denominator for all
3726 * chipsets.
3727 *
3728 * However for gen6+, we could do better by using the GFDT bit instead
3729 * of uncaching, which would allow us to flush all the LLC-cached data
3730 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3731 */
Chris Wilson651d7942013-08-08 14:41:10 +01003732 ret = i915_gem_object_set_cache_level(obj,
3733 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07003734 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003735 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07003736
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003737 /* As the user may map the buffer once pinned in the display plane
3738 * (e.g. libkms for the bootup splash), we have to ensure that we
3739 * always use map_and_fenceable for all scanout buffers.
3740 */
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003741 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003742 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003743 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003744
Chris Wilson2c225692013-08-09 12:26:45 +01003745 i915_gem_object_flush_cpu_write_domain(obj, true);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003746
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003747 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003748 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003749
3750 /* It should now be out of any other write domains, and we can update
3751 * the domain values for our changes.
3752 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003753 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003754 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003755
3756 trace_i915_gem_object_change_domain(obj,
3757 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003758 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003759
3760 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003761
3762err_unpin_display:
Oscar Mateo19656432014-05-16 14:20:43 +01003763 WARN_ON(was_pin_display != is_pin_display(obj));
3764 obj->pin_display = was_pin_display;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003765 return ret;
3766}
3767
3768void
3769i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3770{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003771 i915_gem_object_ggtt_unpin(obj);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003772 obj->pin_display = is_pin_display(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003773}
3774
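/*
 * Illustrative sketch (hypothetical modeset caller, not part of this
 * file) of how the two functions above pair up around a scanout:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, ring);
 *	if (ret)
 *		return ret;
 *	... program the plane registers, flip ...
 *	i915_gem_object_unpin_from_display_plane(obj);
 *
 * The error path in the pin function restores obj->pin_display to its
 * previous value, so a failed pin leaves the accounting untouched.
 */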
Chris Wilson85345512010-11-13 09:49:11 +00003775int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003776i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003777{
Chris Wilson88241782011-01-07 17:09:48 +00003778 int ret;
3779
Chris Wilsona8198ee2011-04-13 22:04:09 +01003780 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003781 return 0;
3782
Chris Wilson0201f1e2012-07-20 12:41:01 +01003783 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsonc501ae72011-12-14 13:57:23 +01003784 if (ret)
3785 return ret;
3786
Chris Wilsona8198ee2011-04-13 22:04:09 +01003787 /* Ensure that we invalidate the GPU's caches and TLBs. */
3788 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003789 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003790}
3791
Eric Anholte47c68e2008-11-14 13:35:19 -08003792/**
3793 * Moves a single object to the CPU read, and possibly write domain.
3794 *
3795 * This function returns when the move is complete, including waiting on
3796 * flushes to occur.
3797 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003798int
Chris Wilson919926a2010-11-12 13:42:53 +00003799i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003800{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003801 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003802 int ret;
3803
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003804 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3805 return 0;
3806
Chris Wilson0201f1e2012-07-20 12:41:01 +01003807 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003808 if (ret)
3809 return ret;
3810
Chris Wilsonc8725f32014-03-17 12:21:55 +00003811 i915_gem_object_retire(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003812 i915_gem_object_flush_gtt_write_domain(obj);
3813
Chris Wilson05394f32010-11-08 19:18:58 +00003814 old_write_domain = obj->base.write_domain;
3815 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003816
Eric Anholte47c68e2008-11-14 13:35:19 -08003817 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003818 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003819 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003820
Chris Wilson05394f32010-11-08 19:18:58 +00003821 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003822 }
3823
3824 /* It should now be out of any other write domains, and we can update
3825 * the domain values for our changes.
3826 */
Chris Wilson05394f32010-11-08 19:18:58 +00003827 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003828
3829 /* If we're writing through the CPU, then the GPU read domains will
3830 * need to be invalidated at next use.
3831 */
3832 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003833 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3834 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003835 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003836
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003837 trace_i915_gem_object_change_domain(obj,
3838 old_read_domains,
3839 old_write_domain);
3840
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003841 return 0;
3842}
3843
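/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * path that wants to read the object's pages through the CPU without
 * stale cachelines would do:
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 *	if (ret)
 *		return ret;
 *	... kmap/memcpy from obj->pages ...
 */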
Eric Anholt673a3942008-07-30 12:06:12 -07003844/* Throttle our rendering by waiting until the ring has completed our requests
3845 * emitted over 20 msec ago.
3846 *
Eric Anholtb9624422009-06-03 07:27:35 +00003847 * Note that if we were to use the current jiffies each time around the loop,
3848 * we wouldn't escape the function with any frames outstanding if the time to
3849 * render a frame was over 20ms.
3850 *
Eric Anholt673a3942008-07-30 12:06:12 -07003851 * This should get us reasonable parallelism between CPU and GPU but also
3852 * relatively low latency when blocking on a particular request to finish.
3853 */
3854static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003855i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003856{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003857 struct drm_i915_private *dev_priv = dev->dev_private;
3858 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003859 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003860 struct drm_i915_gem_request *request;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01003861 struct intel_engine_cs *ring = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01003862 unsigned reset_counter;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003863 u32 seqno = 0;
3864 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003865
Daniel Vetter308887a2012-11-14 17:14:06 +01003866 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3867 if (ret)
3868 return ret;
3869
3870 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3871 if (ret)
3872 return ret;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003873
Chris Wilson1c255952010-09-26 11:03:27 +01003874 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003875 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003876 if (time_after_eq(request->emitted_jiffies, recent_enough))
3877 break;
3878
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003879 ring = request->ring;
3880 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003881 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01003882 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson1c255952010-09-26 11:03:27 +01003883 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003884
3885 if (seqno == 0)
3886 return 0;
3887
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003888 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003889 if (ret == 0)
3890 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003891
Eric Anholt673a3942008-07-30 12:06:12 -07003892 return ret;
3893}
3894
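/*
 * Note on i915_gem_ring_throttle() above: the request list is scanned
 * only under file_priv->mm.lock; the actual wait happens after the lock
 * is dropped, against a snapshotted reset_counter, so a GPU reset that
 * occurs in between is detected by __wait_seqno() rather than blocking
 * the throttle indefinitely.
 */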
Eric Anholt673a3942008-07-30 12:06:12 -07003895int
Chris Wilson05394f32010-11-08 19:18:58 +00003896i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07003897 struct i915_address_space *vm,
Chris Wilson05394f32010-11-08 19:18:58 +00003898 uint32_t alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003899 unsigned flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003900{
Ben Widawsky6e7186a2014-05-06 22:21:36 -07003901 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003902 struct i915_vma *vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003903 int ret;
3904
Ben Widawsky6e7186a2014-05-06 22:21:36 -07003905 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
3906 return -ENODEV;
3907
Daniel Vetterbf3d1492014-02-14 14:01:12 +01003908 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003909 return -EINVAL;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003910
3911 vma = i915_gem_obj_to_vma(obj, vm);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003912 if (vma) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003913 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3914 return -EBUSY;
3915
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003916 if ((alignment &&
3917 vma->node.start & (alignment - 1)) ||
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003918 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003919 WARN(vma->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003920 "bo is already pinned with incorrect alignment:"
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003921 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003922 " obj->map_and_fenceable=%d\n",
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003923 i915_gem_obj_offset(obj, vm), alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003924 flags & PIN_MAPPABLE,
Chris Wilson05394f32010-11-08 19:18:58 +00003925 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003926 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003927 if (ret)
3928 return ret;
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003929
3930 vma = NULL;
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003931 }
3932 }
3933
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003934 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
Daniel Vetter262de142014-02-14 14:01:20 +01003935 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
3936 if (IS_ERR(vma))
3937 return PTR_ERR(vma);
Chris Wilson22c344e2009-02-11 14:26:45 +00003938 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003939
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003940 if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
3941 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
Daniel Vetter74898d72012-02-15 23:50:22 +01003942
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003943 vma->pin_count++;
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003944 if (flags & PIN_MAPPABLE)
 3945 		obj->pin_mappable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07003946
3947 return 0;
3948}
3949
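/*
 * Note on i915_gem_object_pin() above: PIN_GLOBAL and PIN_MAPPABLE only
 * make sense for the global GTT, hence the WARN when either is passed
 * with a PPGTT address space. An already-bound VMA that violates the
 * requested alignment or mappability is unbound and rebound rather than
 * failing the pin.
 */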
3950void
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003951i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003952{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003953 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003954
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003955 BUG_ON(!vma);
3956 BUG_ON(vma->pin_count == 0);
3957 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3958
3959 if (--vma->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003960 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003961}
3962
Daniel Vetterd8ffa602014-05-13 12:11:26 +02003963bool
3964i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
3965{
3966 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3967 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3968 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
3969
3970 WARN_ON(!ggtt_vma ||
3971 dev_priv->fence_regs[obj->fence_reg].pin_count >
3972 ggtt_vma->pin_count);
3973 dev_priv->fence_regs[obj->fence_reg].pin_count++;
3974 return true;
3975 } else
3976 return false;
3977}
3978
3979void
3980i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
3981{
3982 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3983 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3984 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
3985 dev_priv->fence_regs[obj->fence_reg].pin_count--;
3986 }
3987}
3988
Eric Anholt673a3942008-07-30 12:06:12 -07003989int
3990i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003991 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003992{
3993 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003994 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003995 int ret;
3996
Daniel Vetter02f6bcc2013-12-18 16:30:22 +01003997 if (INTEL_INFO(dev)->gen >= 6)
3998 return -ENODEV;
3999
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004000 ret = i915_mutex_lock_interruptible(dev);
4001 if (ret)
4002 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004003
Chris Wilson05394f32010-11-08 19:18:58 +00004004 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004005 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004006 ret = -ENOENT;
4007 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004008 }
Eric Anholt673a3942008-07-30 12:06:12 -07004009
Chris Wilson05394f32010-11-08 19:18:58 +00004010 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00004011 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00004012 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004013 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004014 }
4015
Chris Wilson05394f32010-11-08 19:18:58 +00004016 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00004017 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
Jesse Barnes79e53942008-11-07 14:24:08 -08004018 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004019 ret = -EINVAL;
4020 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08004021 }
4022
Daniel Vetteraa5f8022013-10-10 14:46:37 +02004023 if (obj->user_pin_count == ULONG_MAX) {
4024 ret = -EBUSY;
4025 goto out;
4026 }
4027
Chris Wilson93be8782013-01-02 10:31:22 +00004028 if (obj->user_pin_count == 0) {
Daniel Vetter1ec9e262014-02-14 14:01:11 +01004029 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004030 if (ret)
4031 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07004032 }
4033
Chris Wilson93be8782013-01-02 10:31:22 +00004034 obj->user_pin_count++;
4035 obj->pin_filp = file;
4036
Ben Widawskyf343c5f2013-07-05 14:41:04 -07004037 args->offset = i915_gem_obj_ggtt_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004038out:
Chris Wilson05394f32010-11-08 19:18:58 +00004039 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004040unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004041 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004042 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004043}
4044
4045int
4046i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004047 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004048{
4049 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004050 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004051 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004052
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004053 ret = i915_mutex_lock_interruptible(dev);
4054 if (ret)
4055 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004056
Chris Wilson05394f32010-11-08 19:18:58 +00004057 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004058 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004059 ret = -ENOENT;
4060 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004061 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01004062
Chris Wilson05394f32010-11-08 19:18:58 +00004063 if (obj->pin_filp != file) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00004064 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
Jesse Barnes79e53942008-11-07 14:24:08 -08004065 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004066 ret = -EINVAL;
4067 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08004068 }
Chris Wilson05394f32010-11-08 19:18:58 +00004069 obj->user_pin_count--;
4070 if (obj->user_pin_count == 0) {
4071 obj->pin_filp = NULL;
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004072 i915_gem_object_ggtt_unpin(obj);
Jesse Barnes79e53942008-11-07 14:24:08 -08004073 }
Eric Anholt673a3942008-07-30 12:06:12 -07004074
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004075out:
Chris Wilson05394f32010-11-08 19:18:58 +00004076 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004077unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004078 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004079 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004080}
4081
4082int
4083i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004084 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004085{
4086 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004087 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004088 int ret;
4089
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004090 ret = i915_mutex_lock_interruptible(dev);
4091 if (ret)
4092 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004093
Chris Wilson05394f32010-11-08 19:18:58 +00004094 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004095 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004096 ret = -ENOENT;
4097 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004098 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08004099
Chris Wilson0be555b2010-08-04 15:36:30 +01004100 /* Count all active objects as busy, even if they are currently not used
4101 * by the gpu. Users of this interface expect objects to eventually
4102 * become non-busy without any further actions, therefore emit any
4103 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08004104 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02004105 ret = i915_gem_object_flush_active(obj);
4106
Chris Wilson05394f32010-11-08 19:18:58 +00004107 args->busy = obj->active;
Chris Wilsone9808ed2012-07-04 12:25:08 +01004108 if (obj->ring) {
4109 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4110 args->busy |= intel_ring_flag(obj->ring) << 16;
4111 }
Eric Anholt673a3942008-07-30 12:06:12 -07004112
Chris Wilson05394f32010-11-08 19:18:58 +00004113 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004114unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004115 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004116 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004117}
4118
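/*
 * Illustrative userspace sketch (hypothetical, not part of this file):
 * bit 0 of the result reports activity and bits 16+ encode the ring
 * flag, so a caller can poll and decode with
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (busy.busy & 1)
 *		ring_flag = busy.busy >> 16;
 */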
4119int
4120i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4121 struct drm_file *file_priv)
4122{
Akshay Joshi0206e352011-08-16 15:34:10 -04004123 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004124}
4125
Chris Wilson3ef94da2009-09-14 16:50:29 +01004126int
4127i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4128 struct drm_file *file_priv)
4129{
4130 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004131 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004132 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004133
4134 switch (args->madv) {
4135 case I915_MADV_DONTNEED:
4136 case I915_MADV_WILLNEED:
4137 break;
4138 default:
4139 return -EINVAL;
4140 }
4141
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004142 ret = i915_mutex_lock_interruptible(dev);
4143 if (ret)
4144 return ret;
4145
Chris Wilson05394f32010-11-08 19:18:58 +00004146 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004147 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004148 ret = -ENOENT;
4149 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004150 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01004151
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004152 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004153 ret = -EINVAL;
4154 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004155 }
4156
Chris Wilson05394f32010-11-08 19:18:58 +00004157 if (obj->madv != __I915_MADV_PURGED)
4158 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004159
Chris Wilson6c085a72012-08-20 11:40:46 +02004160 /* if the object is no longer attached, discard its backing storage */
4161 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004162 i915_gem_object_truncate(obj);
4163
Chris Wilson05394f32010-11-08 19:18:58 +00004164 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004165
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004166out:
Chris Wilson05394f32010-11-08 19:18:58 +00004167 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004168unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004169 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004170 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004171}
4172
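/*
 * Illustrative userspace sketch (hypothetical, not part of this file;
 * recreate_buffer_contents() is a made-up helper): a cache of idle
 * buffers can mark them purgeable and later reclaim them, checking
 * args->retained to learn whether the pages survived:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		recreate_buffer_contents();
 */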
Chris Wilson37e680a2012-06-07 15:38:42 +01004173void i915_gem_object_init(struct drm_i915_gem_object *obj,
4174 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004175{
Ben Widawsky35c20a62013-05-31 11:28:48 -07004176 INIT_LIST_HEAD(&obj->global_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004177 INIT_LIST_HEAD(&obj->ring_list);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02004178 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004179 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004180
Chris Wilson37e680a2012-06-07 15:38:42 +01004181 obj->ops = ops;
4182
Chris Wilson0327d6b2012-08-11 15:41:06 +01004183 obj->fence_reg = I915_FENCE_REG_NONE;
4184 obj->madv = I915_MADV_WILLNEED;
4185 /* Avoid an unnecessary call to unbind on the first bind. */
4186 obj->map_and_fenceable = true;
4187
4188 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4189}
4190
Chris Wilson37e680a2012-06-07 15:38:42 +01004191static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4192 .get_pages = i915_gem_object_get_pages_gtt,
4193 .put_pages = i915_gem_object_put_pages_gtt,
4194};
4195
Chris Wilson05394f32010-11-08 19:18:58 +00004196struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4197 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004198{
Daniel Vetterc397b902010-04-09 19:05:07 +00004199 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004200 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004201 gfp_t mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00004202
Chris Wilson42dcedd2012-11-15 11:32:30 +00004203 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004204 if (obj == NULL)
4205 return NULL;
4206
4207 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
Chris Wilson42dcedd2012-11-15 11:32:30 +00004208 i915_gem_object_free(obj);
Daniel Vetterc397b902010-04-09 19:05:07 +00004209 return NULL;
4210 }
4211
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004212 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4213 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4214 /* 965gm cannot relocate objects above 4GiB. */
4215 mask &= ~__GFP_HIGHMEM;
4216 mask |= __GFP_DMA32;
4217 }
4218
Al Viro496ad9a2013-01-23 17:07:38 -05004219 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004220 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004221
Chris Wilson37e680a2012-06-07 15:38:42 +01004222 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004223
Daniel Vetterc397b902010-04-09 19:05:07 +00004224 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4225 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4226
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004227 if (HAS_LLC(dev)) {
4228 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004229 * cache) for about a 10% performance improvement
4230 * compared to uncached. Graphics requests other than
4231 * display scanout are coherent with the CPU in
4232 * accessing this cache. This means in this mode we
4233 * don't need to clflush on the CPU side, and on the
4234 * GPU side we only need to flush internal caches to
4235 * get data visible to the CPU.
4236 *
4237 * However, we maintain the display planes as UC, and so
4238 * need to rebind when first used as such.
4239 */
4240 obj->cache_level = I915_CACHE_LLC;
4241 } else
4242 obj->cache_level = I915_CACHE_NONE;
4243
Daniel Vetterd861e332013-07-24 23:25:03 +02004244 trace_i915_gem_object_create(obj);
4245
Chris Wilson05394f32010-11-08 19:18:58 +00004246 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004247}
4248
Chris Wilson340fbd82014-05-22 09:16:52 +01004249static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4250{
4251 /* If we are the last user of the backing storage (be it shmemfs
4252 * pages or stolen etc), we know that the pages are going to be
4253 * immediately released. In this case, we can then skip copying
4254 * back the contents from the GPU.
4255 */
4256
4257 if (obj->madv != I915_MADV_WILLNEED)
4258 return false;
4259
4260 if (obj->base.filp == NULL)
4261 return true;
4262
4263 /* At first glance, this looks racy, but then again so would be
4264 * userspace racing mmap against close. However, the first external
4265 * reference to the filp can only be obtained through the
4266 * i915_gem_mmap_ioctl() which safeguards us against the user
4267 * acquiring such a reference whilst we are in the middle of
4268 * freeing the object.
4269 */
4270 return atomic_long_read(&obj->base.filp->f_count) == 1;
4271}
4272
Chris Wilson1488fc02012-04-24 15:47:31 +01004273void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004274{
Chris Wilson1488fc02012-04-24 15:47:31 +01004275 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004276 struct drm_device *dev = obj->base.dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004277 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004278 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004279
Paulo Zanonif65c9162013-11-27 18:20:34 -02004280 intel_runtime_pm_get(dev_priv);
4281
Chris Wilson26e12f82011-03-20 11:20:19 +00004282 trace_i915_gem_object_destroy(obj);
4283
Chris Wilson1488fc02012-04-24 15:47:31 +01004284 if (obj->phys_obj)
4285 i915_gem_detach_phys_object(dev, obj);
4286
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004287 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004288 int ret;
4289
4290 vma->pin_count = 0;
4291 ret = i915_vma_unbind(vma);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004292 if (WARN_ON(ret == -ERESTARTSYS)) {
4293 bool was_interruptible;
Chris Wilson1488fc02012-04-24 15:47:31 +01004294
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004295 was_interruptible = dev_priv->mm.interruptible;
4296 dev_priv->mm.interruptible = false;
Chris Wilson1488fc02012-04-24 15:47:31 +01004297
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004298 WARN_ON(i915_vma_unbind(vma));
Chris Wilson1488fc02012-04-24 15:47:31 +01004299
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004300 dev_priv->mm.interruptible = was_interruptible;
4301 }
Chris Wilson1488fc02012-04-24 15:47:31 +01004302 }
4303
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004304 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4305 * before progressing. */
4306 if (obj->stolen)
4307 i915_gem_object_unpin_pages(obj);
4308
Ben Widawsky401c29f2013-05-31 11:28:47 -07004309 if (WARN_ON(obj->pages_pin_count))
4310 obj->pages_pin_count = 0;
Chris Wilson340fbd82014-05-22 09:16:52 +01004311 if (discard_backing_storage(obj))
Chris Wilson55372522014-03-25 13:23:06 +00004312 obj->madv = I915_MADV_DONTNEED;
Chris Wilson37e680a2012-06-07 15:38:42 +01004313 i915_gem_object_put_pages(obj);
Chris Wilsond8cb5082012-08-11 15:41:03 +01004314 i915_gem_object_free_mmap_offset(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +00004315 i915_gem_object_release_stolen(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004316
Chris Wilson9da3da62012-06-01 15:20:22 +01004317 BUG_ON(obj->pages);
4318
Chris Wilson2f745ad2012-09-04 21:02:58 +01004319 if (obj->base.import_attach)
4320 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004321
Chris Wilson5cc9ed42014-05-16 14:22:37 +01004322 if (obj->ops->release)
4323 obj->ops->release(obj);
4324
Chris Wilson05394f32010-11-08 19:18:58 +00004325 drm_gem_object_release(&obj->base);
4326 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004327
Chris Wilson05394f32010-11-08 19:18:58 +00004328 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004329 i915_gem_object_free(obj);
Paulo Zanonif65c9162013-11-27 18:20:34 -02004330
4331 intel_runtime_pm_put(dev_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +01004332}
4333
Daniel Vettere656a6c2013-08-14 14:14:04 +02004334struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
Ben Widawsky2f633152013-07-17 12:19:03 -07004335 struct i915_address_space *vm)
4336{
Daniel Vettere656a6c2013-08-14 14:14:04 +02004337 struct i915_vma *vma;
4338 list_for_each_entry(vma, &obj->vma_list, vma_link)
4339 if (vma->vm == vm)
4340 return vma;
4341
4342 return NULL;
4343}
4344
Ben Widawsky2f633152013-07-17 12:19:03 -07004345void i915_gem_vma_destroy(struct i915_vma *vma)
4346{
4347 WARN_ON(vma->node.allocated);
Chris Wilsonaaa056672013-08-20 12:56:40 +01004348
4349 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4350 if (!list_empty(&vma->exec_list))
4351 return;
4352
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004353 list_del(&vma->vma_link);
Daniel Vetterb93dab62013-08-26 11:23:47 +02004354
Ben Widawsky2f633152013-07-17 12:19:03 -07004355 kfree(vma);
4356}
4357
Chris Wilsone3efda42014-04-09 09:19:41 +01004358static void
4359i915_gem_stop_ringbuffers(struct drm_device *dev)
4360{
4361 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01004362 struct intel_engine_cs *ring;
Chris Wilsone3efda42014-04-09 09:19:41 +01004363 int i;
4364
4365 for_each_ring(ring, dev_priv, i)
4366 intel_stop_ring_buffer(ring);
4367}
4368
Jesse Barnes5669fca2009-02-17 15:13:31 -08004369int
Chris Wilson45c5f202013-10-16 11:50:01 +01004370i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004371{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004372 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson45c5f202013-10-16 11:50:01 +01004373 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004374
Chris Wilson45c5f202013-10-16 11:50:01 +01004375 mutex_lock(&dev->struct_mutex);
Chris Wilsonf7403342013-09-13 23:57:04 +01004376 if (dev_priv->ums.mm_suspended)
Chris Wilson45c5f202013-10-16 11:50:01 +01004377 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07004378
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004379 ret = i915_gpu_idle(dev);
Chris Wilsonf7403342013-09-13 23:57:04 +01004380 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004381 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004382
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004383 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004384
Chris Wilson29105cc2010-01-07 10:39:13 +00004385 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01004386 if (!drm_core_check_feature(dev, DRIVER_MODESET))
Chris Wilson6c085a72012-08-20 11:40:46 +02004387 i915_gem_evict_everything(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004388
Chris Wilson29105cc2010-01-07 10:39:13 +00004389 i915_kernel_lost_context(dev);
Chris Wilsone3efda42014-04-09 09:19:41 +01004390 i915_gem_stop_ringbuffers(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004391
Chris Wilson45c5f202013-10-16 11:50:01 +01004392 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4393 * We need to replace this with a semaphore, or something.
4394 * And not confound ums.mm_suspended!
4395 */
4396 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4397 DRIVER_MODESET);
4398 mutex_unlock(&dev->struct_mutex);
4399
4400 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004401 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004402 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004403
Eric Anholt673a3942008-07-30 12:06:12 -07004404 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004405
4406err:
4407 mutex_unlock(&dev->struct_mutex);
4408 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004409}
4410
Oscar Mateoa4872ba2014-05-22 14:13:33 +01004411int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
Ben Widawskyb9524a12012-05-25 16:56:24 -07004412{
Ben Widawskyc3787e22013-09-17 21:12:44 -07004413 struct drm_device *dev = ring->dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004414 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004415 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4416 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
Ben Widawskyc3787e22013-09-17 21:12:44 -07004417 int i, ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004418
Ben Widawsky040d2ba2013-09-19 11:01:40 -07004419 if (!HAS_L3_DPF(dev) || !remap_info)
Ben Widawskyc3787e22013-09-17 21:12:44 -07004420 return 0;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004421
Ben Widawskyc3787e22013-09-17 21:12:44 -07004422 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4423 if (ret)
4424 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004425
Ben Widawskyc3787e22013-09-17 21:12:44 -07004426 /*
4427 * Note: We do not worry about the concurrent register cacheline hang
4428 * here because no other code should access these registers other than
4429 * at initialization time.
4430 */
Ben Widawskyb9524a12012-05-25 16:56:24 -07004431 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
Ben Widawskyc3787e22013-09-17 21:12:44 -07004432 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4433 intel_ring_emit(ring, reg_base + i);
4434 intel_ring_emit(ring, remap_info[i/4]);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004435 }
4436
Ben Widawskyc3787e22013-09-17 21:12:44 -07004437 intel_ring_advance(ring);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004438
Ben Widawskyc3787e22013-09-17 21:12:44 -07004439 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004440}
4441
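/*
 * Note on i915_gem_l3_remap() above: each remap register is written with
 * a one-register MI_LOAD_REGISTER_IMM, i.e. three dwords (command,
 * register offset, value) per 4-byte step through the L3 log, which is
 * why the ring is opened with GEN7_L3LOG_SIZE / 4 * 3 slots.
 */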
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004442void i915_gem_init_swizzling(struct drm_device *dev)
4443{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004444 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004445
Daniel Vetter11782b02012-01-31 16:47:55 +01004446 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004447 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4448 return;
4449
4450 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4451 DISP_TILE_SURFACE_SWIZZLING);
4452
Daniel Vetter11782b02012-01-31 16:47:55 +01004453 if (IS_GEN5(dev))
4454 return;
4455
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004456 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4457 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004458 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004459 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004460 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky31a53362013-11-02 21:07:04 -07004461 else if (IS_GEN8(dev))
4462 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004463 else
4464 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004465}
Daniel Vettere21af882012-02-09 20:53:27 +01004466
Chris Wilson67b1b572012-07-05 23:49:40 +01004467static bool
4468intel_enable_blt(struct drm_device *dev)
4469{
4470 if (!HAS_BLT(dev))
4471 return false;
4472
4473 /* The blitter was dysfunctional on early prototypes */
4474 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4475 DRM_INFO("BLT not supported on this pre-production hardware;"
4476 " graphics performance will be degraded.\n");
4477 return false;
4478 }
4479
4480 return true;
4481}
4482
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004483static int i915_gem_init_rings(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004484{
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004485 struct drm_i915_private *dev_priv = dev->dev_private;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004486 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004487
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004488 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004489 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00004490 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004491
4492 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004493 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004494 if (ret)
4495 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004496 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004497
Chris Wilson67b1b572012-07-05 23:49:40 +01004498 if (intel_enable_blt(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01004499 ret = intel_init_blt_ring_buffer(dev);
4500 if (ret)
4501 goto cleanup_bsd_ring;
4502 }
4503
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004504 if (HAS_VEBOX(dev)) {
4505 ret = intel_init_vebox_ring_buffer(dev);
4506 if (ret)
4507 goto cleanup_blt_ring;
4508 }
4509
Zhao Yakui845f74a2014-04-17 10:37:37 +08004510 if (HAS_BSD2(dev)) {
4511 ret = intel_init_bsd2_ring_buffer(dev);
4512 if (ret)
4513 goto cleanup_vebox_ring;
4514 }
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004515
Mika Kuoppala99433932013-01-22 14:12:17 +02004516 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4517 if (ret)
Zhao Yakui845f74a2014-04-17 10:37:37 +08004518 goto cleanup_bsd2_ring;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004519
4520 return 0;
4521
Zhao Yakui845f74a2014-04-17 10:37:37 +08004522cleanup_bsd2_ring:
4523 intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004524cleanup_vebox_ring:
4525 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004526cleanup_blt_ring:
4527 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4528cleanup_bsd_ring:
4529 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4530cleanup_render_ring:
4531 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4532
4533 return ret;
4534}
4535
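/*
 * Note on i915_gem_init_rings() above: the cleanup labels unwind in
 * strict reverse order of initialisation, so a failure part-way through
 * tears down only the rings that were actually brought up.
 */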
4536int
4537i915_gem_init_hw(struct drm_device *dev)
4538{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004539 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004540 int ret, i;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004541
4542 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4543 return -EIO;
4544
Ben Widawsky59124502013-07-04 11:02:05 -07004545 if (dev_priv->ellc_size)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004546 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004547
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004548 if (IS_HASWELL(dev))
4549 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4550 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004551
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004552 if (HAS_PCH_NOP(dev)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004553 if (IS_IVYBRIDGE(dev)) {
4554 u32 temp = I915_READ(GEN7_MSG_CTL);
4555 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4556 I915_WRITE(GEN7_MSG_CTL, temp);
4557 } else if (INTEL_INFO(dev)->gen >= 7) {
4558 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4559 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4560 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4561 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004562 }
4563
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004564 i915_gem_init_swizzling(dev);
4565
4566 ret = i915_gem_init_rings(dev);
4567 if (ret)
Mika Kuoppala99433932013-01-22 14:12:17 +02004568 return ret;
4569
Ben Widawskyc3787e22013-09-17 21:12:44 -07004570 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4571 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4572
Ben Widawsky254f9652012-06-04 14:42:42 -07004573 /*
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004574 * XXX: Contexts should only be initialized once. Doing a switch to the
4575 * default context switch however is something we'd like to do after
4576 * reset or thaw (the latter may not actually be necessary for HW, but
4577 * goes with our code better). Context switching requires rings (for
4578 * the do_switch), but before enabling PPGTT. So don't move this.
Ben Widawsky254f9652012-06-04 14:42:42 -07004579 */
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004580 ret = i915_gem_context_enable(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01004581 if (ret && ret != -EIO) {
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004582 DRM_ERROR("Context enable failed %d\n", ret);
Chris Wilson60990322014-04-09 09:19:42 +01004583 i915_gem_cleanup_ringbuffer(dev);
Ben Widawskyb7c36d22013-04-08 18:43:56 -07004584 }
Daniel Vettere21af882012-02-09 20:53:27 +01004585
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004586 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004587}
4588
Chris Wilson1070a422012-04-24 15:47:41 +01004589int i915_gem_init(struct drm_device *dev)
4590{
4591 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1070a422012-04-24 15:47:41 +01004592 int ret;
4593
Chris Wilson1070a422012-04-24 15:47:41 +01004594 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004595
4596 if (IS_VALLEYVIEW(dev)) {
4597 /* VLVA0 (potential hack), BIOS isn't actually waking us */
		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
			      VLV_GTLC_ALLOWWAKEACK), 10))
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
	}

	i915_gem_init_userptr(dev);
	i915_gem_init_global_gtt(dev);

	ret = i915_gem_context_init(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow ring initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failures, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}
	mutex_unlock(&dev->struct_mutex);

	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->dri1.allow_batchbuffer = 1;
	return ret;
}

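/*
 * Illustrative sketch (not in the original source): once the -EIO path
 * above has set I915_WEDGED in reset_counter, other paths can detect the
 * terminal state. A minimal check, assuming the reset_counter encoding
 * this driver uses elsewhere; the helper name is hypothetical:
 */
static inline bool example_gpu_is_wedged(struct drm_i915_private *dev_priv)
{
	return atomic_read(&dev_priv->gpu_error.reset_counter) & I915_WEDGED;
}
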
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		intel_cleanup_ring_buffer(ring);
}

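/*
 * Added note (not in the original source): the entervt/leavevt ioctls
 * below are legacy user-mode-setting (UMS) entry points. Under KMS the
 * kernel owns the hardware across VT switches, so both return
 * immediately when DRIVER_MODESET is set.
 */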
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->ums.mm_suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));

	ret = drm_irq_install(dev, dev->pdev->irq);
	if (ret)
		goto cleanup_ringbuffer;
	mutex_unlock(&dev->struct_mutex);

	return 0;

cleanup_ringbuffer:
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->ums.mm_suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	mutex_lock(&dev->struct_mutex);
	drm_irq_uninstall(dev);
	mutex_unlock(&dev->struct_mutex);

	return i915_gem_suspend(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_suspend(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_engine_cs *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}

void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm)
{
	if (!i915_is_ggtt(vm))
		drm_mm_init(&vm->mm, vm->start, vm->total);
	vm->dev = dev_priv->dev;
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->global_link);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

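/*
 * Illustrative sketch (not in the original source): a full PPGTT would
 * set its range and then hand the address space to i915_init_vm(), which
 * initialises the drm_mm allocator and links the VM into
 * dev_priv->vm_list. The GGTT skips drm_mm_init() here because the GTT
 * setup code initialises its range separately. The function name and the
 * 2 GiB size below are hypothetical.
 */
static void example_ppgtt_init_vm(struct drm_i915_private *dev_priv,
				  struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->base.start = 0;
	ppgtt->base.total = 2ULL << 30; /* assume a 2 GiB PPGTT range */
	i915_init_vm(dev_priv, &ppgtt->base);
}
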
void
i915_gem_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->slab =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	i915_init_vm(dev_priv, &dev_priv->gtt.base);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.shrinker);

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	register_oom_notifier(&dev_priv->mm.oom_notifier);
}

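/*
 * Illustrative sketch (not in the original source): the shrinker and OOM
 * notifier registered in i915_gem_load() are assumed to be unregistered
 * symmetrically on driver teardown, roughly as below; the function name
 * is hypothetical.
 */
static void example_gem_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	unregister_oom_notifier(&dev_priv->mm.oom_notifier);
	unregister_shrinker(&dev_priv->mm.shrinker);
}
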
/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	i915_gem_chipset_flush(dev);

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

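/*
 * Usage sketch (not in the original source): how display code might pin
 * a cursor buffer through the attach helper above on hardware that needs
 * a physical cursor address. The wrapper name is hypothetical and the
 * PAGE_SIZE alignment is an assumption.
 */
static int example_pin_cursor_phys(struct drm_device *dev,
				   struct drm_i915_gem_object *obj,
				   int pipe)
{
	return i915_gem_attach_phys_object(dev, obj,
					   I915_GEM_PHYS_CURSOR_0 + pipe,
					   PAGE_SIZE);
}
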
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

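/*
 * Added note (not in the original source): shrinker callbacks can recurse
 * into this driver from allocations made while struct_mutex is already
 * held. i915_gem_shrinker_lock() therefore lets the owning task "steal"
 * the lock it already holds (via mutex_is_locked_by() above) instead of
 * deadlocking, unless shrinker_no_lock_stealing forbids it; *unlock
 * records whether the caller must drop the mutex afterwards.
 */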
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
			return false;

		*unlock = false;
	} else
		*unlock = true;

	return true;
}

static int num_vma_bound(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int count = 0;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			count++;

	return count;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_is_pinned(obj) &&
		    obj->pages_pin_count == num_vma_bound(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

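/*
 * Added note (not in the original source): the shrinker core interprets
 * the value returned by ->count_objects in the same units that
 * ->scan_objects frees; here both are pages, hence the
 * obj->base.size >> PAGE_SHIFT conversions above.
 */
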
/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}
	return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}

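/*
 * Usage sketch (not in the original source): how a caller might combine
 * the helpers above, using i915_gem_obj_bound() to guard the offset
 * lookup since i915_gem_obj_offset() returns -1 when the object has no
 * allocated node in the given address space. The wrapper is hypothetical.
 */
static inline unsigned long
example_obj_offset_or_zero(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm)
{
	return i915_gem_obj_bound(obj, vm) ?
		i915_gem_obj_offset(obj, vm) : 0;
}
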
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
					   sc->nr_to_scan - freed,
					   false);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}

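/*
 * Added note (not in the original source): returning SHRINK_STOP above
 * tells the shrinker core to abandon this cache for the current pass
 * rather than retry, which is the right answer when the lock can neither
 * be taken nor stolen.
 */
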
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long timeout = msecs_to_jiffies(5000) + 1;
	unsigned long pinned, bound, unbound, freed;
	bool was_interruptible;
	bool unlock;

	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
		schedule_timeout_killable(1);
	if (timeout == 0) {
		pr_err("Unable to purge GPU memory due to lock contention.\n");
		return NOTIFY_DONE;
	}

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	freed = i915_gem_shrink_all(dev_priv);

	dev_priv->mm.interruptible = was_interruptible;

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = pinned = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (!obj->base.filp) /* not backed by a freeable object */
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			unbound += obj->base.size;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->base.filp)
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			bound += obj->base.size;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	pr_info("Purging GPU memory, %lu pages freed, %lu bytes still pinned.\n",
		freed, pinned);
	if (unbound || bound)
		pr_err("%lu and %lu bytes still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed;
	return NOTIFY_DONE;
}

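/*
 * Added note (not in the original source): for OOM notifiers the void
 * *ptr argument points at the OOM killer's running tally of freed pages;
 * bumping it above can persuade the core to retry the failed allocation
 * instead of killing a task.
 */
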
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	/* This WARN has probably outlived its usefulness (callers already
	 * WARN if they don't find the GGTT vma they expect). When removing,
	 * remember to remove the pre-check in is_pin_display() as well */
	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (vma->vm != obj_to_ggtt(obj))
		return NULL;

	return vma;
}