blob: 5e54821af99691ee139e3170eea00315cc15c750 [file] [log] [blame]
Eric Anholt673a3942008-07-30 12:06:12 -07001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
Chris Wilson1c5d22f2009-08-25 11:15:50 +010032#include "i915_trace.h"
Jesse Barnes652c3932009-08-17 13:31:43 -070033#include "intel_drv.h"
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Eric Anholt673a3942008-07-30 12:06:12 -070035#include <linux/swap.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080036#include <linux/pci.h>
Zhenyu Wangf8f235e2010-08-27 11:08:57 +080037#include <linux/intel-gtt.h>
Eric Anholt673a3942008-07-30 12:06:12 -070038
Daniel Vetter0108a3e2010-08-07 11:01:21 +010039static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
Daniel Vetterba3d8d72010-02-11 22:37:04 +010040
Daniel Vetterde18a292010-11-27 22:30:41 +010041static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
Eric Anholte47c68e2008-11-14 13:35:19 -080042static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
43static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
Eric Anholte47c68e2008-11-14 13:35:19 -080044static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
45 int write);
46static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
47 uint64_t offset,
48 uint64_t size);
49static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
Chris Wilson2cf34d72010-09-14 13:03:28 +010050static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
51 bool interruptible);
Jesse Barnesde151cf2008-11-12 10:03:55 -080052static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
53 unsigned alignment);
Jesse Barnesde151cf2008-11-12 10:03:55 -080054static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +100055static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
56 struct drm_i915_gem_pwrite *args,
57 struct drm_file *file_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +010058static void i915_gem_free_object_tail(struct drm_gem_object *obj);
Eric Anholt673a3942008-07-30 12:06:12 -070059
Chris Wilson5cdf5882010-09-27 15:51:07 +010060static int
61i915_gem_object_get_pages(struct drm_gem_object *obj,
62 gfp_t gfpmask);
63
64static void
65i915_gem_object_put_pages(struct drm_gem_object *obj);
66
Chris Wilson31169712009-09-14 16:50:28 +010067static LIST_HEAD(shrink_list);
68static DEFINE_SPINLOCK(shrink_list_lock);
69
Chris Wilson73aa8082010-09-30 11:46:12 +010070/* some bookkeeping */
71static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
72 size_t size)
73{
74 dev_priv->mm.object_count++;
75 dev_priv->mm.object_memory += size;
76}
77
78static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
79 size_t size)
80{
81 dev_priv->mm.object_count--;
82 dev_priv->mm.object_memory -= size;
83}
84
85static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
86 size_t size)
87{
88 dev_priv->mm.gtt_count++;
89 dev_priv->mm.gtt_memory += size;
90}
91
92static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
93 size_t size)
94{
95 dev_priv->mm.gtt_count--;
96 dev_priv->mm.gtt_memory -= size;
97}
98
99static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
100 size_t size)
101{
102 dev_priv->mm.pin_count++;
103 dev_priv->mm.pin_memory += size;
104}
105
106static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
107 size_t size)
108{
109 dev_priv->mm.pin_count--;
110 dev_priv->mm.pin_memory -= size;
111}
112
Chris Wilson30dbf0c2010-09-25 10:19:17 +0100113int
114i915_gem_check_is_wedged(struct drm_device *dev)
115{
116 struct drm_i915_private *dev_priv = dev->dev_private;
117 struct completion *x = &dev_priv->error_completion;
118 unsigned long flags;
119 int ret;
120
121 if (!atomic_read(&dev_priv->mm.wedged))
122 return 0;
123
124 ret = wait_for_completion_interruptible(x);
125 if (ret)
126 return ret;
127
128 /* Success, we reset the GPU! */
129 if (!atomic_read(&dev_priv->mm.wedged))
130 return 0;
131
132 /* GPU is hung, bump the completion count to account for
133 * the token we just consumed so that we never hit zero and
134 * end up waiting upon a subsequent completion event that
135 * will never happen.
136 */
137 spin_lock_irqsave(&x->wait.lock, flags);
138 x->done++;
139 spin_unlock_irqrestore(&x->wait.lock, flags);
140 return -EIO;
141}
142
Chris Wilson76c1dec2010-09-25 11:22:51 +0100143static int i915_mutex_lock_interruptible(struct drm_device *dev)
144{
145 struct drm_i915_private *dev_priv = dev->dev_private;
146 int ret;
147
148 ret = i915_gem_check_is_wedged(dev);
149 if (ret)
150 return ret;
151
152 ret = mutex_lock_interruptible(&dev->struct_mutex);
153 if (ret)
154 return ret;
155
156 if (atomic_read(&dev_priv->mm.wedged)) {
157 mutex_unlock(&dev->struct_mutex);
158 return -EAGAIN;
159 }
160
Chris Wilson23bc5982010-09-29 16:10:57 +0100161 WARN_ON(i915_verify_lists(dev));
Chris Wilson76c1dec2010-09-25 11:22:51 +0100162 return 0;
163}
Chris Wilson30dbf0c2010-09-25 10:19:17 +0100164
Chris Wilson7d1c4802010-08-07 21:45:03 +0100165static inline bool
166i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
167{
168 return obj_priv->gtt_space &&
169 !obj_priv->active &&
170 obj_priv->pin_count == 0;
171}
172
Chris Wilson73aa8082010-09-30 11:46:12 +0100173int i915_gem_do_init(struct drm_device *dev,
174 unsigned long start,
Jesse Barnes79e53942008-11-07 14:24:08 -0800175 unsigned long end)
176{
177 drm_i915_private_t *dev_priv = dev->dev_private;
178
179 if (start >= end ||
180 (start & (PAGE_SIZE - 1)) != 0 ||
181 (end & (PAGE_SIZE - 1)) != 0) {
182 return -EINVAL;
183 }
184
185 drm_mm_init(&dev_priv->mm.gtt_space, start,
186 end - start);
187
Chris Wilson73aa8082010-09-30 11:46:12 +0100188 dev_priv->mm.gtt_total = end - start;
Jesse Barnes79e53942008-11-07 14:24:08 -0800189
190 return 0;
191}
Keith Packard6dbe2772008-10-14 21:41:13 -0700192
Eric Anholt673a3942008-07-30 12:06:12 -0700193int
194i915_gem_init_ioctl(struct drm_device *dev, void *data,
195 struct drm_file *file_priv)
196{
Eric Anholt673a3942008-07-30 12:06:12 -0700197 struct drm_i915_gem_init *args = data;
Jesse Barnes79e53942008-11-07 14:24:08 -0800198 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700199
200 mutex_lock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -0800201 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
Eric Anholt673a3942008-07-30 12:06:12 -0700202 mutex_unlock(&dev->struct_mutex);
203
Jesse Barnes79e53942008-11-07 14:24:08 -0800204 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700205}
206
Eric Anholt5a125c32008-10-22 21:40:13 -0700207int
208i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
209 struct drm_file *file_priv)
210{
Chris Wilson73aa8082010-09-30 11:46:12 +0100211 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt5a125c32008-10-22 21:40:13 -0700212 struct drm_i915_gem_get_aperture *args = data;
Eric Anholt5a125c32008-10-22 21:40:13 -0700213
214 if (!(dev->driver->driver_features & DRIVER_GEM))
215 return -ENODEV;
216
Chris Wilson73aa8082010-09-30 11:46:12 +0100217 mutex_lock(&dev->struct_mutex);
218 args->aper_size = dev_priv->mm.gtt_total;
219 args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
220 mutex_unlock(&dev->struct_mutex);
Eric Anholt5a125c32008-10-22 21:40:13 -0700221
222 return 0;
223}
224
Eric Anholt673a3942008-07-30 12:06:12 -0700225
226/**
227 * Creates a new mm object and returns a handle to it.
228 */
229int
230i915_gem_create_ioctl(struct drm_device *dev, void *data,
231 struct drm_file *file_priv)
232{
233 struct drm_i915_gem_create *args = data;
234 struct drm_gem_object *obj;
Pekka Paalanena1a2d1d2009-08-23 12:40:55 +0300235 int ret;
236 u32 handle;
Eric Anholt673a3942008-07-30 12:06:12 -0700237
238 args->size = roundup(args->size, PAGE_SIZE);
239
240 /* Allocate the new object */
Daniel Vetterac52bc52010-04-09 19:05:06 +0000241 obj = i915_gem_alloc_object(dev, args->size);
Eric Anholt673a3942008-07-30 12:06:12 -0700242 if (obj == NULL)
243 return -ENOMEM;
244
245 ret = drm_gem_handle_create(file_priv, obj, &handle);
Chris Wilson1dfd9752010-09-06 14:44:14 +0100246 if (ret) {
Chris Wilson202f2fe2010-10-14 13:20:40 +0100247 drm_gem_object_release(obj);
248 i915_gem_info_remove_obj(dev->dev_private, obj->size);
249 kfree(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700250 return ret;
Chris Wilson1dfd9752010-09-06 14:44:14 +0100251 }
252
Chris Wilson202f2fe2010-10-14 13:20:40 +0100253 /* drop reference from allocate - handle holds it now */
254 drm_gem_object_unreference(obj);
255 trace_i915_gem_object_create(obj);
256
Eric Anholt673a3942008-07-30 12:06:12 -0700257 args->handle = handle;
Eric Anholt673a3942008-07-30 12:06:12 -0700258 return 0;
259}
260
Eric Anholt40123c12009-03-09 13:42:30 -0700261static inline int
Eric Anholteb014592009-03-10 11:44:52 -0700262fast_shmem_read(struct page **pages,
263 loff_t page_base, int page_offset,
264 char __user *data,
265 int length)
266{
Chris Wilsonb5e4feb2010-10-14 13:47:43 +0100267 char *vaddr;
Chris Wilson4f27b752010-10-14 15:26:45 +0100268 int ret;
Eric Anholteb014592009-03-10 11:44:52 -0700269
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700270 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
Chris Wilson4f27b752010-10-14 15:26:45 +0100271 ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700272 kunmap_atomic(vaddr);
Eric Anholteb014592009-03-10 11:44:52 -0700273
Chris Wilson4f27b752010-10-14 15:26:45 +0100274 return ret;
Eric Anholteb014592009-03-10 11:44:52 -0700275}
276
Eric Anholt280b7132009-03-12 16:56:27 -0700277static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
278{
279 drm_i915_private_t *dev_priv = obj->dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +0100280 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt280b7132009-03-12 16:56:27 -0700281
282 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
283 obj_priv->tiling_mode != I915_TILING_NONE;
284}
285
Chris Wilson99a03df2010-05-27 14:15:34 +0100286static inline void
Eric Anholt40123c12009-03-09 13:42:30 -0700287slow_shmem_copy(struct page *dst_page,
288 int dst_offset,
289 struct page *src_page,
290 int src_offset,
291 int length)
292{
293 char *dst_vaddr, *src_vaddr;
294
Chris Wilson99a03df2010-05-27 14:15:34 +0100295 dst_vaddr = kmap(dst_page);
296 src_vaddr = kmap(src_page);
Eric Anholt40123c12009-03-09 13:42:30 -0700297
298 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
299
Chris Wilson99a03df2010-05-27 14:15:34 +0100300 kunmap(src_page);
301 kunmap(dst_page);
Eric Anholt40123c12009-03-09 13:42:30 -0700302}
303
Chris Wilson99a03df2010-05-27 14:15:34 +0100304static inline void
Eric Anholt280b7132009-03-12 16:56:27 -0700305slow_shmem_bit17_copy(struct page *gpu_page,
306 int gpu_offset,
307 struct page *cpu_page,
308 int cpu_offset,
309 int length,
310 int is_read)
311{
312 char *gpu_vaddr, *cpu_vaddr;
313
314 /* Use the unswizzled path if this page isn't affected. */
315 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
316 if (is_read)
317 return slow_shmem_copy(cpu_page, cpu_offset,
318 gpu_page, gpu_offset, length);
319 else
320 return slow_shmem_copy(gpu_page, gpu_offset,
321 cpu_page, cpu_offset, length);
322 }
323
Chris Wilson99a03df2010-05-27 14:15:34 +0100324 gpu_vaddr = kmap(gpu_page);
325 cpu_vaddr = kmap(cpu_page);
Eric Anholt280b7132009-03-12 16:56:27 -0700326
327 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
328 * XORing with the other bits (A9 for Y, A9 and A10 for X)
329 */
330 while (length > 0) {
331 int cacheline_end = ALIGN(gpu_offset + 1, 64);
332 int this_length = min(cacheline_end - gpu_offset, length);
333 int swizzled_gpu_offset = gpu_offset ^ 64;
334
335 if (is_read) {
336 memcpy(cpu_vaddr + cpu_offset,
337 gpu_vaddr + swizzled_gpu_offset,
338 this_length);
339 } else {
340 memcpy(gpu_vaddr + swizzled_gpu_offset,
341 cpu_vaddr + cpu_offset,
342 this_length);
343 }
344 cpu_offset += this_length;
345 gpu_offset += this_length;
346 length -= this_length;
347 }
348
Chris Wilson99a03df2010-05-27 14:15:34 +0100349 kunmap(cpu_page);
350 kunmap(gpu_page);
Eric Anholt280b7132009-03-12 16:56:27 -0700351}
352
Eric Anholt673a3942008-07-30 12:06:12 -0700353/**
Eric Anholteb014592009-03-10 11:44:52 -0700354 * This is the fast shmem pread path, which attempts to copy_from_user directly
355 * from the backing pages of the object to the user's address space. On a
356 * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
357 */
358static int
359i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
360 struct drm_i915_gem_pread *args,
361 struct drm_file *file_priv)
362{
Daniel Vetter23010e42010-03-08 13:35:02 +0100363 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700364 ssize_t remain;
365 loff_t offset, page_base;
366 char __user *user_data;
367 int page_offset, page_length;
Eric Anholteb014592009-03-10 11:44:52 -0700368
369 user_data = (char __user *) (uintptr_t) args->data_ptr;
370 remain = args->size;
371
Daniel Vetter23010e42010-03-08 13:35:02 +0100372 obj_priv = to_intel_bo(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700373 offset = args->offset;
374
375 while (remain > 0) {
376 /* Operation in this page
377 *
378 * page_base = page offset within aperture
379 * page_offset = offset within page
380 * page_length = bytes to copy for this page
381 */
382 page_base = (offset & ~(PAGE_SIZE-1));
383 page_offset = offset & (PAGE_SIZE-1);
384 page_length = remain;
385 if ((page_offset + remain) > PAGE_SIZE)
386 page_length = PAGE_SIZE - page_offset;
387
Chris Wilson4f27b752010-10-14 15:26:45 +0100388 if (fast_shmem_read(obj_priv->pages,
389 page_base, page_offset,
390 user_data, page_length))
391 return -EFAULT;
Eric Anholteb014592009-03-10 11:44:52 -0700392
393 remain -= page_length;
394 user_data += page_length;
395 offset += page_length;
396 }
397
Chris Wilson4f27b752010-10-14 15:26:45 +0100398 return 0;
Eric Anholteb014592009-03-10 11:44:52 -0700399}
400
Chris Wilson07f73f62009-09-14 16:50:30 +0100401static int
402i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
403{
404 int ret;
405
Chris Wilson4bdadb92010-01-27 13:36:32 +0000406 ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
Chris Wilson07f73f62009-09-14 16:50:30 +0100407
408 /* If we've insufficient memory to map in the pages, attempt
409 * to make some space by throwing out some old buffers.
410 */
411 if (ret == -ENOMEM) {
412 struct drm_device *dev = obj->dev;
Chris Wilson07f73f62009-09-14 16:50:30 +0100413
Daniel Vetter0108a3e2010-08-07 11:01:21 +0100414 ret = i915_gem_evict_something(dev, obj->size,
415 i915_gem_get_gtt_alignment(obj));
Chris Wilson07f73f62009-09-14 16:50:30 +0100416 if (ret)
417 return ret;
418
Chris Wilson4bdadb92010-01-27 13:36:32 +0000419 ret = i915_gem_object_get_pages(obj, 0);
Chris Wilson07f73f62009-09-14 16:50:30 +0100420 }
421
422 return ret;
423}
424
Eric Anholteb014592009-03-10 11:44:52 -0700425/**
426 * This is the fallback shmem pread path, which allocates temporary storage
427 * in kernel space to copy_to_user into outside of the struct_mutex, so we
428 * can copy out of the object's backing pages while holding the struct mutex
429 * and not take page faults.
430 */
431static int
432i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
433 struct drm_i915_gem_pread *args,
434 struct drm_file *file_priv)
435{
Daniel Vetter23010e42010-03-08 13:35:02 +0100436 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700437 struct mm_struct *mm = current->mm;
438 struct page **user_pages;
439 ssize_t remain;
440 loff_t offset, pinned_pages, i;
441 loff_t first_data_page, last_data_page, num_pages;
442 int shmem_page_index, shmem_page_offset;
443 int data_page_index, data_page_offset;
444 int page_length;
445 int ret;
446 uint64_t data_ptr = args->data_ptr;
Eric Anholt280b7132009-03-12 16:56:27 -0700447 int do_bit17_swizzling;
Eric Anholteb014592009-03-10 11:44:52 -0700448
449 remain = args->size;
450
451 /* Pin the user pages containing the data. We can't fault while
452 * holding the struct mutex, yet we want to hold it while
453 * dereferencing the user data.
454 */
455 first_data_page = data_ptr / PAGE_SIZE;
456 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
457 num_pages = last_data_page - first_data_page + 1;
458
Chris Wilson4f27b752010-10-14 15:26:45 +0100459 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
Eric Anholteb014592009-03-10 11:44:52 -0700460 if (user_pages == NULL)
461 return -ENOMEM;
462
Chris Wilson4f27b752010-10-14 15:26:45 +0100463 mutex_unlock(&dev->struct_mutex);
Eric Anholteb014592009-03-10 11:44:52 -0700464 down_read(&mm->mmap_sem);
465 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
Eric Anholte5e9ecd2009-04-07 16:01:22 -0700466 num_pages, 1, 0, user_pages, NULL);
Eric Anholteb014592009-03-10 11:44:52 -0700467 up_read(&mm->mmap_sem);
Chris Wilson4f27b752010-10-14 15:26:45 +0100468 mutex_lock(&dev->struct_mutex);
Eric Anholteb014592009-03-10 11:44:52 -0700469 if (pinned_pages < num_pages) {
470 ret = -EFAULT;
Chris Wilson4f27b752010-10-14 15:26:45 +0100471 goto out;
Eric Anholteb014592009-03-10 11:44:52 -0700472 }
473
Chris Wilson4f27b752010-10-14 15:26:45 +0100474 ret = i915_gem_object_set_cpu_read_domain_range(obj,
475 args->offset,
Eric Anholteb014592009-03-10 11:44:52 -0700476 args->size);
Chris Wilson4f27b752010-10-14 15:26:45 +0100477 if (ret)
478 goto out;
479
480 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700481
Daniel Vetter23010e42010-03-08 13:35:02 +0100482 obj_priv = to_intel_bo(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700483 offset = args->offset;
484
485 while (remain > 0) {
486 /* Operation in this page
487 *
488 * shmem_page_index = page number within shmem file
489 * shmem_page_offset = offset within page in shmem file
490 * data_page_index = page number in get_user_pages return
491 * data_page_offset = offset with data_page_index page.
492 * page_length = bytes to copy for this page
493 */
494 shmem_page_index = offset / PAGE_SIZE;
495 shmem_page_offset = offset & ~PAGE_MASK;
496 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
497 data_page_offset = data_ptr & ~PAGE_MASK;
498
499 page_length = remain;
500 if ((shmem_page_offset + page_length) > PAGE_SIZE)
501 page_length = PAGE_SIZE - shmem_page_offset;
502 if ((data_page_offset + page_length) > PAGE_SIZE)
503 page_length = PAGE_SIZE - data_page_offset;
504
Eric Anholt280b7132009-03-12 16:56:27 -0700505 if (do_bit17_swizzling) {
Chris Wilson99a03df2010-05-27 14:15:34 +0100506 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
Eric Anholt280b7132009-03-12 16:56:27 -0700507 shmem_page_offset,
Chris Wilson99a03df2010-05-27 14:15:34 +0100508 user_pages[data_page_index],
509 data_page_offset,
510 page_length,
511 1);
512 } else {
513 slow_shmem_copy(user_pages[data_page_index],
514 data_page_offset,
515 obj_priv->pages[shmem_page_index],
516 shmem_page_offset,
517 page_length);
Eric Anholt280b7132009-03-12 16:56:27 -0700518 }
Eric Anholteb014592009-03-10 11:44:52 -0700519
520 remain -= page_length;
521 data_ptr += page_length;
522 offset += page_length;
523 }
524
Chris Wilson4f27b752010-10-14 15:26:45 +0100525out:
Eric Anholteb014592009-03-10 11:44:52 -0700526 for (i = 0; i < pinned_pages; i++) {
527 SetPageDirty(user_pages[i]);
528 page_cache_release(user_pages[i]);
529 }
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700530 drm_free_large(user_pages);
Eric Anholteb014592009-03-10 11:44:52 -0700531
532 return ret;
533}
534
Eric Anholt673a3942008-07-30 12:06:12 -0700535/**
536 * Reads data from the object referenced by handle.
537 *
538 * On error, the contents of *data are undefined.
539 */
540int
541i915_gem_pread_ioctl(struct drm_device *dev, void *data,
542 struct drm_file *file_priv)
543{
544 struct drm_i915_gem_pread *args = data;
545 struct drm_gem_object *obj;
546 struct drm_i915_gem_object *obj_priv;
Chris Wilson35b62a82010-09-26 20:23:38 +0100547 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -0700548
Chris Wilson51311d02010-11-17 09:10:42 +0000549 if (args->size == 0)
550 return 0;
551
552 if (!access_ok(VERIFY_WRITE,
553 (char __user *)(uintptr_t)args->data_ptr,
554 args->size))
555 return -EFAULT;
556
557 ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
558 args->size);
559 if (ret)
560 return -EFAULT;
561
Chris Wilson4f27b752010-10-14 15:26:45 +0100562 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100563 if (ret)
Chris Wilson4f27b752010-10-14 15:26:45 +0100564 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700565
566 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100567 if (obj == NULL) {
568 ret = -ENOENT;
569 goto unlock;
Chris Wilson4f27b752010-10-14 15:26:45 +0100570 }
Daniel Vetter23010e42010-03-08 13:35:02 +0100571 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700572
Chris Wilson7dcd2492010-09-26 20:21:44 +0100573 /* Bounds check source. */
574 if (args->offset > obj->size || args->size > obj->size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +0100575 ret = -EINVAL;
Chris Wilson35b62a82010-09-26 20:23:38 +0100576 goto out;
Chris Wilsonce9d4192010-09-26 20:50:05 +0100577 }
578
Chris Wilson4f27b752010-10-14 15:26:45 +0100579 ret = i915_gem_object_get_pages_or_evict(obj);
580 if (ret)
581 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -0700582
Chris Wilson4f27b752010-10-14 15:26:45 +0100583 ret = i915_gem_object_set_cpu_read_domain_range(obj,
584 args->offset,
585 args->size);
586 if (ret)
587 goto out_put;
588
589 ret = -EFAULT;
590 if (!i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt673a3942008-07-30 12:06:12 -0700591 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
Chris Wilson4f27b752010-10-14 15:26:45 +0100592 if (ret == -EFAULT)
593 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -0700594
Chris Wilson4f27b752010-10-14 15:26:45 +0100595out_put:
596 i915_gem_object_put_pages(obj);
Chris Wilson35b62a82010-09-26 20:23:38 +0100597out:
Chris Wilson4f27b752010-10-14 15:26:45 +0100598 drm_gem_object_unreference(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100599unlock:
Chris Wilson4f27b752010-10-14 15:26:45 +0100600 mutex_unlock(&dev->struct_mutex);
Eric Anholteb014592009-03-10 11:44:52 -0700601 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700602}
603
Keith Packard0839ccb2008-10-30 19:38:48 -0700604/* This is the fast write path which cannot handle
605 * page faults in the source data
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700606 */
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700607
Keith Packard0839ccb2008-10-30 19:38:48 -0700608static inline int
609fast_user_write(struct io_mapping *mapping,
610 loff_t page_base, int page_offset,
611 char __user *user_data,
612 int length)
613{
614 char *vaddr_atomic;
615 unsigned long unwritten;
616
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700617 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
Keith Packard0839ccb2008-10-30 19:38:48 -0700618 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
619 user_data, length);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700620 io_mapping_unmap_atomic(vaddr_atomic);
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100621 return unwritten;
Keith Packard0839ccb2008-10-30 19:38:48 -0700622}
623
624/* Here's the write path which can sleep for
625 * page faults
626 */
627
Chris Wilsonab34c222010-05-27 14:15:35 +0100628static inline void
Eric Anholt3de09aa2009-03-09 09:42:23 -0700629slow_kernel_write(struct io_mapping *mapping,
630 loff_t gtt_base, int gtt_offset,
631 struct page *user_page, int user_offset,
632 int length)
Keith Packard0839ccb2008-10-30 19:38:48 -0700633{
Chris Wilsonab34c222010-05-27 14:15:35 +0100634 char __iomem *dst_vaddr;
635 char *src_vaddr;
Keith Packard0839ccb2008-10-30 19:38:48 -0700636
Chris Wilsonab34c222010-05-27 14:15:35 +0100637 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
638 src_vaddr = kmap(user_page);
639
640 memcpy_toio(dst_vaddr + gtt_offset,
641 src_vaddr + user_offset,
642 length);
643
644 kunmap(user_page);
645 io_mapping_unmap(dst_vaddr);
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700646}
647
Eric Anholt40123c12009-03-09 13:42:30 -0700648static inline int
649fast_shmem_write(struct page **pages,
650 loff_t page_base, int page_offset,
651 char __user *data,
652 int length)
653{
Chris Wilsonb5e4feb2010-10-14 13:47:43 +0100654 char *vaddr;
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100655 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -0700656
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700657 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100658 ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700659 kunmap_atomic(vaddr);
Eric Anholt40123c12009-03-09 13:42:30 -0700660
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100661 return ret;
Eric Anholt40123c12009-03-09 13:42:30 -0700662}
663
Eric Anholt3de09aa2009-03-09 09:42:23 -0700664/**
665 * This is the fast pwrite path, where we copy the data directly from the
666 * user into the GTT, uncached.
667 */
Eric Anholt673a3942008-07-30 12:06:12 -0700668static int
Eric Anholt3de09aa2009-03-09 09:42:23 -0700669i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
670 struct drm_i915_gem_pwrite *args,
671 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700672{
Daniel Vetter23010e42010-03-08 13:35:02 +0100673 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Keith Packard0839ccb2008-10-30 19:38:48 -0700674 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -0700675 ssize_t remain;
Keith Packard0839ccb2008-10-30 19:38:48 -0700676 loff_t offset, page_base;
Eric Anholt673a3942008-07-30 12:06:12 -0700677 char __user *user_data;
Keith Packard0839ccb2008-10-30 19:38:48 -0700678 int page_offset, page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700679
680 user_data = (char __user *) (uintptr_t) args->data_ptr;
681 remain = args->size;
Eric Anholt673a3942008-07-30 12:06:12 -0700682
Daniel Vetter23010e42010-03-08 13:35:02 +0100683 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700684 offset = obj_priv->gtt_offset + args->offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700685
686 while (remain > 0) {
687 /* Operation in this page
688 *
Keith Packard0839ccb2008-10-30 19:38:48 -0700689 * page_base = page offset within aperture
690 * page_offset = offset within page
691 * page_length = bytes to copy for this page
Eric Anholt673a3942008-07-30 12:06:12 -0700692 */
Keith Packard0839ccb2008-10-30 19:38:48 -0700693 page_base = (offset & ~(PAGE_SIZE-1));
694 page_offset = offset & (PAGE_SIZE-1);
695 page_length = remain;
696 if ((page_offset + remain) > PAGE_SIZE)
697 page_length = PAGE_SIZE - page_offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700698
Keith Packard0839ccb2008-10-30 19:38:48 -0700699 /* If we get a fault while copying data, then (presumably) our
Eric Anholt3de09aa2009-03-09 09:42:23 -0700700 * source page isn't available. Return the error and we'll
701 * retry in the slow path.
Keith Packard0839ccb2008-10-30 19:38:48 -0700702 */
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100703 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
704 page_offset, user_data, page_length))
705
706 return -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -0700707
Keith Packard0839ccb2008-10-30 19:38:48 -0700708 remain -= page_length;
709 user_data += page_length;
710 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700711 }
Eric Anholt673a3942008-07-30 12:06:12 -0700712
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100713 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -0700714}
715
Eric Anholt3de09aa2009-03-09 09:42:23 -0700716/**
717 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
718 * the memory and maps it using kmap_atomic for copying.
719 *
720 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
721 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
722 */
Eric Anholt3043c602008-10-02 12:24:47 -0700723static int
Eric Anholt3de09aa2009-03-09 09:42:23 -0700724i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
725 struct drm_i915_gem_pwrite *args,
726 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700727{
Daniel Vetter23010e42010-03-08 13:35:02 +0100728 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700729 drm_i915_private_t *dev_priv = dev->dev_private;
730 ssize_t remain;
731 loff_t gtt_page_base, offset;
732 loff_t first_data_page, last_data_page, num_pages;
733 loff_t pinned_pages, i;
734 struct page **user_pages;
735 struct mm_struct *mm = current->mm;
736 int gtt_page_offset, data_page_offset, data_page_index, page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700737 int ret;
Eric Anholt3de09aa2009-03-09 09:42:23 -0700738 uint64_t data_ptr = args->data_ptr;
739
740 remain = args->size;
741
742 /* Pin the user pages containing the data. We can't fault while
743 * holding the struct mutex, and all of the pwrite implementations
744 * want to hold it while dereferencing the user data.
745 */
746 first_data_page = data_ptr / PAGE_SIZE;
747 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
748 num_pages = last_data_page - first_data_page + 1;
749
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100750 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
Eric Anholt3de09aa2009-03-09 09:42:23 -0700751 if (user_pages == NULL)
752 return -ENOMEM;
753
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100754 mutex_unlock(&dev->struct_mutex);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700755 down_read(&mm->mmap_sem);
756 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
757 num_pages, 0, 0, user_pages, NULL);
758 up_read(&mm->mmap_sem);
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100759 mutex_lock(&dev->struct_mutex);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700760 if (pinned_pages < num_pages) {
761 ret = -EFAULT;
762 goto out_unpin_pages;
763 }
764
Eric Anholt3de09aa2009-03-09 09:42:23 -0700765 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
766 if (ret)
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100767 goto out_unpin_pages;
Eric Anholt3de09aa2009-03-09 09:42:23 -0700768
Daniel Vetter23010e42010-03-08 13:35:02 +0100769 obj_priv = to_intel_bo(obj);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700770 offset = obj_priv->gtt_offset + args->offset;
771
772 while (remain > 0) {
773 /* Operation in this page
774 *
775 * gtt_page_base = page offset within aperture
776 * gtt_page_offset = offset within page in aperture
777 * data_page_index = page number in get_user_pages return
778 * data_page_offset = offset with data_page_index page.
779 * page_length = bytes to copy for this page
780 */
781 gtt_page_base = offset & PAGE_MASK;
782 gtt_page_offset = offset & ~PAGE_MASK;
783 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
784 data_page_offset = data_ptr & ~PAGE_MASK;
785
786 page_length = remain;
787 if ((gtt_page_offset + page_length) > PAGE_SIZE)
788 page_length = PAGE_SIZE - gtt_page_offset;
789 if ((data_page_offset + page_length) > PAGE_SIZE)
790 page_length = PAGE_SIZE - data_page_offset;
791
Chris Wilsonab34c222010-05-27 14:15:35 +0100792 slow_kernel_write(dev_priv->mm.gtt_mapping,
793 gtt_page_base, gtt_page_offset,
794 user_pages[data_page_index],
795 data_page_offset,
796 page_length);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700797
798 remain -= page_length;
799 offset += page_length;
800 data_ptr += page_length;
801 }
802
Eric Anholt3de09aa2009-03-09 09:42:23 -0700803out_unpin_pages:
804 for (i = 0; i < pinned_pages; i++)
805 page_cache_release(user_pages[i]);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700806 drm_free_large(user_pages);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700807
808 return ret;
809}
810
Eric Anholt40123c12009-03-09 13:42:30 -0700811/**
812 * This is the fast shmem pwrite path, which attempts to directly
813 * copy_from_user into the kmapped pages backing the object.
814 */
Eric Anholt673a3942008-07-30 12:06:12 -0700815static int
Eric Anholt40123c12009-03-09 13:42:30 -0700816i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
817 struct drm_i915_gem_pwrite *args,
818 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700819{
Daniel Vetter23010e42010-03-08 13:35:02 +0100820 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt40123c12009-03-09 13:42:30 -0700821 ssize_t remain;
822 loff_t offset, page_base;
823 char __user *user_data;
824 int page_offset, page_length;
Eric Anholt40123c12009-03-09 13:42:30 -0700825
826 user_data = (char __user *) (uintptr_t) args->data_ptr;
827 remain = args->size;
Eric Anholt673a3942008-07-30 12:06:12 -0700828
Daniel Vetter23010e42010-03-08 13:35:02 +0100829 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700830 offset = args->offset;
Eric Anholt40123c12009-03-09 13:42:30 -0700831 obj_priv->dirty = 1;
Eric Anholt673a3942008-07-30 12:06:12 -0700832
Eric Anholt40123c12009-03-09 13:42:30 -0700833 while (remain > 0) {
834 /* Operation in this page
835 *
836 * page_base = page offset within aperture
837 * page_offset = offset within page
838 * page_length = bytes to copy for this page
839 */
840 page_base = (offset & ~(PAGE_SIZE-1));
841 page_offset = offset & (PAGE_SIZE-1);
842 page_length = remain;
843 if ((page_offset + remain) > PAGE_SIZE)
844 page_length = PAGE_SIZE - page_offset;
845
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100846 if (fast_shmem_write(obj_priv->pages,
Eric Anholt40123c12009-03-09 13:42:30 -0700847 page_base, page_offset,
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100848 user_data, page_length))
849 return -EFAULT;
Eric Anholt40123c12009-03-09 13:42:30 -0700850
851 remain -= page_length;
852 user_data += page_length;
853 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700854 }
855
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100856 return 0;
Eric Anholt40123c12009-03-09 13:42:30 -0700857}
858
859/**
860 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
861 * the memory and maps it using kmap_atomic for copying.
862 *
863 * This avoids taking mmap_sem for faulting on the user's address while the
864 * struct_mutex is held.
865 */
866static int
867i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
868 struct drm_i915_gem_pwrite *args,
869 struct drm_file *file_priv)
870{
Daniel Vetter23010e42010-03-08 13:35:02 +0100871 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt40123c12009-03-09 13:42:30 -0700872 struct mm_struct *mm = current->mm;
873 struct page **user_pages;
874 ssize_t remain;
875 loff_t offset, pinned_pages, i;
876 loff_t first_data_page, last_data_page, num_pages;
877 int shmem_page_index, shmem_page_offset;
878 int data_page_index, data_page_offset;
879 int page_length;
880 int ret;
881 uint64_t data_ptr = args->data_ptr;
Eric Anholt280b7132009-03-12 16:56:27 -0700882 int do_bit17_swizzling;
Eric Anholt40123c12009-03-09 13:42:30 -0700883
884 remain = args->size;
885
886 /* Pin the user pages containing the data. We can't fault while
887 * holding the struct mutex, and all of the pwrite implementations
888 * want to hold it while dereferencing the user data.
889 */
890 first_data_page = data_ptr / PAGE_SIZE;
891 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
892 num_pages = last_data_page - first_data_page + 1;
893
Chris Wilson4f27b752010-10-14 15:26:45 +0100894 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
Eric Anholt40123c12009-03-09 13:42:30 -0700895 if (user_pages == NULL)
896 return -ENOMEM;
897
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100898 mutex_unlock(&dev->struct_mutex);
Eric Anholt40123c12009-03-09 13:42:30 -0700899 down_read(&mm->mmap_sem);
900 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
901 num_pages, 0, 0, user_pages, NULL);
902 up_read(&mm->mmap_sem);
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100903 mutex_lock(&dev->struct_mutex);
Eric Anholt40123c12009-03-09 13:42:30 -0700904 if (pinned_pages < num_pages) {
905 ret = -EFAULT;
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100906 goto out;
Eric Anholt40123c12009-03-09 13:42:30 -0700907 }
908
Eric Anholt40123c12009-03-09 13:42:30 -0700909 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100910 if (ret)
911 goto out;
912
913 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
Eric Anholt40123c12009-03-09 13:42:30 -0700914
Daniel Vetter23010e42010-03-08 13:35:02 +0100915 obj_priv = to_intel_bo(obj);
Eric Anholt40123c12009-03-09 13:42:30 -0700916 offset = args->offset;
917 obj_priv->dirty = 1;
918
919 while (remain > 0) {
920 /* Operation in this page
921 *
922 * shmem_page_index = page number within shmem file
923 * shmem_page_offset = offset within page in shmem file
924 * data_page_index = page number in get_user_pages return
925 * data_page_offset = offset with data_page_index page.
926 * page_length = bytes to copy for this page
927 */
928 shmem_page_index = offset / PAGE_SIZE;
929 shmem_page_offset = offset & ~PAGE_MASK;
930 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
931 data_page_offset = data_ptr & ~PAGE_MASK;
932
933 page_length = remain;
934 if ((shmem_page_offset + page_length) > PAGE_SIZE)
935 page_length = PAGE_SIZE - shmem_page_offset;
936 if ((data_page_offset + page_length) > PAGE_SIZE)
937 page_length = PAGE_SIZE - data_page_offset;
938
Eric Anholt280b7132009-03-12 16:56:27 -0700939 if (do_bit17_swizzling) {
Chris Wilson99a03df2010-05-27 14:15:34 +0100940 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
Eric Anholt280b7132009-03-12 16:56:27 -0700941 shmem_page_offset,
942 user_pages[data_page_index],
943 data_page_offset,
Chris Wilson99a03df2010-05-27 14:15:34 +0100944 page_length,
945 0);
946 } else {
947 slow_shmem_copy(obj_priv->pages[shmem_page_index],
948 shmem_page_offset,
949 user_pages[data_page_index],
950 data_page_offset,
951 page_length);
Eric Anholt280b7132009-03-12 16:56:27 -0700952 }
Eric Anholt40123c12009-03-09 13:42:30 -0700953
954 remain -= page_length;
955 data_ptr += page_length;
956 offset += page_length;
957 }
958
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100959out:
Eric Anholt40123c12009-03-09 13:42:30 -0700960 for (i = 0; i < pinned_pages; i++)
961 page_cache_release(user_pages[i]);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700962 drm_free_large(user_pages);
Eric Anholt40123c12009-03-09 13:42:30 -0700963
964 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700965}
966
967/**
968 * Writes data to the object referenced by handle.
969 *
970 * On error, the contents of the buffer that were to be modified are undefined.
971 */
972int
973i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100974 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700975{
976 struct drm_i915_gem_pwrite *args = data;
977 struct drm_gem_object *obj;
978 struct drm_i915_gem_object *obj_priv;
Chris Wilson51311d02010-11-17 09:10:42 +0000979 int ret;
980
981 if (args->size == 0)
982 return 0;
983
984 if (!access_ok(VERIFY_READ,
985 (char __user *)(uintptr_t)args->data_ptr,
986 args->size))
987 return -EFAULT;
988
989 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
990 args->size);
991 if (ret)
992 return -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -0700993
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100994 ret = i915_mutex_lock_interruptible(dev);
995 if (ret)
996 return ret;
997
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100998 obj = drm_gem_object_lookup(dev, file, args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100999 if (obj == NULL) {
1000 ret = -ENOENT;
1001 goto unlock;
1002 }
Daniel Vetter23010e42010-03-08 13:35:02 +01001003 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001004
Chris Wilson7dcd2492010-09-26 20:21:44 +01001005 /* Bounds check destination. */
1006 if (args->offset > obj->size || args->size > obj->size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +01001007 ret = -EINVAL;
Chris Wilson35b62a82010-09-26 20:23:38 +01001008 goto out;
Chris Wilsonce9d4192010-09-26 20:50:05 +01001009 }
1010
Eric Anholt673a3942008-07-30 12:06:12 -07001011 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1012 * it would end up going through the fenced access, and we'll get
1013 * different detiling behavior between reading and writing.
1014 * pread/pwrite currently are reading and writing from the CPU
1015 * perspective, requiring manual detiling by the client.
1016 */
Dave Airlie71acb5e2008-12-30 20:31:46 +10001017 if (obj_priv->phys_obj)
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001018 ret = i915_gem_phys_pwrite(dev, obj, args, file);
Dave Airlie71acb5e2008-12-30 20:31:46 +10001019 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
Chris Wilson5cdf5882010-09-27 15:51:07 +01001020 obj_priv->gtt_space &&
Chris Wilson9b8c4a02010-05-27 14:21:01 +01001021 obj->write_domain != I915_GEM_DOMAIN_CPU) {
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001022 ret = i915_gem_object_pin(obj, 0);
1023 if (ret)
1024 goto out;
1025
1026 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1027 if (ret)
1028 goto out_unpin;
1029
1030 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1031 if (ret == -EFAULT)
1032 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1033
1034out_unpin:
1035 i915_gem_object_unpin(obj);
Eric Anholt40123c12009-03-09 13:42:30 -07001036 } else {
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001037 ret = i915_gem_object_get_pages_or_evict(obj);
1038 if (ret)
1039 goto out;
1040
1041 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1042 if (ret)
1043 goto out_put;
1044
1045 ret = -EFAULT;
1046 if (!i915_gem_object_needs_bit17_swizzle(obj))
1047 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1048 if (ret == -EFAULT)
1049 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1050
1051out_put:
1052 i915_gem_object_put_pages(obj);
Eric Anholt40123c12009-03-09 13:42:30 -07001053 }
Eric Anholt673a3942008-07-30 12:06:12 -07001054
Chris Wilson35b62a82010-09-26 20:23:38 +01001055out:
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001056 drm_gem_object_unreference(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001057unlock:
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001058 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07001059 return ret;
1060}
1061
1062/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001063 * Called when user space prepares to use an object with the CPU, either
1064 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -07001065 */
1066int
1067i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1068 struct drm_file *file_priv)
1069{
Eric Anholta09ba7f2009-08-29 12:49:51 -07001070 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001071 struct drm_i915_gem_set_domain *args = data;
1072 struct drm_gem_object *obj;
Jesse Barnes652c3932009-08-17 13:31:43 -07001073 struct drm_i915_gem_object *obj_priv;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001074 uint32_t read_domains = args->read_domains;
1075 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001076 int ret;
1077
1078 if (!(dev->driver->driver_features & DRIVER_GEM))
1079 return -ENODEV;
1080
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001081 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001082 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001083 return -EINVAL;
1084
Chris Wilson21d509e2009-06-06 09:46:02 +01001085 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001086 return -EINVAL;
1087
1088 /* Having something in the write domain implies it's in the read
1089 * domain, and only that read domain. Enforce that in the request.
1090 */
1091 if (write_domain != 0 && read_domains != write_domain)
1092 return -EINVAL;
1093
Chris Wilson76c1dec2010-09-25 11:22:51 +01001094 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001095 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001096 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001097
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001098 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1099 if (obj == NULL) {
1100 ret = -ENOENT;
1101 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001102 }
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001103 obj_priv = to_intel_bo(obj);
Jesse Barnes652c3932009-08-17 13:31:43 -07001104
1105 intel_mark_busy(dev, obj);
1106
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001107 if (read_domains & I915_GEM_DOMAIN_GTT) {
1108 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001109
Eric Anholta09ba7f2009-08-29 12:49:51 -07001110 /* Update the LRU on the fence for the CPU access that's
1111 * about to occur.
1112 */
1113 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001114 struct drm_i915_fence_reg *reg =
1115 &dev_priv->fence_regs[obj_priv->fence_reg];
1116 list_move_tail(&reg->lru_list,
Eric Anholta09ba7f2009-08-29 12:49:51 -07001117 &dev_priv->mm.fence_list);
1118 }
1119
Eric Anholt02354392008-11-26 13:58:13 -08001120 /* Silently promote "you're not bound, there was nothing to do"
1121 * to success, since the client was just asking us to
1122 * make sure everything was done.
1123 */
1124 if (ret == -EINVAL)
1125 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001126 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001127 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001128 }
1129
Chris Wilson7d1c4802010-08-07 21:45:03 +01001130 /* Maintain LRU order of "inactive" objects */
1131 if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
Chris Wilson69dc4982010-10-19 10:36:51 +01001132 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
Chris Wilson7d1c4802010-08-07 21:45:03 +01001133
Eric Anholt673a3942008-07-30 12:06:12 -07001134 drm_gem_object_unreference(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001135unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001136 mutex_unlock(&dev->struct_mutex);
1137 return ret;
1138}
1139
1140/**
1141 * Called when user space has done writes to this buffer
1142 */
1143int
1144i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1145 struct drm_file *file_priv)
1146{
1147 struct drm_i915_gem_sw_finish *args = data;
1148 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001149 int ret = 0;
1150
1151 if (!(dev->driver->driver_features & DRIVER_GEM))
1152 return -ENODEV;
1153
Chris Wilson76c1dec2010-09-25 11:22:51 +01001154 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001155 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001156 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001157
Eric Anholt673a3942008-07-30 12:06:12 -07001158 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1159 if (obj == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001160 ret = -ENOENT;
1161 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001162 }
1163
Eric Anholt673a3942008-07-30 12:06:12 -07001164 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson3d2a8122010-09-29 11:39:53 +01001165 if (to_intel_bo(obj)->pin_count)
Eric Anholte47c68e2008-11-14 13:35:19 -08001166 i915_gem_object_flush_cpu_write_domain(obj);
1167
Eric Anholt673a3942008-07-30 12:06:12 -07001168 drm_gem_object_unreference(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001169unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001170 mutex_unlock(&dev->struct_mutex);
1171 return ret;
1172}
1173
1174/**
1175 * Maps the contents of an object, returning the address it is mapped
1176 * into.
1177 *
1178 * While the mapping holds a reference on the contents of the object, it doesn't
1179 * imply a ref on the object itself.
1180 */
1181int
1182i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1183 struct drm_file *file_priv)
1184{
1185 struct drm_i915_gem_mmap *args = data;
1186 struct drm_gem_object *obj;
1187 loff_t offset;
1188 unsigned long addr;
1189
1190 if (!(dev->driver->driver_features & DRIVER_GEM))
1191 return -ENODEV;
1192
1193 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1194 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001195 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001196
1197 offset = args->offset;
1198
1199 down_write(&current->mm->mmap_sem);
1200 addr = do_mmap(obj->filp, 0, args->size,
1201 PROT_READ | PROT_WRITE, MAP_SHARED,
1202 args->offset);
1203 up_write(&current->mm->mmap_sem);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001204 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001205 if (IS_ERR((void *)addr))
1206 return addr;
1207
1208 args->addr_ptr = (uint64_t) addr;
1209
1210 return 0;
1211}
1212
Jesse Barnesde151cf2008-11-12 10:03:55 -08001213/**
1214 * i915_gem_fault - fault a page into the GTT
1215 * vma: VMA in question
1216 * vmf: fault info
1217 *
1218 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1219 * from userspace. The fault handler takes care of binding the object to
1220 * the GTT (if needed), allocating and programming a fence register (again,
1221 * only if needed based on whether the old reg is still valid or the object
1222 * is tiled) and inserting a new PTE into the faulting process.
1223 *
1224 * Note that the faulting process may involve evicting existing objects
1225 * from the GTT and/or fence registers to make room. So performance may
1226 * suffer if the GTT working set is large or there are few fence registers
1227 * left.
1228 */
1229int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1230{
1231 struct drm_gem_object *obj = vma->vm_private_data;
1232 struct drm_device *dev = obj->dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001233 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001234 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001235 pgoff_t page_offset;
1236 unsigned long pfn;
1237 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001238 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001239
1240 /* We don't use vmf->pgoff since that has the fake offset */
1241 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1242 PAGE_SHIFT;
1243
1244 /* Now bind it into the GTT if needed */
1245 mutex_lock(&dev->struct_mutex);
1246 if (!obj_priv->gtt_space) {
Chris Wilsone67b8ce2009-09-14 16:50:26 +01001247 ret = i915_gem_object_bind_to_gtt(obj, 0);
Chris Wilsonc7150892009-09-23 00:43:56 +01001248 if (ret)
1249 goto unlock;
Kristian Høgsberg07f4f3e2009-05-27 14:37:28 -04001250
Jesse Barnesde151cf2008-11-12 10:03:55 -08001251 ret = i915_gem_object_set_to_gtt_domain(obj, write);
Chris Wilsonc7150892009-09-23 00:43:56 +01001252 if (ret)
1253 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001254 }
1255
1256 /* Need a new fence register? */
Eric Anholta09ba7f2009-08-29 12:49:51 -07001257 if (obj_priv->tiling_mode != I915_TILING_NONE) {
Chris Wilson2cf34d72010-09-14 13:03:28 +01001258 ret = i915_gem_object_get_fence_reg(obj, true);
Chris Wilsonc7150892009-09-23 00:43:56 +01001259 if (ret)
1260 goto unlock;
Eric Anholtd9ddcb92009-01-27 10:33:49 -08001261 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001262
Chris Wilson7d1c4802010-08-07 21:45:03 +01001263 if (i915_gem_object_is_inactive(obj_priv))
Chris Wilson69dc4982010-10-19 10:36:51 +01001264 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
Chris Wilson7d1c4802010-08-07 21:45:03 +01001265
Jesse Barnesde151cf2008-11-12 10:03:55 -08001266 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1267 page_offset;
1268
1269 /* Finally, remap it using the new GTT offset */
1270 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc7150892009-09-23 00:43:56 +01001271unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001272 mutex_unlock(&dev->struct_mutex);
1273
1274 switch (ret) {
Chris Wilsonc7150892009-09-23 00:43:56 +01001275 case 0:
1276 case -ERESTARTSYS:
1277 return VM_FAULT_NOPAGE;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001278 case -ENOMEM:
1279 case -EAGAIN:
1280 return VM_FAULT_OOM;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001281 default:
Chris Wilsonc7150892009-09-23 00:43:56 +01001282 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001283 }
1284}
1285
1286/**
1287 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1288 * @obj: obj in question
1289 *
1290 * GEM memory mapping works by handing back to userspace a fake mmap offset
1291 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1292 * up the object based on the offset and sets up the various memory mapping
1293 * structures.
1294 *
1295 * This routine allocates and attaches a fake offset for @obj.
1296 */
1297static int
1298i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1299{
1300 struct drm_device *dev = obj->dev;
1301 struct drm_gem_mm *mm = dev->mm_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001302 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001303 struct drm_map_list *list;
Benjamin Herrenschmidtf77d3902009-02-02 16:55:46 +11001304 struct drm_local_map *map;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001305 int ret = 0;
1306
1307 /* Set the object up for mmap'ing */
1308 list = &obj->map_list;
Eric Anholt9a298b22009-03-24 12:23:04 -07001309 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001310 if (!list->map)
1311 return -ENOMEM;
1312
1313 map = list->map;
1314 map->type = _DRM_GEM;
1315 map->size = obj->size;
1316 map->handle = obj;
1317
1318 /* Get a DRM GEM mmap offset allocated... */
1319 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1320 obj->size / PAGE_SIZE, 0, 0);
1321 if (!list->file_offset_node) {
1322 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
Chris Wilson9e0ae5342010-09-21 15:05:24 +01001323 ret = -ENOSPC;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001324 goto out_free_list;
1325 }
1326
1327 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1328 obj->size / PAGE_SIZE, 0);
1329 if (!list->file_offset_node) {
1330 ret = -ENOMEM;
1331 goto out_free_list;
1332 }
1333
1334 list->hash.key = list->file_offset_node->start;
Chris Wilson9e0ae5342010-09-21 15:05:24 +01001335 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1336 if (ret) {
Jesse Barnesde151cf2008-11-12 10:03:55 -08001337 DRM_ERROR("failed to add to map hash\n");
1338 goto out_free_mm;
1339 }
1340
1341 /* By now we should be all set, any drm_mmap request on the offset
1342 * below will get to our mmap & fault handler */
1343 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1344
1345 return 0;
1346
1347out_free_mm:
1348 drm_mm_put_block(list->file_offset_node);
1349out_free_list:
Eric Anholt9a298b22009-03-24 12:23:04 -07001350 kfree(list->map);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001351
1352 return ret;
1353}
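
/*
 * A rough worked example of the offset arithmetic above: the drm_mm offset
 * manager deals in whole pages, so an object whose file_offset_node starts
 * at page 0x10000 ends up with mmap_offset = 0x10000 << PAGE_SHIFT, i.e.
 * 0x10000000 with 4K pages.  Userspace only ever sees this fake file
 * offset, never a real GTT or physical address.
 */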
1354
Chris Wilson901782b2009-07-10 08:18:50 +01001355/**
1356 * i915_gem_release_mmap - remove physical page mappings
1357 * @obj: obj in question
1358 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001359 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001360 * relinquish ownership of the pages back to the system.
1361 *
1362 * It is vital that we remove the page mapping if we have mapped a tiled
1363 * object through the GTT and then lose the fence register due to
1364 * resource pressure. Similarly if the object has been moved out of the
1365 * aperture, then pages mapped into userspace must be revoked. Removing the
1366 * mapping will then trigger a page fault on the next user access, allowing
1367 * fixup by i915_gem_fault().
1368 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001369void
Chris Wilson901782b2009-07-10 08:18:50 +01001370i915_gem_release_mmap(struct drm_gem_object *obj)
1371{
1372 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01001373 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson901782b2009-07-10 08:18:50 +01001374
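	/* The fake offsets handed out at create_mmap_offset() time index into
	 * the device's dev_mapping address_space, so zapping the range
	 * [mmap_offset, mmap_offset + size) below drops every user PTE that
	 * still points at the GTT mapping; the final argument (1) asks
	 * unmap_mapping_range() to zap private COW copies as well.
	 */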
1375 if (dev->dev_mapping)
1376 unmap_mapping_range(dev->dev_mapping,
1377 obj_priv->mmap_offset, obj->size, 1);
1378}
1379
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001380static void
1381i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1382{
1383 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01001384 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001385 struct drm_gem_mm *mm = dev->mm_private;
1386 struct drm_map_list *list;
1387
1388 list = &obj->map_list;
1389 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1390
1391 if (list->file_offset_node) {
1392 drm_mm_put_block(list->file_offset_node);
1393 list->file_offset_node = NULL;
1394 }
1395
1396 if (list->map) {
Eric Anholt9a298b22009-03-24 12:23:04 -07001397 kfree(list->map);
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001398 list->map = NULL;
1399 }
1400
1401 obj_priv->mmap_offset = 0;
1402}
1403
Jesse Barnesde151cf2008-11-12 10:03:55 -08001404/**
1405 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1406 * @obj: object to check
1407 *
1408 * Return the required GTT alignment for an object, taking into account
1409 * potential fence register mapping if needed.
1410 */
1411static uint32_t
1412i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1413{
1414 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01001415 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001416 int start, i;
1417
1418 /*
1419 * Minimum alignment is 4k (GTT page size), but might be greater
1420 * if a fence register is needed for the object.
1421 */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001422 if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001423 return 4096;
1424
1425 /*
1426 * Previous chips need to be aligned to the size of the smallest
1427 * fence register that can contain the object.
1428 */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001429 if (INTEL_INFO(dev)->gen == 3)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001430 start = 1024*1024;
1431 else
1432 start = 512*1024;
1433
1434 for (i = start; i < obj->size; i <<= 1)
1435 ;
1436
1437 return i;
1438}
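
/*
 * A couple of illustrative data points for the loop above: on gen2 a 128KB
 * tiled object needs 512KB alignment (the minimum fence size there), while
 * on gen3 a 3MB tiled object rounds up to the next power of two, 4MB, since
 * a single fence register must cover the whole object.
 */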
1439
1440/**
1441 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1442 * @dev: DRM device
1443 * @data: GTT mapping ioctl data
1444 * @file_priv: GEM object info
1445 *
1446 * Simply returns the fake offset to userspace so it can mmap it.
1447 * The mmap call will end up in drm_gem_mmap(), which will set things
1448 * up so we can get faults in the handler above.
1449 *
1450 * The fault handler will take care of binding the object into the GTT
1451 * (since it may have been evicted to make room for something), allocating
1452 * a fence register, and mapping the appropriate aperture address into
1453 * userspace.
1454 */
1455int
1456i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1457 struct drm_file *file_priv)
1458{
1459 struct drm_i915_gem_mmap_gtt *args = data;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001460 struct drm_gem_object *obj;
1461 struct drm_i915_gem_object *obj_priv;
1462 int ret;
1463
1464 if (!(dev->driver->driver_features & DRIVER_GEM))
1465 return -ENODEV;
1466
Chris Wilson76c1dec2010-09-25 11:22:51 +01001467 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001468 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001469 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001470
Jesse Barnesde151cf2008-11-12 10:03:55 -08001471 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001472 if (obj == NULL) {
1473 ret = -ENOENT;
1474 goto unlock;
1475 }
Daniel Vetter23010e42010-03-08 13:35:02 +01001476 obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001477
Chris Wilsonab182822009-09-22 18:46:17 +01001478 if (obj_priv->madv != I915_MADV_WILLNEED) {
1479 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001480 ret = -EINVAL;
1481 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001482 }
1483
Jesse Barnesde151cf2008-11-12 10:03:55 -08001484 if (!obj_priv->mmap_offset) {
1485 ret = i915_gem_create_mmap_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001486 if (ret)
1487 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001488 }
1489
1490 args->offset = obj_priv->mmap_offset;
1491
Jesse Barnesde151cf2008-11-12 10:03:55 -08001492 /*
1493 * Pull it into the GTT so that we have a page list (makes the
1494 * initial fault faster and any subsequent flushing possible).
1495 */
1496 if (!obj_priv->agp_mem) {
Chris Wilsone67b8ce2009-09-14 16:50:26 +01001497 ret = i915_gem_object_bind_to_gtt(obj, 0);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001498 if (ret)
1499 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001500 }
1501
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001502out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001503 drm_gem_object_unreference(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001504unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001505 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001506 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001507}
1508
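/*
 * For reference, the userspace side of this ioctl is typically just the
 * following (a sketch assuming a libdrm-style device fd, a valid GEM handle
 * and the object's size; none of these names come from this file):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * after which accesses through ptr go via the GTT and hit the fault handler
 * above on first touch.
 */
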
Chris Wilson5cdf5882010-09-27 15:51:07 +01001509static void
Eric Anholt856fa192009-03-19 14:10:50 -07001510i915_gem_object_put_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001511{
Daniel Vetter23010e42010-03-08 13:35:02 +01001512 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001513 int page_count = obj->size / PAGE_SIZE;
1514 int i;
1515
Eric Anholt856fa192009-03-19 14:10:50 -07001516 BUG_ON(obj_priv->pages_refcount == 0);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001517 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001518
1519 if (--obj_priv->pages_refcount != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07001520 return;
1521
Eric Anholt280b7132009-03-12 16:56:27 -07001522 if (obj_priv->tiling_mode != I915_TILING_NONE)
1523 i915_gem_object_save_bit_17_swizzle(obj);
1524
Chris Wilson3ef94da2009-09-14 16:50:29 +01001525 if (obj_priv->madv == I915_MADV_DONTNEED)
Chris Wilson13a05fd2009-09-20 23:03:19 +01001526 obj_priv->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001527
1528 for (i = 0; i < page_count; i++) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01001529 if (obj_priv->dirty)
1530 set_page_dirty(obj_priv->pages[i]);
1531
1532 if (obj_priv->madv == I915_MADV_WILLNEED)
Eric Anholt856fa192009-03-19 14:10:50 -07001533 mark_page_accessed(obj_priv->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001534
1535 page_cache_release(obj_priv->pages[i]);
1536 }
Eric Anholt673a3942008-07-30 12:06:12 -07001537 obj_priv->dirty = 0;
1538
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07001539 drm_free_large(obj_priv->pages);
Eric Anholt856fa192009-03-19 14:10:50 -07001540 obj_priv->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001541}
1542
Chris Wilsona56ba562010-09-28 10:07:56 +01001543static uint32_t
1544i915_gem_next_request_seqno(struct drm_device *dev,
1545 struct intel_ring_buffer *ring)
1546{
1547 drm_i915_private_t *dev_priv = dev->dev_private;
1548
1549 ring->outstanding_lazy_request = true;
1550 return dev_priv->next_seqno;
1551}
1552
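/*
 * Rough object lifecycle for the helpers below: a buffer used by a request
 * sits on the active list (and its ring's list) tagged with that request's
 * seqno; when the request retires it moves to the flushing list if a GPU
 * write domain is still pending, otherwise straight to the inactive list,
 * where it becomes a candidate for eviction.
 */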
Eric Anholt673a3942008-07-30 12:06:12 -07001553static void
Daniel Vetter617dbe22010-02-11 22:16:02 +01001554i915_gem_object_move_to_active(struct drm_gem_object *obj,
Zou Nan hai852835f2010-05-21 09:08:56 +08001555 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001556{
1557 struct drm_device *dev = obj->dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001558 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001559 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilsona56ba562010-09-28 10:07:56 +01001560 uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001561
Zou Nan hai852835f2010-05-21 09:08:56 +08001562 BUG_ON(ring == NULL);
1563 obj_priv->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001564
1565 /* Add a reference if we're newly entering the active list. */
1566 if (!obj_priv->active) {
1567 drm_gem_object_reference(obj);
1568 obj_priv->active = 1;
1569 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001570
Eric Anholt673a3942008-07-30 12:06:12 -07001571 /* Move from whatever list we were on to the tail of execution. */
Chris Wilson69dc4982010-10-19 10:36:51 +01001572 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
1573 list_move_tail(&obj_priv->ring_list, &ring->active_list);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001574 obj_priv->last_rendering_seqno = seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001575}
1576
Eric Anholtce44b0e2008-11-06 16:00:31 -08001577static void
1578i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1579{
1580 struct drm_device *dev = obj->dev;
1581 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001582 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001583
1584 BUG_ON(!obj_priv->active);
Chris Wilson69dc4982010-10-19 10:36:51 +01001585 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
1586 list_del_init(&obj_priv->ring_list);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001587 obj_priv->last_rendering_seqno = 0;
1588}
Eric Anholt673a3942008-07-30 12:06:12 -07001589
Chris Wilson963b4832009-09-20 23:03:54 +01001590/* Immediately discard the backing storage */
1591static void
1592i915_gem_object_truncate(struct drm_gem_object *obj)
1593{
Daniel Vetter23010e42010-03-08 13:35:02 +01001594 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001595 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001596
Chris Wilsonae9fed62010-08-07 11:01:30 +01001597 /* Our goal here is to return as much of the memory as
1598 * possible back to the system, as we are called from the OOM path.
1599 * To do this we must instruct the shmfs to drop all of its
1600 * backing pages, *now*. Here we mirror the actions taken
1601 * by shmem_delete_inode() to release the backing store.
1602 */
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001603 inode = obj->filp->f_path.dentry->d_inode;
Chris Wilsonae9fed62010-08-07 11:01:30 +01001604 truncate_inode_pages(inode->i_mapping, 0);
1605 if (inode->i_op->truncate_range)
1606 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001607
1608 obj_priv->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001609}
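
/*
 * The madv field distinguishes three states: I915_MADV_WILLNEED (userspace
 * still wants the contents), I915_MADV_DONTNEED (userspace has marked the
 * buffer purgeable, so it may be truncated under memory pressure), and the
 * internal __I915_MADV_PURGED value set above once the backing storage has
 * actually been discarded.
 */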
1610
1611static inline int
1612i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1613{
1614 return obj_priv->madv == I915_MADV_DONTNEED;
1615}
1616
Eric Anholt673a3942008-07-30 12:06:12 -07001617static void
1618i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1619{
1620 struct drm_device *dev = obj->dev;
1621 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001622 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001623
Eric Anholt673a3942008-07-30 12:06:12 -07001624 if (obj_priv->pin_count != 0)
Chris Wilson69dc4982010-10-19 10:36:51 +01001625 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001626 else
Chris Wilson69dc4982010-10-19 10:36:51 +01001627 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1628 list_del_init(&obj_priv->ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001629
Daniel Vetter99fcb762010-02-07 16:20:18 +01001630 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1631
Eric Anholtce44b0e2008-11-06 16:00:31 -08001632 obj_priv->last_rendering_seqno = 0;
Zou Nan hai852835f2010-05-21 09:08:56 +08001633 obj_priv->ring = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001634 if (obj_priv->active) {
1635 obj_priv->active = 0;
1636 drm_gem_object_unreference(obj);
1637 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001638 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001639}
1640
Daniel Vetter63560392010-02-19 11:51:59 +01001641static void
1642i915_gem_process_flushing_list(struct drm_device *dev,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001643 uint32_t flush_domains,
Zou Nan hai852835f2010-05-21 09:08:56 +08001644 struct intel_ring_buffer *ring)
Daniel Vetter63560392010-02-19 11:51:59 +01001645{
1646 drm_i915_private_t *dev_priv = dev->dev_private;
1647 struct drm_i915_gem_object *obj_priv, *next;
1648
1649 list_for_each_entry_safe(obj_priv, next,
Chris Wilson64193402010-10-24 12:38:05 +01001650 &ring->gpu_write_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001651 gpu_write_list) {
Daniel Vettera8089e82010-04-09 19:05:09 +00001652 struct drm_gem_object *obj = &obj_priv->base;
Daniel Vetter63560392010-02-19 11:51:59 +01001653
Chris Wilson64193402010-10-24 12:38:05 +01001654 if (obj->write_domain & flush_domains) {
Daniel Vetter63560392010-02-19 11:51:59 +01001655 uint32_t old_write_domain = obj->write_domain;
1656
1657 obj->write_domain = 0;
1658 list_del_init(&obj_priv->gpu_write_list);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001659 i915_gem_object_move_to_active(obj, ring);
Daniel Vetter63560392010-02-19 11:51:59 +01001660
1661 /* update the fence lru list */
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001662 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1663 struct drm_i915_fence_reg *reg =
1664 &dev_priv->fence_regs[obj_priv->fence_reg];
1665 list_move_tail(&reg->lru_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001666 &dev_priv->mm.fence_list);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001667 }
Daniel Vetter63560392010-02-19 11:51:59 +01001668
1669 trace_i915_gem_object_change_domain(obj,
1670 obj->read_domains,
1671 old_write_domain);
1672 }
1673 }
1674}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001675
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001676uint32_t
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001677i915_add_request(struct drm_device *dev,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001678 struct drm_file *file,
Chris Wilson8dc5d142010-08-12 12:36:12 +01001679 struct drm_i915_gem_request *request,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001680 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001681{
1682 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001683 struct drm_i915_file_private *file_priv = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001684 uint32_t seqno;
1685 int was_empty;
Eric Anholt673a3942008-07-30 12:06:12 -07001686
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001687 if (file != NULL)
1688 file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001689
Chris Wilson8dc5d142010-08-12 12:36:12 +01001690 if (request == NULL) {
1691 request = kzalloc(sizeof(*request), GFP_KERNEL);
1692 if (request == NULL)
1693 return 0;
1694 }
Eric Anholt673a3942008-07-30 12:06:12 -07001695
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001696 seqno = ring->add_request(dev, ring, 0);
Chris Wilsona56ba562010-09-28 10:07:56 +01001697 ring->outstanding_lazy_request = false;
Eric Anholt673a3942008-07-30 12:06:12 -07001698
1699 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001700 request->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001701 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001702 was_empty = list_empty(&ring->request_list);
1703 list_add_tail(&request->list, &ring->request_list);
1704
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001705 if (file_priv) {
Chris Wilson1c255952010-09-26 11:03:27 +01001706 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001707 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001708 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001709 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001710 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001711 }
Eric Anholt673a3942008-07-30 12:06:12 -07001712
Ben Gamarif65d9422009-09-14 17:48:44 -04001713 if (!dev_priv->mm.suspended) {
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001714 mod_timer(&dev_priv->hangcheck_timer,
1715 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
Ben Gamarif65d9422009-09-14 17:48:44 -04001716 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001717 queue_delayed_work(dev_priv->wq,
1718 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001719 }
Eric Anholt673a3942008-07-30 12:06:12 -07001720 return seqno;
1721}
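
/*
 * Life of a request in this scheme: i915_add_request() emits the seqno via
 * ring->add_request() and queues the request on ring->request_list (and on
 * the owning file's client list, used for per-file throttling); the
 * hangcheck timer and retire_work then compare ring->get_seqno() against
 * the queued seqnos and retire everything the GPU has passed.  "Lazy"
 * requests merely reserve dev_priv->next_seqno and are emitted by the first
 * waiter that actually needs them.
 */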
1722
1723/**
1724 * Command execution barrier
1725 *
1726 * Ensures that all commands in the ring are finished
1727 * before signalling the CPU
1728 */
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001729static void
Zou Nan hai852835f2010-05-21 09:08:56 +08001730i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001731{
Eric Anholt673a3942008-07-30 12:06:12 -07001732 uint32_t flush_domains = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001733
1734 /* The sampler always gets flushed on i965 (sigh) */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001735 if (INTEL_INFO(dev)->gen >= 4)
Eric Anholt673a3942008-07-30 12:06:12 -07001736 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
Zou Nan hai852835f2010-05-21 09:08:56 +08001737
1738 ring->flush(dev, ring,
1739 I915_GEM_DOMAIN_COMMAND, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07001740}
1741
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001742static inline void
1743i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001744{
Chris Wilson1c255952010-09-26 11:03:27 +01001745 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07001746
Chris Wilson1c255952010-09-26 11:03:27 +01001747 if (!file_priv)
1748 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001749
Chris Wilson1c255952010-09-26 11:03:27 +01001750 spin_lock(&file_priv->mm.lock);
1751 list_del(&request->client_list);
1752 request->file_priv = NULL;
1753 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001754}
1755
Chris Wilsondfaae392010-09-22 10:31:52 +01001756static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1757 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001758{
Chris Wilsondfaae392010-09-22 10:31:52 +01001759 while (!list_empty(&ring->request_list)) {
1760 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001761
Chris Wilsondfaae392010-09-22 10:31:52 +01001762 request = list_first_entry(&ring->request_list,
1763 struct drm_i915_gem_request,
1764 list);
1765
1766 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001767 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001768 kfree(request);
1769 }
1770
1771 while (!list_empty(&ring->active_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001772 struct drm_i915_gem_object *obj_priv;
1773
Chris Wilsondfaae392010-09-22 10:31:52 +01001774 obj_priv = list_first_entry(&ring->active_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001775 struct drm_i915_gem_object,
Chris Wilson69dc4982010-10-19 10:36:51 +01001776 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001777
Chris Wilsondfaae392010-09-22 10:31:52 +01001778 obj_priv->base.write_domain = 0;
1779 list_del_init(&obj_priv->gpu_write_list);
1780 i915_gem_object_move_to_inactive(&obj_priv->base);
Eric Anholt673a3942008-07-30 12:06:12 -07001781 }
Eric Anholt673a3942008-07-30 12:06:12 -07001782}
1783
Chris Wilson069efc12010-09-30 16:53:18 +01001784void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07001785{
Chris Wilsondfaae392010-09-22 10:31:52 +01001786 struct drm_i915_private *dev_priv = dev->dev_private;
1787 struct drm_i915_gem_object *obj_priv;
Chris Wilson069efc12010-09-30 16:53:18 +01001788 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001789
Chris Wilsondfaae392010-09-22 10:31:52 +01001790 i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
Chris Wilson87acb0a2010-10-19 10:13:00 +01001791 i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
Chris Wilson549f7362010-10-19 11:19:32 +01001792 i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01001793
1794 /* Remove anything from the flushing lists. The GPU cache is likely
1795 * to be lost on reset along with the data, so simply move the
1796 * lost bo to the inactive list.
1797 */
1798 while (!list_empty(&dev_priv->mm.flushing_list)) {
Chris Wilson9375e442010-09-19 12:21:28 +01001799 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1800 struct drm_i915_gem_object,
Chris Wilson69dc4982010-10-19 10:36:51 +01001801 mm_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001802
1803 obj_priv->base.write_domain = 0;
Chris Wilsondfaae392010-09-22 10:31:52 +01001804 list_del_init(&obj_priv->gpu_write_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001805 i915_gem_object_move_to_inactive(&obj_priv->base);
1806 }
Chris Wilson9375e442010-09-19 12:21:28 +01001807
Chris Wilsondfaae392010-09-22 10:31:52 +01001808 /* Move everything out of the GPU domains to ensure we do any
1809 * necessary invalidation upon reuse.
1810 */
Chris Wilson77f01232010-09-19 12:31:36 +01001811 list_for_each_entry(obj_priv,
1812 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001813 mm_list)
Chris Wilson77f01232010-09-19 12:31:36 +01001814 {
1815 obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1816 }
Chris Wilson069efc12010-09-30 16:53:18 +01001817
1818 /* The fence registers are invalidated so clear them out */
1819 for (i = 0; i < 16; i++) {
1820 struct drm_i915_fence_reg *reg;
1821
1822 reg = &dev_priv->fence_regs[i];
1823 if (!reg->obj)
1824 continue;
1825
1826 i915_gem_clear_fence_reg(reg->obj);
1827 }
Eric Anholt673a3942008-07-30 12:06:12 -07001828}
1829
1830/**
1831 * This function clears the request list as sequence numbers are passed.
1832 */
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001833static void
1834i915_gem_retire_requests_ring(struct drm_device *dev,
1835 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001836{
1837 drm_i915_private_t *dev_priv = dev->dev_private;
1838 uint32_t seqno;
1839
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001840 if (!ring->status_page.page_addr ||
1841 list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001842 return;
1843
Chris Wilson23bc5982010-09-29 16:10:57 +01001844 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001845
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001846 seqno = ring->get_seqno(dev, ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08001847 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001848 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001849
Zou Nan hai852835f2010-05-21 09:08:56 +08001850 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001851 struct drm_i915_gem_request,
1852 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001853
Chris Wilsondfaae392010-09-22 10:31:52 +01001854 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001855 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001856
1857 trace_i915_gem_request_retire(dev, request->seqno);
1858
1859 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001860 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001861 kfree(request);
1862 }
1863
1864 /* Move any buffers on the active list that are no longer referenced
1865 * by the ringbuffer to the flushing/inactive lists as appropriate.
1866 */
1867 while (!list_empty(&ring->active_list)) {
1868 struct drm_gem_object *obj;
1869 struct drm_i915_gem_object *obj_priv;
1870
1871 obj_priv = list_first_entry(&ring->active_list,
1872 struct drm_i915_gem_object,
Chris Wilson69dc4982010-10-19 10:36:51 +01001873 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001874
Chris Wilsondfaae392010-09-22 10:31:52 +01001875 if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001876 break;
1877
1878 obj = &obj_priv->base;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001879 if (obj->write_domain != 0)
1880 i915_gem_object_move_to_flushing(obj);
1881 else
1882 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001883 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001884
1885 if (unlikely (dev_priv->trace_irq_seqno &&
1886 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001887 ring->user_irq_put(dev, ring);
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001888 dev_priv->trace_irq_seqno = 0;
1889 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001890
1891 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001892}
1893
1894void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001895i915_gem_retire_requests(struct drm_device *dev)
1896{
1897 drm_i915_private_t *dev_priv = dev->dev_private;
1898
Chris Wilsonbe726152010-07-23 23:18:50 +01001899 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1900 struct drm_i915_gem_object *obj_priv, *tmp;
1901
1902 /* We must be careful that during unbind() we do not
1903 * accidentally infinitely recurse into retire requests.
1904 * Currently:
1905 * retire -> free -> unbind -> wait -> retire_ring
1906 */
1907 list_for_each_entry_safe(obj_priv, tmp,
1908 &dev_priv->mm.deferred_free_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001909 mm_list)
Chris Wilsonbe726152010-07-23 23:18:50 +01001910 i915_gem_free_object_tail(&obj_priv->base);
1911 }
1912
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001913 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
Chris Wilson87acb0a2010-10-19 10:13:00 +01001914 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
Chris Wilson549f7362010-10-19 11:19:32 +01001915 i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001916}
1917
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001918static void
Eric Anholt673a3942008-07-30 12:06:12 -07001919i915_gem_retire_work_handler(struct work_struct *work)
1920{
1921 drm_i915_private_t *dev_priv;
1922 struct drm_device *dev;
1923
1924 dev_priv = container_of(work, drm_i915_private_t,
1925 mm.retire_work.work);
1926 dev = dev_priv->dev;
1927
Chris Wilson891b48c2010-09-29 12:26:37 +01001928 /* Come back later if the device is busy... */
1929 if (!mutex_trylock(&dev->struct_mutex)) {
1930 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1931 return;
1932 }
1933
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001934 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001935
Keith Packard6dbe2772008-10-14 21:41:13 -07001936 if (!dev_priv->mm.suspended &&
Zou Nan haid1b851f2010-05-21 09:08:57 +08001937 (!list_empty(&dev_priv->render_ring.request_list) ||
Chris Wilson549f7362010-10-19 11:19:32 +01001938 !list_empty(&dev_priv->bsd_ring.request_list) ||
1939 !list_empty(&dev_priv->blt_ring.request_list)))
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001940 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Eric Anholt673a3942008-07-30 12:06:12 -07001941 mutex_unlock(&dev->struct_mutex);
1942}
1943
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001944int
Zou Nan hai852835f2010-05-21 09:08:56 +08001945i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001946 bool interruptible, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001947{
1948 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001949 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001950 int ret = 0;
1951
1952 BUG_ON(seqno == 0);
1953
Ben Gamariba1234d2009-09-14 17:48:47 -04001954 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001955 return -EAGAIN;
Ben Gamariffed1d02009-09-14 17:48:41 -04001956
Chris Wilsona56ba562010-09-28 10:07:56 +01001957 if (ring->outstanding_lazy_request) {
Chris Wilson8dc5d142010-08-12 12:36:12 +01001958 seqno = i915_add_request(dev, NULL, NULL, ring);
Daniel Vettere35a41d2010-02-11 22:13:59 +01001959 if (seqno == 0)
1960 return -ENOMEM;
1961 }
Chris Wilsona56ba562010-09-28 10:07:56 +01001962 BUG_ON(seqno == dev_priv->next_seqno);
Daniel Vettere35a41d2010-02-11 22:13:59 +01001963
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001964 if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
Eric Anholtbad720f2009-10-22 16:11:14 -07001965 if (HAS_PCH_SPLIT(dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001966 ier = I915_READ(DEIER) | I915_READ(GTIER);
1967 else
1968 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001969 if (!ier) {
1970 DRM_ERROR("something (likely vbetool) disabled "
1971 "interrupts, re-enabling\n");
1972 i915_driver_irq_preinstall(dev);
1973 i915_driver_irq_postinstall(dev);
1974 }
1975
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001976 trace_i915_gem_request_wait_begin(dev, seqno);
1977
Zou Nan hai852835f2010-05-21 09:08:56 +08001978 ring->waiting_gem_seqno = seqno;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001979 ring->user_irq_get(dev, ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02001980 if (interruptible)
Zou Nan hai852835f2010-05-21 09:08:56 +08001981 ret = wait_event_interruptible(ring->irq_queue,
1982 i915_seqno_passed(
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001983 ring->get_seqno(dev, ring), seqno)
Zou Nan hai852835f2010-05-21 09:08:56 +08001984 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001985 else
Zou Nan hai852835f2010-05-21 09:08:56 +08001986 wait_event(ring->irq_queue,
1987 i915_seqno_passed(
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001988 ring->get_seqno(dev, ring), seqno)
Zou Nan hai852835f2010-05-21 09:08:56 +08001989 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001990
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001991 ring->user_irq_put(dev, ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08001992 ring->waiting_gem_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001993
1994 trace_i915_gem_request_wait_end(dev, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001995 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001996 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001997 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07001998
1999 if (ret && ret != -ERESTARTSYS)
Daniel Vetter8bff9172010-02-11 22:19:40 +01002000 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002001 __func__, ret, seqno, ring->get_seqno(dev, ring),
Daniel Vetter8bff9172010-02-11 22:19:40 +01002002 dev_priv->next_seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07002003
2004 /* Directly dispatch request retiring. While we have the work queue
2005 * to handle this, the waiter on a request often wants an associated
2006 * buffer to have made it to the inactive list, and we would need
2007 * a separate wait queue to handle that.
2008 */
2009 if (ret == 0)
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002010 i915_gem_retire_requests_ring(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07002011
2012 return ret;
2013}
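
/*
 * The i915_seqno_passed() test used above is the usual wrap-safe sequence
 * number comparison; a sketch of it (it lives in i915_drv.h at this point)
 * is simply:
 *
 *	static inline bool
 *	i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 *	{
 *		return (int32_t)(seq1 - seq2) >= 0;
 *	}
 *
 * so ordering stays correct even after the 32-bit counter wraps.
 */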
2014
Daniel Vetter48764bf2009-09-15 22:57:32 +02002015/**
2016 * Waits for a sequence number to be signaled, and cleans up the
2017 * request and object lists appropriately for that event.
2018 */
2019static int
Zou Nan hai852835f2010-05-21 09:08:56 +08002020i915_wait_request(struct drm_device *dev, uint32_t seqno,
Chris Wilsona56ba562010-09-28 10:07:56 +01002021 struct intel_ring_buffer *ring)
Daniel Vetter48764bf2009-09-15 22:57:32 +02002022{
Zou Nan hai852835f2010-05-21 09:08:56 +08002023 return i915_do_wait_request(dev, seqno, 1, ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02002024}
2025
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002026static void
Chris Wilson92204342010-09-18 11:02:01 +01002027i915_gem_flush_ring(struct drm_device *dev,
Chris Wilsonc78ec302010-09-20 12:50:23 +01002028 struct drm_file *file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01002029 struct intel_ring_buffer *ring,
2030 uint32_t invalidate_domains,
2031 uint32_t flush_domains)
2032{
2033 ring->flush(dev, ring, invalidate_domains, flush_domains);
2034 i915_gem_process_flushing_list(dev, flush_domains, ring);
2035}
2036
2037static void
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002038i915_gem_flush(struct drm_device *dev,
Chris Wilsonc78ec302010-09-20 12:50:23 +01002039 struct drm_file *file_priv,
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002040 uint32_t invalidate_domains,
Chris Wilson92204342010-09-18 11:02:01 +01002041 uint32_t flush_domains,
2042 uint32_t flush_rings)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002043{
2044 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter8bff9172010-02-11 22:19:40 +01002045
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002046 if (flush_domains & I915_GEM_DOMAIN_CPU)
2047 drm_agp_chipset_flush(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002048
Chris Wilson92204342010-09-18 11:02:01 +01002049 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
2050 if (flush_rings & RING_RENDER)
Chris Wilsonc78ec302010-09-20 12:50:23 +01002051 i915_gem_flush_ring(dev, file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01002052 &dev_priv->render_ring,
2053 invalidate_domains, flush_domains);
2054 if (flush_rings & RING_BSD)
Chris Wilsonc78ec302010-09-20 12:50:23 +01002055 i915_gem_flush_ring(dev, file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01002056 &dev_priv->bsd_ring,
2057 invalidate_domains, flush_domains);
Chris Wilson549f7362010-10-19 11:19:32 +01002058 if (flush_rings & RING_BLT)
2059 i915_gem_flush_ring(dev, file_priv,
2060 &dev_priv->blt_ring,
2061 invalidate_domains, flush_domains);
Chris Wilson92204342010-09-18 11:02:01 +01002062 }
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002063}
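
/*
 * Note the split above: a CPU-domain flush goes through the chipset global
 * write buffer flush, while GPU domains are flushed per ring according to
 * the flush_rings mask (RING_RENDER / RING_BSD / RING_BLT), each of which
 * then walks its gpu_write_list via i915_gem_process_flushing_list().
 */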
2064
Eric Anholt673a3942008-07-30 12:06:12 -07002065/**
2066 * Ensures that all rendering to the object has completed and the object is
2067 * safe to unbind from the GTT or access from the CPU.
2068 */
2069static int
Chris Wilson2cf34d72010-09-14 13:03:28 +01002070i915_gem_object_wait_rendering(struct drm_gem_object *obj,
2071 bool interruptible)
Eric Anholt673a3942008-07-30 12:06:12 -07002072{
2073 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002074 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002075 int ret;
2076
Eric Anholte47c68e2008-11-14 13:35:19 -08002077 /* This function only exists to support waiting for existing rendering,
2078 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07002079 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002080 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07002081
2082 /* If there is rendering queued on the buffer being evicted, wait for
2083 * it.
2084 */
2085 if (obj_priv->active) {
Chris Wilson2cf34d72010-09-14 13:03:28 +01002086 ret = i915_do_wait_request(dev,
2087 obj_priv->last_rendering_seqno,
2088 interruptible,
2089 obj_priv->ring);
2090 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002091 return ret;
2092 }
2093
2094 return 0;
2095}
2096
2097/**
2098 * Unbinds an object from the GTT aperture.
2099 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002100int
Eric Anholt673a3942008-07-30 12:06:12 -07002101i915_gem_object_unbind(struct drm_gem_object *obj)
2102{
2103 struct drm_device *dev = obj->dev;
Chris Wilson73aa8082010-09-30 11:46:12 +01002104 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002105 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002106 int ret = 0;
2107
Eric Anholt673a3942008-07-30 12:06:12 -07002108 if (obj_priv->gtt_space == NULL)
2109 return 0;
2110
2111 if (obj_priv->pin_count != 0) {
2112 DRM_ERROR("Attempting to unbind pinned buffer\n");
2113 return -EINVAL;
2114 }
2115
Eric Anholt5323fd02009-09-09 11:50:45 -07002116 /* blow away mappings if mapped through GTT */
2117 i915_gem_release_mmap(obj);
2118
Eric Anholt673a3942008-07-30 12:06:12 -07002119 /* Move the object to the CPU domain to ensure that
2120 * any possible CPU writes while it's not in the GTT
2121 * are flushed when we go to remap it. This will
2122 * also ensure that all pending GPU writes are finished
2123 * before we unbind.
2124 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002125 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Chris Wilson8dc17752010-07-23 23:18:51 +01002126 if (ret == -ERESTARTSYS)
Eric Anholt673a3942008-07-30 12:06:12 -07002127 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002128	/* Continue on if we fail due to EIO; the GPU is hung, so we
2129	 * should be safe, but we need to clean up or else we might
2130 * cause memory corruption through use-after-free.
2131 */
Chris Wilson812ed4922010-09-30 15:08:57 +01002132 if (ret) {
2133 i915_gem_clflush_object(obj);
2134 obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
2135 }
Eric Anholt673a3942008-07-30 12:06:12 -07002136
Daniel Vetter96b47b62009-12-15 17:50:00 +01002137 /* release the fence reg _after_ flushing */
2138 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2139 i915_gem_clear_fence_reg(obj);
2140
Chris Wilson73aa8082010-09-30 11:46:12 +01002141 drm_unbind_agp(obj_priv->agp_mem);
2142 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002143
Eric Anholt856fa192009-03-19 14:10:50 -07002144 i915_gem_object_put_pages(obj);
Chris Wilsona32808c2009-09-20 21:29:47 +01002145 BUG_ON(obj_priv->pages_refcount);
Eric Anholt673a3942008-07-30 12:06:12 -07002146
Chris Wilson73aa8082010-09-30 11:46:12 +01002147 i915_gem_info_remove_gtt(dev_priv, obj->size);
Chris Wilson69dc4982010-10-19 10:36:51 +01002148 list_del_init(&obj_priv->mm_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002149
Chris Wilson73aa8082010-09-30 11:46:12 +01002150 drm_mm_put_block(obj_priv->gtt_space);
2151 obj_priv->gtt_space = NULL;
Chris Wilson9af90d12010-10-17 10:01:56 +01002152 obj_priv->gtt_offset = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002153
Chris Wilson963b4832009-09-20 23:03:54 +01002154 if (i915_gem_object_is_purgeable(obj_priv))
2155 i915_gem_object_truncate(obj);
2156
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002157 trace_i915_gem_object_unbind(obj);
2158
Chris Wilson8dc17752010-07-23 23:18:51 +01002159 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002160}
2161
Chris Wilsona56ba562010-09-28 10:07:56 +01002162static int i915_ring_idle(struct drm_device *dev,
2163 struct intel_ring_buffer *ring)
2164{
Chris Wilson395b70b2010-10-28 21:28:46 +01002165 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
Chris Wilson64193402010-10-24 12:38:05 +01002166 return 0;
2167
Chris Wilsona56ba562010-09-28 10:07:56 +01002168 i915_gem_flush_ring(dev, NULL, ring,
2169 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2170 return i915_wait_request(dev,
2171 i915_gem_next_request_seqno(dev, ring),
2172 ring);
2173}
2174
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01002175int
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002176i915_gpu_idle(struct drm_device *dev)
2177{
2178 drm_i915_private_t *dev_priv = dev->dev_private;
2179 bool lists_empty;
Zou Nan hai852835f2010-05-21 09:08:56 +08002180 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002181
Zou Nan haid1b851f2010-05-21 09:08:57 +08002182 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson395b70b2010-10-28 21:28:46 +01002183 list_empty(&dev_priv->mm.active_list));
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002184 if (lists_empty)
2185 return 0;
2186
2187 /* Flush everything onto the inactive list. */
Chris Wilsona56ba562010-09-28 10:07:56 +01002188 ret = i915_ring_idle(dev, &dev_priv->render_ring);
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002189 if (ret)
2190 return ret;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002191
Chris Wilson87acb0a2010-10-19 10:13:00 +01002192 ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
2193 if (ret)
2194 return ret;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002195
Chris Wilson549f7362010-10-19 11:19:32 +01002196 ret = i915_ring_idle(dev, &dev_priv->blt_ring);
2197 if (ret)
2198 return ret;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002199
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002200 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002201}
2202
Chris Wilson5cdf5882010-09-27 15:51:07 +01002203static int
Chris Wilson4bdadb92010-01-27 13:36:32 +00002204i915_gem_object_get_pages(struct drm_gem_object *obj,
2205 gfp_t gfpmask)
Eric Anholt673a3942008-07-30 12:06:12 -07002206{
Daniel Vetter23010e42010-03-08 13:35:02 +01002207 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002208 int page_count, i;
2209 struct address_space *mapping;
2210 struct inode *inode;
2211 struct page *page;
Eric Anholt673a3942008-07-30 12:06:12 -07002212
Daniel Vetter778c3542010-05-13 11:49:44 +02002213 BUG_ON(obj_priv->pages_refcount
2214 == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2215
Eric Anholt856fa192009-03-19 14:10:50 -07002216 if (obj_priv->pages_refcount++ != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002217 return 0;
2218
2219 /* Get the list of pages out of our struct file. They'll be pinned
2220 * at this point until we release them.
2221 */
2222 page_count = obj->size / PAGE_SIZE;
Eric Anholt856fa192009-03-19 14:10:50 -07002223 BUG_ON(obj_priv->pages != NULL);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07002224 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
Eric Anholt856fa192009-03-19 14:10:50 -07002225 if (obj_priv->pages == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002226 obj_priv->pages_refcount--;
Eric Anholt673a3942008-07-30 12:06:12 -07002227 return -ENOMEM;
2228 }
2229
2230 inode = obj->filp->f_path.dentry->d_inode;
2231 mapping = inode->i_mapping;
2232 for (i = 0; i < page_count; i++) {
Chris Wilson4bdadb92010-01-27 13:36:32 +00002233 page = read_cache_page_gfp(mapping, i,
Linus Torvalds985b8232010-07-02 10:04:42 +10002234 GFP_HIGHUSER |
Chris Wilson4bdadb92010-01-27 13:36:32 +00002235 __GFP_COLD |
Linus Torvaldscd9f0402010-07-18 09:44:37 -07002236 __GFP_RECLAIMABLE |
Chris Wilson4bdadb92010-01-27 13:36:32 +00002237 gfpmask);
Chris Wilson1f2b1012010-03-12 19:52:55 +00002238 if (IS_ERR(page))
2239 goto err_pages;
2240
Eric Anholt856fa192009-03-19 14:10:50 -07002241 obj_priv->pages[i] = page;
Eric Anholt673a3942008-07-30 12:06:12 -07002242 }
Eric Anholt280b7132009-03-12 16:56:27 -07002243
2244 if (obj_priv->tiling_mode != I915_TILING_NONE)
2245 i915_gem_object_do_bit_17_swizzle(obj);
2246
Eric Anholt673a3942008-07-30 12:06:12 -07002247 return 0;
Chris Wilson1f2b1012010-03-12 19:52:55 +00002248
2249err_pages:
2250 while (i--)
2251 page_cache_release(obj_priv->pages[i]);
2252
2253 drm_free_large(obj_priv->pages);
2254 obj_priv->pages = NULL;
2255 obj_priv->pages_refcount--;
2256 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07002257}
2258
Eric Anholt4e901fd2009-10-26 16:44:17 -07002259static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2260{
2261 struct drm_gem_object *obj = reg->obj;
2262 struct drm_device *dev = obj->dev;
2263 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002264 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt4e901fd2009-10-26 16:44:17 -07002265 int regnum = obj_priv->fence_reg;
2266 uint64_t val;
2267
2268 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2269 0xfffff000) << 32;
2270 val |= obj_priv->gtt_offset & 0xfffff000;
2271 val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2272 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2273
2274 if (obj_priv->tiling_mode == I915_TILING_Y)
2275 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2276 val |= I965_FENCE_REG_VALID;
2277
2278 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2279}
2280
Jesse Barnesde151cf2008-11-12 10:03:55 -08002281static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2282{
2283 struct drm_gem_object *obj = reg->obj;
2284 struct drm_device *dev = obj->dev;
2285 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002286 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002287 int regnum = obj_priv->fence_reg;
2288 uint64_t val;
2289
2290 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2291 0xfffff000) << 32;
2292 val |= obj_priv->gtt_offset & 0xfffff000;
2293 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2294 if (obj_priv->tiling_mode == I915_TILING_Y)
2295 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2296 val |= I965_FENCE_REG_VALID;
2297
2298 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2299}
2300
2301static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2302{
2303 struct drm_gem_object *obj = reg->obj;
2304 struct drm_device *dev = obj->dev;
2305 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002306 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002307 int regnum = obj_priv->fence_reg;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002308 int tile_width;
Eric Anholtdc529a42009-03-10 22:34:49 -07002309 uint32_t fence_reg, val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002310 uint32_t pitch_val;
2311
2312 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2313 (obj_priv->gtt_offset & (obj->size - 1))) {
Linus Torvaldsf06da262009-02-09 08:57:29 -08002314 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002315 __func__, obj_priv->gtt_offset, obj->size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002316 return;
2317 }
2318
Jesse Barnes0f973f22009-01-26 17:10:45 -08002319 if (obj_priv->tiling_mode == I915_TILING_Y &&
2320 HAS_128_BYTE_Y_TILING(dev))
2321 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002322 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002323 tile_width = 512;
2324
2325 /* Note: pitch better be a power of two tile widths */
2326 pitch_val = obj_priv->stride / tile_width;
2327 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002328
Daniel Vetterc36a2a62010-04-17 15:12:03 +02002329 if (obj_priv->tiling_mode == I915_TILING_Y &&
2330 HAS_128_BYTE_Y_TILING(dev))
2331 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2332 else
2333 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2334
Jesse Barnesde151cf2008-11-12 10:03:55 -08002335 val = obj_priv->gtt_offset;
2336 if (obj_priv->tiling_mode == I915_TILING_Y)
2337 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2338 val |= I915_FENCE_SIZE_BITS(obj->size);
2339 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2340 val |= I830_FENCE_REG_VALID;
2341
Eric Anholtdc529a42009-03-10 22:34:49 -07002342 if (regnum < 8)
2343 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2344 else
2345 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2346 I915_WRITE(fence_reg, val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002347}
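
/*
 * Worked example for the pitch encoding above: an X-tiled object on a
 * pre-965 chip uses 512-byte wide tiles, so a 2048-byte stride gives
 * pitch_val = ffs(2048 / 512) - 1 = 2, which is then shifted into place by
 * I830_FENCE_PITCH_SHIFT.  Fence registers 0-7 live at FENCE_REG_830_0 and
 * the additional 945-era registers 8-15 at FENCE_REG_945_8.
 */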
2348
2349static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2350{
2351 struct drm_gem_object *obj = reg->obj;
2352 struct drm_device *dev = obj->dev;
2353 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002354 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002355 int regnum = obj_priv->fence_reg;
2356 uint32_t val;
2357 uint32_t pitch_val;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002358 uint32_t fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002359
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002360 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
Jesse Barnesde151cf2008-11-12 10:03:55 -08002361 (obj_priv->gtt_offset & (obj->size - 1))) {
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002362 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002363 __func__, obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002364 return;
2365 }
2366
Eric Anholte76a16d2009-05-26 17:44:56 -07002367 pitch_val = obj_priv->stride / 128;
2368 pitch_val = ffs(pitch_val) - 1;
2369 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2370
Jesse Barnesde151cf2008-11-12 10:03:55 -08002371 val = obj_priv->gtt_offset;
2372 if (obj_priv->tiling_mode == I915_TILING_Y)
2373 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002374 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2375 WARN_ON(fence_size_bits & ~0x00000f00);
2376 val |= fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002377 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2378 val |= I830_FENCE_REG_VALID;
2379
2380 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002381}
2382
Chris Wilson2cf34d72010-09-14 13:03:28 +01002383static int i915_find_fence_reg(struct drm_device *dev,
2384 bool interruptible)
Daniel Vetterae3db242010-02-19 11:51:58 +01002385{
2386 struct drm_i915_fence_reg *reg = NULL;
2387 struct drm_i915_gem_object *obj_priv = NULL;
2388 struct drm_i915_private *dev_priv = dev->dev_private;
2389 struct drm_gem_object *obj = NULL;
2390 int i, avail, ret;
2391
2392 /* First try to find a free reg */
2393 avail = 0;
2394 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2395 reg = &dev_priv->fence_regs[i];
2396 if (!reg->obj)
2397 return i;
2398
Daniel Vetter23010e42010-03-08 13:35:02 +01002399 obj_priv = to_intel_bo(reg->obj);
Daniel Vetterae3db242010-02-19 11:51:58 +01002400 if (!obj_priv->pin_count)
2401 avail++;
2402 }
2403
2404 if (avail == 0)
2405 return -ENOSPC;
2406
2407 /* None available, try to steal one or wait for a user to finish */
2408 i = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002409 list_for_each_entry(reg, &dev_priv->mm.fence_list,
2410 lru_list) {
2411 obj = reg->obj;
2412 obj_priv = to_intel_bo(obj);
Daniel Vetterae3db242010-02-19 11:51:58 +01002413
2414 if (obj_priv->pin_count)
2415 continue;
2416
2417 /* found one! */
2418 i = obj_priv->fence_reg;
2419 break;
2420 }
2421
2422 BUG_ON(i == I915_FENCE_REG_NONE);
2423
2424 /* We only have a reference on obj from the active list. put_fence_reg
2425 * might drop that one, causing a use-after-free. So hold a
2426 * private reference to obj like the other callers of put_fence_reg
2427 * (set_tiling ioctl) do. */
2428 drm_gem_object_reference(obj);
Chris Wilson2cf34d72010-09-14 13:03:28 +01002429 ret = i915_gem_object_put_fence_reg(obj, interruptible);
Daniel Vetterae3db242010-02-19 11:51:58 +01002430 drm_gem_object_unreference(obj);
2431 if (ret != 0)
2432 return ret;
2433
2434 return i;
2435}
2436
Jesse Barnesde151cf2008-11-12 10:03:55 -08002437/**
2438 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2439 * @obj: object to map through a fence reg
2440 *
2441 * When mapping objects through the GTT, userspace wants to be able to write
2442 * to them without having to worry about swizzling if the object is tiled.
2443 *
2444 * This function walks the fence regs looking for a free one for @obj,
2445 * stealing one if it can't find any.
2446 *
2447 * It then sets up the reg based on the object's properties: address, pitch
2448 * and tiling format.
2449 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002450int
Chris Wilson2cf34d72010-09-14 13:03:28 +01002451i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
2452 bool interruptible)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002453{
2454 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002455 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002456 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002457 struct drm_i915_fence_reg *reg = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002458 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002459
Eric Anholta09ba7f2009-08-29 12:49:51 -07002460 /* Just update our place in the LRU if our fence is getting used. */
2461 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002462 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2463 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002464 return 0;
2465 }
2466
Jesse Barnesde151cf2008-11-12 10:03:55 -08002467 switch (obj_priv->tiling_mode) {
2468 case I915_TILING_NONE:
2469 WARN(1, "allocating a fence for non-tiled object?\n");
2470 break;
2471 case I915_TILING_X:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002472 if (!obj_priv->stride)
2473 return -EINVAL;
2474 WARN((obj_priv->stride & (512 - 1)),
2475 "object 0x%08x is X tiled but has non-512B pitch\n",
2476 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002477 break;
2478 case I915_TILING_Y:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002479 if (!obj_priv->stride)
2480 return -EINVAL;
2481 WARN((obj_priv->stride & (128 - 1)),
2482 "object 0x%08x is Y tiled but has non-128B pitch\n",
2483 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002484 break;
2485 }
2486
Chris Wilson2cf34d72010-09-14 13:03:28 +01002487 ret = i915_find_fence_reg(dev, interruptible);
Daniel Vetterae3db242010-02-19 11:51:58 +01002488 if (ret < 0)
2489 return ret;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002490
Daniel Vetterae3db242010-02-19 11:51:58 +01002491 obj_priv->fence_reg = ret;
2492 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002493 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002494
Jesse Barnesde151cf2008-11-12 10:03:55 -08002495 reg->obj = obj;
2496
Chris Wilsone259bef2010-09-17 00:32:02 +01002497 switch (INTEL_INFO(dev)->gen) {
2498 case 6:
Eric Anholt4e901fd2009-10-26 16:44:17 -07002499 sandybridge_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002500 break;
2501 case 5:
2502 case 4:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002503 i965_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002504 break;
2505 case 3:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002506 i915_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002507 break;
2508 case 2:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002509 i830_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002510 break;
2511 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002512
Daniel Vetterae3db242010-02-19 11:51:58 +01002513 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2514 obj_priv->tiling_mode);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002515
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002516 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002517}
2518
2519/**
2520 * i915_gem_clear_fence_reg - clear out fence register info
2521 * @obj: object to clear
2522 *
2523 * Zeroes out the fence register itself and clears out the associated
2524 * data structures in dev_priv and obj_priv.
2525 */
2526static void
2527i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2528{
2529 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002530 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002531 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002532 struct drm_i915_fence_reg *reg =
2533 &dev_priv->fence_regs[obj_priv->fence_reg];
Chris Wilsone259bef2010-09-17 00:32:02 +01002534 uint32_t fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002535
Chris Wilsone259bef2010-09-17 00:32:02 +01002536 switch (INTEL_INFO(dev)->gen) {
2537 case 6:
Eric Anholt4e901fd2009-10-26 16:44:17 -07002538 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2539 (obj_priv->fence_reg * 8), 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002540 break;
2541 case 5:
2542 case 4:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002543 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002544 break;
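	/* On gen 3, fence registers 8-15 (945 and later) live at
	 * FENCE_REG_945_8, while registers 0-7 sit at the same addresses
	 * as the gen-2 (830) fences, hence the unusual fall-through into
	 * the case 2 statement below.
	 */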
2545 case 3:
Chris Wilson9b74f732010-09-22 19:10:44 +01002546 if (obj_priv->fence_reg >= 8)
Chris Wilsone259bef2010-09-17 00:32:02 +01002547 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002548 else
Chris Wilsone259bef2010-09-17 00:32:02 +01002549 case 2:
2550 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002551
2552 I915_WRITE(fence_reg, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002553 break;
Eric Anholtdc529a42009-03-10 22:34:49 -07002554 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002555
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002556 reg->obj = NULL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002557 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002558 list_del_init(&reg->lru_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002559}
2560
Eric Anholt673a3942008-07-30 12:06:12 -07002561/**
Chris Wilson52dc7d32009-06-06 09:46:01 +01002562 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2563 * to the buffer to finish, and then resets the fence register.
2564 * @obj: tiled object holding a fence register.
Chris Wilson2cf34d72010-09-14 13:03:28 +01002565 * @interruptible: whether the wait upon the fence is interruptible
Chris Wilson52dc7d32009-06-06 09:46:01 +01002566 *
2567 * Zeroes out the fence register itself and clears out the associated
2568 * data structures in dev_priv and obj_priv.
2569 */
2570int
Chris Wilson2cf34d72010-09-14 13:03:28 +01002571i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2572 bool interruptible)
Chris Wilson52dc7d32009-06-06 09:46:01 +01002573{
2574 struct drm_device *dev = obj->dev;
Chris Wilson53640e12010-09-20 11:40:50 +01002575 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002576 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson53640e12010-09-20 11:40:50 +01002577 struct drm_i915_fence_reg *reg;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002578
2579 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2580 return 0;
2581
Daniel Vetter10ae9bd2010-02-01 13:59:17 +01002582 /* If we've changed tiling, GTT-mappings of the object
2583 * need to re-fault to ensure that the correct fence register
2584 * setup is in place.
2585 */
2586 i915_gem_release_mmap(obj);
2587
Chris Wilson52dc7d32009-06-06 09:46:01 +01002588 /* On the i915, GPU access to tiled buffers is via a fence,
2589 * therefore we must wait for any outstanding access to complete
2590 * before clearing the fence.
2591 */
Chris Wilson53640e12010-09-20 11:40:50 +01002592 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2593 if (reg->gpu) {
Chris Wilson52dc7d32009-06-06 09:46:01 +01002594 int ret;
2595
Daniel Vetterde18a292010-11-27 22:30:41 +01002596 ret = i915_gem_object_flush_gpu_write_domain(obj);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002597 if (ret)
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002598 return ret;
2599
Chris Wilson2cf34d72010-09-14 13:03:28 +01002600 ret = i915_gem_object_wait_rendering(obj, interruptible);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002601 if (ret)
Chris Wilson52dc7d32009-06-06 09:46:01 +01002602 return ret;
Chris Wilson53640e12010-09-20 11:40:50 +01002603
2604 reg->gpu = false;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002605 }
2606
Daniel Vetter4a726612010-02-01 13:59:16 +01002607 i915_gem_object_flush_gtt_write_domain(obj);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002608 i915_gem_clear_fence_reg(obj);
Chris Wilson52dc7d32009-06-06 09:46:01 +01002609
2610 return 0;
2611}
2612
2613/**
Eric Anholt673a3942008-07-30 12:06:12 -07002614 * Finds free space in the GTT aperture and binds the object there.
2615 */
2616static int
2617i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2618{
2619 struct drm_device *dev = obj->dev;
2620 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002621 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002622 struct drm_mm_node *free_space;
Chris Wilson4bdadb92010-01-27 13:36:32 +00002623 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Chris Wilson07f73f62009-09-14 16:50:30 +01002624 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002625
Chris Wilsonbb6baf72009-09-22 14:24:13 +01002626 if (obj_priv->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002627 DRM_ERROR("Attempting to bind a purgeable object\n");
2628 return -EINVAL;
2629 }
2630
Eric Anholt673a3942008-07-30 12:06:12 -07002631 if (alignment == 0)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002632 alignment = i915_gem_get_gtt_alignment(obj);
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002633 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002634 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2635 return -EINVAL;
2636 }
2637
Chris Wilson654fc602010-05-27 13:18:21 +01002638 /* If the object is bigger than the entire aperture, reject it early
2639 * before evicting everything in a vain attempt to find space.
2640 */
Chris Wilson73aa8082010-09-30 11:46:12 +01002641 if (obj->size > dev_priv->mm.gtt_total) {
Chris Wilson654fc602010-05-27 13:18:21 +01002642 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2643 return -E2BIG;
2644 }
2645
Eric Anholt673a3942008-07-30 12:06:12 -07002646 search_free:
2647 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2648 obj->size, alignment, 0);
Chris Wilson9af90d12010-10-17 10:01:56 +01002649 if (free_space != NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002650 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2651 alignment);
Eric Anholt673a3942008-07-30 12:06:12 -07002652 if (obj_priv->gtt_space == NULL) {
2653 /* If the gtt is empty and we're still having trouble
2654 * fitting our object in, we're out of memory.
2655 */
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002656 ret = i915_gem_evict_something(dev, obj->size, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01002657 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002658 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002659
Eric Anholt673a3942008-07-30 12:06:12 -07002660 goto search_free;
2661 }
2662
Chris Wilson4bdadb92010-01-27 13:36:32 +00002663 ret = i915_gem_object_get_pages(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002664 if (ret) {
2665 drm_mm_put_block(obj_priv->gtt_space);
2666 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002667
2668 if (ret == -ENOMEM) {
2669 /* first try to clear up some space from the GTT */
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002670 ret = i915_gem_evict_something(dev, obj->size,
2671 alignment);
Chris Wilson07f73f62009-09-14 16:50:30 +01002672 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002673 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002674 if (gfpmask) {
2675 gfpmask = 0;
2676 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002677 }
2678
2679 return ret;
2680 }
2681
2682 goto search_free;
2683 }
2684
Eric Anholt673a3942008-07-30 12:06:12 -07002685 return ret;
2686 }
2687
Eric Anholt673a3942008-07-30 12:06:12 -07002688 /* Create an AGP memory structure pointing at our pages, and bind it
2689 * into the GTT.
2690 */
2691 obj_priv->agp_mem = drm_agp_bind_pages(dev,
Eric Anholt856fa192009-03-19 14:10:50 -07002692 obj_priv->pages,
Chris Wilson07f73f62009-09-14 16:50:30 +01002693 obj->size >> PAGE_SHIFT,
Chris Wilson9af90d12010-10-17 10:01:56 +01002694 obj_priv->gtt_space->start,
Keith Packardba1eb1d2008-10-14 19:55:10 -07002695 obj_priv->agp_type);
Eric Anholt673a3942008-07-30 12:06:12 -07002696 if (obj_priv->agp_mem == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002697 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002698 drm_mm_put_block(obj_priv->gtt_space);
2699 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002700
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002701 ret = i915_gem_evict_something(dev, obj->size, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01002702 if (ret)
Chris Wilson07f73f62009-09-14 16:50:30 +01002703 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002704
2705 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002706 }
Eric Anholt673a3942008-07-30 12:06:12 -07002707
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002708	/* keep track of the bound object by adding it to the inactive list */
Chris Wilson69dc4982010-10-19 10:36:51 +01002709 list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
Chris Wilson73aa8082010-09-30 11:46:12 +01002710 i915_gem_info_add_gtt(dev_priv, obj->size);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002711
Eric Anholt673a3942008-07-30 12:06:12 -07002712 /* Assert that the object is not currently in any GPU domain. As it
2713 * wasn't in the GTT, there shouldn't be any way it could have been in
2714 * a GPU cache
2715 */
Chris Wilson21d509e2009-06-06 09:46:02 +01002716 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2717 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002718
Chris Wilson9af90d12010-10-17 10:01:56 +01002719 obj_priv->gtt_offset = obj_priv->gtt_space->start;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002720 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2721
Eric Anholt673a3942008-07-30 12:06:12 -07002722 return 0;
2723}
2724
2725void
2726i915_gem_clflush_object(struct drm_gem_object *obj)
2727{
Daniel Vetter23010e42010-03-08 13:35:02 +01002728 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002729
2730 /* If we don't have a page list set up, then we're not pinned
 2731	 * to the GPU, and we can ignore the cache flush because it'll happen
2732 * again at bind time.
2733 */
Eric Anholt856fa192009-03-19 14:10:50 -07002734 if (obj_priv->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002735 return;
2736
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002737 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002738
Eric Anholt856fa192009-03-19 14:10:50 -07002739 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002740}
2741
Eric Anholte47c68e2008-11-14 13:35:19 -08002742/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002743static int
Daniel Vetterde18a292010-11-27 22:30:41 +01002744i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002745{
2746 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002747 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002748
2749 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002750 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002751
2752 /* Queue the GPU write cache flushing we need. */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002753 old_write_domain = obj->write_domain;
Chris Wilsonc78ec302010-09-20 12:50:23 +01002754 i915_gem_flush_ring(dev, NULL,
Chris Wilson92204342010-09-18 11:02:01 +01002755 to_intel_bo(obj)->ring,
2756 0, obj->write_domain);
Chris Wilson48b956c2010-09-14 12:50:34 +01002757 BUG_ON(obj->write_domain);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002758
2759 trace_i915_gem_object_change_domain(obj,
2760 obj->read_domains,
2761 old_write_domain);
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002762
Daniel Vetterde18a292010-11-27 22:30:41 +01002763 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002764}
2765
2766/** Flushes the GTT write domain for the object if it's dirty. */
2767static void
2768i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2769{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002770 uint32_t old_write_domain;
2771
Eric Anholte47c68e2008-11-14 13:35:19 -08002772 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2773 return;
2774
2775 /* No actual flushing is required for the GTT write domain. Writes
2776 * to it immediately go to main memory as far as we know, so there's
2777 * no chipset flush. It also doesn't land in render cache.
2778 */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002779 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002780 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002781
2782 trace_i915_gem_object_change_domain(obj,
2783 obj->read_domains,
2784 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002785}
2786
2787/** Flushes the CPU write domain for the object if it's dirty. */
2788static void
2789i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2790{
2791 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002792 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002793
2794 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2795 return;
2796
2797 i915_gem_clflush_object(obj);
2798 drm_agp_chipset_flush(dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002799 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002800 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002801
2802 trace_i915_gem_object_change_domain(obj,
2803 obj->read_domains,
2804 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002805}
2806
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002807/**
2808 * Moves a single object to the GTT read, and possibly write domain.
2809 *
2810 * This function returns when the move is complete, including waiting on
2811 * flushes to occur.
2812 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002813int
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002814i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2815{
Daniel Vetter23010e42010-03-08 13:35:02 +01002816 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002817 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002818 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002819
Eric Anholt02354392008-11-26 13:58:13 -08002820 /* Not valid to be called on unbound objects. */
2821 if (obj_priv->gtt_space == NULL)
2822 return -EINVAL;
2823
Daniel Vetterde18a292010-11-27 22:30:41 +01002824 ret = i915_gem_object_flush_gpu_write_domain(obj);
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002825 if (ret != 0)
2826 return ret;
Daniel Vetterde18a292010-11-27 22:30:41 +01002827 ret = i915_gem_object_wait_rendering(obj, true);
2828 if (ret)
2829 return ret;
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002830
Chris Wilson72133422010-09-13 23:56:38 +01002831 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002832
Eric Anholte47c68e2008-11-14 13:35:19 -08002833 old_write_domain = obj->write_domain;
2834 old_read_domains = obj->read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002835
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002836 /* It should now be out of any other write domains, and we can update
2837 * the domain values for our changes.
2838 */
2839 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2840 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002841 if (write) {
Chris Wilson72133422010-09-13 23:56:38 +01002842 obj->read_domains = I915_GEM_DOMAIN_GTT;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002843 obj->write_domain = I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002844 obj_priv->dirty = 1;
2845 }
2846
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002847 trace_i915_gem_object_change_domain(obj,
2848 old_read_domains,
2849 old_write_domain);
2850
Eric Anholte47c68e2008-11-14 13:35:19 -08002851 return 0;
2852}
2853
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002854/*
 2855 * Prepare buffer for display plane. Use an uninterruptible wait for any flush,
 2856 * as during modesetting we're not supposed to be interrupted.
2857 */
2858int
Chris Wilson48b956c2010-09-14 12:50:34 +01002859i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
2860 bool pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002861{
Daniel Vetter23010e42010-03-08 13:35:02 +01002862 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002863 uint32_t old_read_domains;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002864 int ret;
2865
2866 /* Not valid to be called on unbound objects. */
2867 if (obj_priv->gtt_space == NULL)
2868 return -EINVAL;
2869
Daniel Vetterde18a292010-11-27 22:30:41 +01002870 ret = i915_gem_object_flush_gpu_write_domain(obj);
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002871 if (ret)
2872 return ret;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002873
Chris Wilsonced270f2010-09-26 22:47:46 +01002874	/* Currently, we are always called from a non-interruptible context. */
2875 if (!pipelined) {
2876 ret = i915_gem_object_wait_rendering(obj, false);
2877 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002878 return ret;
2879 }
2880
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002881 i915_gem_object_flush_cpu_write_domain(obj);
2882
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002883 old_read_domains = obj->read_domains;
Chris Wilsonc78ec302010-09-20 12:50:23 +01002884 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002885
2886 trace_i915_gem_object_change_domain(obj,
2887 old_read_domains,
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002888 obj->write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002889
2890 return 0;
2891}
2892
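/* Flush any pending GPU write domain for @obj and wait for outstanding
 * rendering to complete.  Inactive objects return immediately.
 */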
Chris Wilson85345512010-11-13 09:49:11 +00002893int
2894i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
2895 bool interruptible)
2896{
2897 if (!obj->active)
2898 return 0;
2899
2900 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
2901 i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
2902 0, obj->base.write_domain);
2903
2904 return i915_gem_object_wait_rendering(&obj->base, interruptible);
2905}
2906
Eric Anholte47c68e2008-11-14 13:35:19 -08002907/**
2908 * Moves a single object to the CPU read, and possibly write domain.
2909 *
2910 * This function returns when the move is complete, including waiting on
2911 * flushes to occur.
2912 */
2913static int
2914i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2915{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002916 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002917 int ret;
2918
Daniel Vetterde18a292010-11-27 22:30:41 +01002919 ret = i915_gem_object_flush_gpu_write_domain(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002920 if (ret != 0)
2921 return ret;
Daniel Vetterde18a292010-11-27 22:30:41 +01002922 ret = i915_gem_object_wait_rendering(obj, true);
2923 if (ret)
2924 return ret;
Eric Anholte47c68e2008-11-14 13:35:19 -08002925
2926 i915_gem_object_flush_gtt_write_domain(obj);
2927
2928 /* If we have a partially-valid cache of the object in the CPU,
2929 * finish invalidating it and free the per-page flags.
2930 */
2931 i915_gem_object_set_to_full_cpu_read_domain(obj);
2932
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002933 old_write_domain = obj->write_domain;
2934 old_read_domains = obj->read_domains;
2935
Eric Anholte47c68e2008-11-14 13:35:19 -08002936 /* Flush the CPU cache if it's still invalid. */
2937 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2938 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002939
2940 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2941 }
2942
2943 /* It should now be out of any other write domains, and we can update
2944 * the domain values for our changes.
2945 */
2946 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2947
2948 /* If we're writing through the CPU, then the GPU read domains will
2949 * need to be invalidated at next use.
2950 */
2951 if (write) {
Chris Wilsonc78ec302010-09-20 12:50:23 +01002952 obj->read_domains = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002953 obj->write_domain = I915_GEM_DOMAIN_CPU;
2954 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002955
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002956 trace_i915_gem_object_change_domain(obj,
2957 old_read_domains,
2958 old_write_domain);
2959
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002960 return 0;
2961}
2962
Eric Anholt673a3942008-07-30 12:06:12 -07002963/*
2964 * Set the next domain for the specified object. This
 2965 * may not actually perform the necessary flushing/invalidating though,
 2966 * as that may want to be batched with other set_domain operations.
2967 *
2968 * This is (we hope) the only really tricky part of gem. The goal
2969 * is fairly simple -- track which caches hold bits of the object
2970 * and make sure they remain coherent. A few concrete examples may
2971 * help to explain how it works. For shorthand, we use the notation
 2972 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2973 * a pair of read and write domain masks.
2974 *
2975 * Case 1: the batch buffer
2976 *
2977 * 1. Allocated
2978 * 2. Written by CPU
2979 * 3. Mapped to GTT
2980 * 4. Read by GPU
2981 * 5. Unmapped from GTT
2982 * 6. Freed
2983 *
2984 * Let's take these a step at a time
2985 *
2986 * 1. Allocated
2987 * Pages allocated from the kernel may still have
2988 * cache contents, so we set them to (CPU, CPU) always.
2989 * 2. Written by CPU (using pwrite)
2990 * The pwrite function calls set_domain (CPU, CPU) and
2991 * this function does nothing (as nothing changes)
 2992 * 3. Mapped to GTT
2993 * This function asserts that the object is not
2994 * currently in any GPU-based read or write domains
2995 * 4. Read by GPU
2996 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2997 * As write_domain is zero, this function adds in the
2998 * current read domains (CPU+COMMAND, 0).
2999 * flush_domains is set to CPU.
3000 * invalidate_domains is set to COMMAND
3001 * clflush is run to get data out of the CPU caches
3002 * then i915_dev_set_domain calls i915_gem_flush to
3003 * emit an MI_FLUSH and drm_agp_chipset_flush
3004 * 5. Unmapped from GTT
3005 * i915_gem_object_unbind calls set_domain (CPU, CPU)
3006 * flush_domains and invalidate_domains end up both zero
3007 * so no flushing/invalidating happens
3008 * 6. Freed
3009 * yay, done
3010 *
3011 * Case 2: The shared render buffer
3012 *
3013 * 1. Allocated
3014 * 2. Mapped to GTT
3015 * 3. Read/written by GPU
3016 * 4. set_domain to (CPU,CPU)
3017 * 5. Read/written by CPU
3018 * 6. Read/written by GPU
3019 *
3020 * 1. Allocated
3021 * Same as last example, (CPU, CPU)
3022 * 2. Mapped to GTT
3023 * Nothing changes (assertions find that it is not in the GPU)
3024 * 3. Read/written by GPU
3025 * execbuffer calls set_domain (RENDER, RENDER)
3026 * flush_domains gets CPU
3027 * invalidate_domains gets GPU
3028 * clflush (obj)
3029 * MI_FLUSH and drm_agp_chipset_flush
3030 * 4. set_domain (CPU, CPU)
3031 * flush_domains gets GPU
3032 * invalidate_domains gets CPU
3033 * wait_rendering (obj) to make sure all drawing is complete.
3034 * This will include an MI_FLUSH to get the data from GPU
3035 * to memory
3036 * clflush (obj) to invalidate the CPU cache
3037 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3038 * 5. Read/written by CPU
3039 * cache lines are loaded and dirtied
 3040 * 6. Read/written by GPU
3041 * Same as last GPU access
3042 *
3043 * Case 3: The constant buffer
3044 *
3045 * 1. Allocated
3046 * 2. Written by CPU
3047 * 3. Read by GPU
3048 * 4. Updated (written) by CPU again
3049 * 5. Read by GPU
3050 *
3051 * 1. Allocated
3052 * (CPU, CPU)
3053 * 2. Written by CPU
3054 * (CPU, CPU)
3055 * 3. Read by GPU
3056 * (CPU+RENDER, 0)
3057 * flush_domains = CPU
3058 * invalidate_domains = RENDER
3059 * clflush (obj)
3060 * MI_FLUSH
3061 * drm_agp_chipset_flush
3062 * 4. Updated (written) by CPU again
3063 * (CPU, CPU)
3064 * flush_domains = 0 (no previous write domain)
3065 * invalidate_domains = 0 (no new read domains)
3066 * 5. Read by GPU
3067 * (CPU+RENDER, 0)
3068 * flush_domains = CPU
3069 * invalidate_domains = RENDER
3070 * clflush (obj)
3071 * MI_FLUSH
3072 * drm_agp_chipset_flush
3073 */
Keith Packardc0d90822008-11-20 23:11:08 -08003074static void
Chris Wilsonb6651452010-10-23 10:15:06 +01003075i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
3076 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07003077{
3078 struct drm_device *dev = obj->dev;
Chris Wilson92204342010-09-18 11:02:01 +01003079 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01003080 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003081 uint32_t invalidate_domains = 0;
3082 uint32_t flush_domains = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003083 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003084
Jesse Barnes652c3932009-08-17 13:31:43 -07003085 intel_mark_busy(dev, obj);
3086
Eric Anholt673a3942008-07-30 12:06:12 -07003087 /*
3088 * If the object isn't moving to a new write domain,
3089 * let the object stay in multiple read domains
3090 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003091 if (obj->pending_write_domain == 0)
3092 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003093 else
3094 obj_priv->dirty = 1;
3095
3096 /*
3097 * Flush the current write domain if
3098 * the new read domains don't match. Invalidate
3099 * any read domains which differ from the old
3100 * write domain
3101 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003102 if (obj->write_domain &&
Chris Wilsonc6afd652010-11-01 13:39:24 +00003103 (obj->write_domain != obj->pending_read_domains ||
3104 obj_priv->ring != ring)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003105 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003106 invalidate_domains |=
3107 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07003108 }
3109 /*
3110 * Invalidate any read caches which may have
3111 * stale data. That is, any new read domains.
3112 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003113 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Chris Wilson3d2a8122010-09-29 11:39:53 +01003114 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
Eric Anholt673a3942008-07-30 12:06:12 -07003115 i915_gem_clflush_object(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003116
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003117 old_read_domains = obj->read_domains;
3118
Eric Anholtefbeed92009-02-19 14:54:51 -08003119 /* The actual obj->write_domain will be updated with
3120 * pending_write_domain after we emit the accumulated flush for all
3121 * of our domain changes in execbuffers (which clears objects'
3122 * write_domains). So if we have a current write domain that we
3123 * aren't changing, set pending_write_domain to that.
3124 */
3125 if (flush_domains == 0 && obj->pending_write_domain == 0)
3126 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003127 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003128
3129 dev->invalidate_domains |= invalidate_domains;
3130 dev->flush_domains |= flush_domains;
Chris Wilsonb6651452010-10-23 10:15:06 +01003131 if (flush_domains & I915_GEM_GPU_DOMAINS)
Chris Wilson92204342010-09-18 11:02:01 +01003132 dev_priv->mm.flush_rings |= obj_priv->ring->id;
Chris Wilsonb6651452010-10-23 10:15:06 +01003133 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
3134 dev_priv->mm.flush_rings |= ring->id;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003135
3136 trace_i915_gem_object_change_domain(obj,
3137 old_read_domains,
3138 obj->write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003139}
3140
3141/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003142 * Moves the object from a partially CPU read to a full one.
Eric Anholt673a3942008-07-30 12:06:12 -07003143 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003144 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3145 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3146 */
3147static void
3148i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3149{
Daniel Vetter23010e42010-03-08 13:35:02 +01003150 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003151
3152 if (!obj_priv->page_cpu_valid)
3153 return;
3154
3155 /* If we're partially in the CPU read domain, finish moving it in.
3156 */
3157 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3158 int i;
3159
3160 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3161 if (obj_priv->page_cpu_valid[i])
3162 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07003163 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003164 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003165 }
3166
3167 /* Free the page_cpu_valid mappings which are now stale, whether
3168 * or not we've got I915_GEM_DOMAIN_CPU.
3169 */
Eric Anholt9a298b22009-03-24 12:23:04 -07003170 kfree(obj_priv->page_cpu_valid);
Eric Anholte47c68e2008-11-14 13:35:19 -08003171 obj_priv->page_cpu_valid = NULL;
3172}
3173
3174/**
3175 * Set the CPU read domain on a range of the object.
3176 *
3177 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3178 * not entirely valid. The page_cpu_valid member of the object flags which
3179 * pages have been flushed, and will be respected by
3180 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3181 * of the whole object.
3182 *
3183 * This function returns when the move is complete, including waiting on
3184 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003185 */
3186static int
Eric Anholte47c68e2008-11-14 13:35:19 -08003187i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3188 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003189{
Daniel Vetter23010e42010-03-08 13:35:02 +01003190 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003191 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003192 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003193
Eric Anholte47c68e2008-11-14 13:35:19 -08003194 if (offset == 0 && size == obj->size)
3195 return i915_gem_object_set_to_cpu_domain(obj, 0);
3196
Daniel Vetterde18a292010-11-27 22:30:41 +01003197 ret = i915_gem_object_flush_gpu_write_domain(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003198 if (ret != 0)
3199 return ret;
Daniel Vetterde18a292010-11-27 22:30:41 +01003200 ret = i915_gem_object_wait_rendering(obj, true);
3201 if (ret)
3202 return ret;
3203
Eric Anholte47c68e2008-11-14 13:35:19 -08003204 i915_gem_object_flush_gtt_write_domain(obj);
3205
3206 /* If we're already fully in the CPU read domain, we're done. */
3207 if (obj_priv->page_cpu_valid == NULL &&
3208 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003209 return 0;
3210
Eric Anholte47c68e2008-11-14 13:35:19 -08003211 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3212 * newly adding I915_GEM_DOMAIN_CPU
3213 */
Eric Anholt673a3942008-07-30 12:06:12 -07003214 if (obj_priv->page_cpu_valid == NULL) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003215 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3216 GFP_KERNEL);
Eric Anholte47c68e2008-11-14 13:35:19 -08003217 if (obj_priv->page_cpu_valid == NULL)
3218 return -ENOMEM;
3219 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3220 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003221
3222 /* Flush the cache on any pages that are still invalid from the CPU's
3223 * perspective.
3224 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003225 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3226 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07003227 if (obj_priv->page_cpu_valid[i])
3228 continue;
3229
Eric Anholt856fa192009-03-19 14:10:50 -07003230 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003231
3232 obj_priv->page_cpu_valid[i] = 1;
3233 }
3234
Eric Anholte47c68e2008-11-14 13:35:19 -08003235 /* It should now be out of any other write domains, and we can update
3236 * the domain values for our changes.
3237 */
3238 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3239
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003240 old_read_domains = obj->read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003241 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3242
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003243 trace_i915_gem_object_change_domain(obj,
3244 old_read_domains,
3245 obj->write_domain);
3246
Eric Anholt673a3942008-07-30 12:06:12 -07003247 return 0;
3248}
3249
Eric Anholt673a3942008-07-30 12:06:12 -07003250static int
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003251i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
3252 struct drm_file *file_priv,
3253 struct drm_i915_gem_exec_object2 *entry,
3254 struct drm_i915_gem_relocation_entry *reloc)
Eric Anholt673a3942008-07-30 12:06:12 -07003255{
Chris Wilson9af90d12010-10-17 10:01:56 +01003256 struct drm_device *dev = obj->base.dev;
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003257 struct drm_gem_object *target_obj;
3258 uint32_t target_offset;
3259 int ret = -EINVAL;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003260
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003261 target_obj = drm_gem_object_lookup(dev, file_priv,
3262 reloc->target_handle);
3263 if (target_obj == NULL)
3264 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07003265
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003266 target_offset = to_intel_bo(target_obj)->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003267
Chris Wilson8542a0b2009-09-09 21:15:15 +01003268#if WATCH_RELOC
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003269 DRM_INFO("%s: obj %p offset %08x target %d "
3270 "read %08x write %08x gtt %08x "
3271 "presumed %08x delta %08x\n",
3272 __func__,
3273 obj,
3274 (int) reloc->offset,
3275 (int) reloc->target_handle,
3276 (int) reloc->read_domains,
3277 (int) reloc->write_domain,
3278 (int) target_offset,
3279 (int) reloc->presumed_offset,
3280 reloc->delta);
Chris Wilson8542a0b2009-09-09 21:15:15 +01003281#endif
3282
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003283 /* The target buffer should have appeared before us in the
3284 * exec_object list, so it should have a GTT space bound by now.
3285 */
3286 if (target_offset == 0) {
3287 DRM_ERROR("No GTT space found for object %d\n",
3288 reloc->target_handle);
3289 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003290 }
3291
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003292 /* Validate that the target is in a valid r/w GPU domain */
3293 if (reloc->write_domain & (reloc->write_domain - 1)) {
3294 DRM_ERROR("reloc with multiple write domains: "
3295 "obj %p target %d offset %d "
3296 "read %08x write %08x",
3297 obj, reloc->target_handle,
3298 (int) reloc->offset,
3299 reloc->read_domains,
3300 reloc->write_domain);
3301 goto err;
3302 }
3303 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3304 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3305 DRM_ERROR("reloc with read/write CPU domains: "
3306 "obj %p target %d offset %d "
3307 "read %08x write %08x",
3308 obj, reloc->target_handle,
3309 (int) reloc->offset,
3310 reloc->read_domains,
3311 reloc->write_domain);
3312 goto err;
3313 }
3314 if (reloc->write_domain && target_obj->pending_write_domain &&
3315 reloc->write_domain != target_obj->pending_write_domain) {
3316 DRM_ERROR("Write domain conflict: "
3317 "obj %p target %d offset %d "
3318 "new %08x old %08x\n",
3319 obj, reloc->target_handle,
3320 (int) reloc->offset,
3321 reloc->write_domain,
3322 target_obj->pending_write_domain);
3323 goto err;
3324 }
3325
3326 target_obj->pending_read_domains |= reloc->read_domains;
3327 target_obj->pending_write_domain |= reloc->write_domain;
3328
3329 /* If the relocation already has the right value in it, no
3330 * more work needs to be done.
3331 */
3332 if (target_offset == reloc->presumed_offset)
3333 goto out;
3334
3335 /* Check that the relocation address is valid... */
3336 if (reloc->offset > obj->base.size - 4) {
3337 DRM_ERROR("Relocation beyond object bounds: "
3338 "obj %p target %d offset %d size %d.\n",
3339 obj, reloc->target_handle,
3340 (int) reloc->offset,
3341 (int) obj->base.size);
3342 goto err;
3343 }
3344 if (reloc->offset & 3) {
3345 DRM_ERROR("Relocation not 4-byte aligned: "
3346 "obj %p target %d offset %d.\n",
3347 obj, reloc->target_handle,
3348 (int) reloc->offset);
3349 goto err;
3350 }
3351
3352 /* and points to somewhere within the target object. */
3353 if (reloc->delta >= target_obj->size) {
3354 DRM_ERROR("Relocation beyond target object bounds: "
3355 "obj %p target %d delta %d size %d.\n",
3356 obj, reloc->target_handle,
3357 (int) reloc->delta,
3358 (int) target_obj->size);
3359 goto err;
3360 }
3361
3362 reloc->delta += target_offset;
3363 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
3364 uint32_t page_offset = reloc->offset & ~PAGE_MASK;
3365 char *vaddr;
3366
3367 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
3368 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
3369 kunmap_atomic(vaddr);
3370 } else {
3371 struct drm_i915_private *dev_priv = dev->dev_private;
3372 uint32_t __iomem *reloc_entry;
3373 void __iomem *reloc_page;
3374
3375 ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
3376 if (ret)
3377 goto err;
3378
3379 /* Map the page containing the relocation we're going to perform. */
3380 reloc->offset += obj->gtt_offset;
3381 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3382 reloc->offset & PAGE_MASK);
3383 reloc_entry = (uint32_t __iomem *)
3384 (reloc_page + (reloc->offset & ~PAGE_MASK));
3385 iowrite32(reloc->delta, reloc_entry);
3386 io_mapping_unmap_atomic(reloc_page);
3387 }
3388
3389 /* and update the user's relocation entry */
3390 reloc->presumed_offset = target_offset;
3391
3392out:
3393 ret = 0;
3394err:
Chris Wilson9af90d12010-10-17 10:01:56 +01003395 drm_gem_object_unreference(target_obj);
3396 return ret;
3397}
3398
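/* Fast relocation path: copy each relocation entry from user space, apply
 * it, and write the updated presumed_offset back.  Any fault (-EFAULT)
 * here sends the caller to the slow path, which drops the lock first.
 */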
3399static int
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003400i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
3401 struct drm_file *file_priv,
3402 struct drm_i915_gem_exec_object2 *entry)
3403{
3404 struct drm_i915_gem_relocation_entry __user *user_relocs;
3405 int i, ret;
3406
3407 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
3408 for (i = 0; i < entry->relocation_count; i++) {
3409 struct drm_i915_gem_relocation_entry reloc;
3410
3411 if (__copy_from_user_inatomic(&reloc,
3412 user_relocs+i,
3413 sizeof(reloc)))
3414 return -EFAULT;
3415
3416 ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
3417 if (ret)
3418 return ret;
3419
3420 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
3421 &reloc.presumed_offset,
3422 sizeof(reloc.presumed_offset)))
3423 return -EFAULT;
3424 }
3425
3426 return 0;
3427}
3428
3429static int
3430i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
3431 struct drm_file *file_priv,
3432 struct drm_i915_gem_exec_object2 *entry,
3433 struct drm_i915_gem_relocation_entry *relocs)
3434{
3435 int i, ret;
3436
3437 for (i = 0; i < entry->relocation_count; i++) {
3438 ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
3439 if (ret)
3440 return ret;
3441 }
3442
3443 return 0;
3444}
3445
3446static int
3447i915_gem_execbuffer_relocate(struct drm_device *dev,
3448 struct drm_file *file,
3449 struct drm_gem_object **object_list,
3450 struct drm_i915_gem_exec_object2 *exec_list,
3451 int count)
3452{
3453 int i, ret;
3454
3455 for (i = 0; i < count; i++) {
3456 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3457 obj->base.pending_read_domains = 0;
3458 obj->base.pending_write_domain = 0;
3459 ret = i915_gem_execbuffer_relocate_object(obj, file,
3460 &exec_list[i]);
3461 if (ret)
3462 return ret;
3463 }
3464
3465 return 0;
3466}
3467
3468static int
3469i915_gem_execbuffer_reserve(struct drm_device *dev,
3470 struct drm_file *file,
3471 struct drm_gem_object **object_list,
3472 struct drm_i915_gem_exec_object2 *exec_list,
3473 int count)
Chris Wilson9af90d12010-10-17 10:01:56 +01003474{
3475 struct drm_i915_private *dev_priv = dev->dev_private;
3476 int ret, i, retry;
3477
3478 /* attempt to pin all of the buffers into the GTT */
3479 for (retry = 0; retry < 2; retry++) {
3480 ret = 0;
3481 for (i = 0; i < count; i++) {
3482 struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
 3483			struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3484 bool need_fence =
3485 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3486 obj->tiling_mode != I915_TILING_NONE;
3487
3488 /* Check fence reg constraints and rebind if necessary */
3489 if (need_fence &&
3490 !i915_gem_object_fence_offset_ok(&obj->base,
3491 obj->tiling_mode)) {
3492 ret = i915_gem_object_unbind(&obj->base);
3493 if (ret)
3494 break;
3495 }
3496
3497 ret = i915_gem_object_pin(&obj->base, entry->alignment);
3498 if (ret)
3499 break;
3500
3501 /*
3502 * Pre-965 chips need a fence register set up in order
3503 * to properly handle blits to/from tiled surfaces.
3504 */
3505 if (need_fence) {
3506 ret = i915_gem_object_get_fence_reg(&obj->base, true);
3507 if (ret) {
3508 i915_gem_object_unpin(&obj->base);
3509 break;
3510 }
3511
3512 dev_priv->fence_regs[obj->fence_reg].gpu = true;
3513 }
3514
3515 entry->offset = obj->gtt_offset;
3516 }
3517
3518 while (i--)
3519 i915_gem_object_unpin(object_list[i]);
3520
3521 if (ret == 0)
3522 break;
3523
3524 if (ret != -ENOSPC || retry)
3525 return ret;
3526
3527 ret = i915_gem_evict_everything(dev);
3528 if (ret)
3529 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003530 }
3531
Eric Anholt673a3942008-07-30 12:06:12 -07003532 return 0;
3533}
3534
Chris Wilsonc6afd652010-11-01 13:39:24 +00003535static int
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003536i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
3537 struct drm_file *file,
3538 struct drm_gem_object **object_list,
3539 struct drm_i915_gem_exec_object2 *exec_list,
3540 int count)
3541{
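	/* Slow path: a relocation copy faulted under struct_mutex.  Drop the
	 * lock, copy every relocation entry into a kernel buffer (which may
	 * fault and block), then retake the lock, re-reserve the objects and
	 * apply the relocations from that copy.
	 */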
3542 struct drm_i915_gem_relocation_entry *reloc;
3543 int i, total, ret;
3544
3545 for (i = 0; i < count; i++) {
3546 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3547 obj->in_execbuffer = false;
3548 }
3549
3550 mutex_unlock(&dev->struct_mutex);
3551
3552 total = 0;
3553 for (i = 0; i < count; i++)
3554 total += exec_list[i].relocation_count;
3555
3556 reloc = drm_malloc_ab(total, sizeof(*reloc));
3557 if (reloc == NULL) {
3558 mutex_lock(&dev->struct_mutex);
3559 return -ENOMEM;
3560 }
3561
3562 total = 0;
3563 for (i = 0; i < count; i++) {
3564 struct drm_i915_gem_relocation_entry __user *user_relocs;
3565
3566 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3567
3568 if (copy_from_user(reloc+total, user_relocs,
3569 exec_list[i].relocation_count *
3570 sizeof(*reloc))) {
3571 ret = -EFAULT;
3572 mutex_lock(&dev->struct_mutex);
3573 goto err;
3574 }
3575
3576 total += exec_list[i].relocation_count;
3577 }
3578
3579 ret = i915_mutex_lock_interruptible(dev);
3580 if (ret) {
3581 mutex_lock(&dev->struct_mutex);
3582 goto err;
3583 }
3584
3585 ret = i915_gem_execbuffer_reserve(dev, file,
3586 object_list, exec_list,
3587 count);
3588 if (ret)
3589 goto err;
3590
3591 total = 0;
3592 for (i = 0; i < count; i++) {
3593 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3594 obj->base.pending_read_domains = 0;
3595 obj->base.pending_write_domain = 0;
3596 ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
3597 &exec_list[i],
3598 reloc + total);
3599 if (ret)
3600 goto err;
3601
3602 total += exec_list[i].relocation_count;
3603 }
3604
 3605	/* Leave the user relocations as they are; this is the painfully slow path,
3606 * and we want to avoid the complication of dropping the lock whilst
3607 * having buffers reserved in the aperture and so causing spurious
3608 * ENOSPC for random operations.
3609 */
3610
3611err:
3612 drm_free_large(reloc);
3613 return ret;
3614}
3615
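/* Compute the new GPU domains for every object in the execbuffer, emit the
 * accumulated flushes/invalidations, and wait for objects still busy on a
 * different ring (no inter-ring semaphores yet).
 */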
3616static int
Chris Wilsonc6afd652010-11-01 13:39:24 +00003617i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
3618 struct drm_file *file,
3619 struct intel_ring_buffer *ring,
3620 struct drm_gem_object **objects,
3621 int count)
3622{
3623 struct drm_i915_private *dev_priv = dev->dev_private;
3624 int ret, i;
3625
3626 /* Zero the global flush/invalidate flags. These
3627 * will be modified as new domains are computed
3628 * for each object
3629 */
3630 dev->invalidate_domains = 0;
3631 dev->flush_domains = 0;
3632 dev_priv->mm.flush_rings = 0;
3633 for (i = 0; i < count; i++)
3634 i915_gem_object_set_to_gpu_domain(objects[i], ring);
3635
3636 if (dev->invalidate_domains | dev->flush_domains) {
3637#if WATCH_EXEC
3638 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3639 __func__,
3640 dev->invalidate_domains,
3641 dev->flush_domains);
3642#endif
3643 i915_gem_flush(dev, file,
3644 dev->invalidate_domains,
3645 dev->flush_domains,
3646 dev_priv->mm.flush_rings);
3647 }
3648
3649 for (i = 0; i < count; i++) {
3650 struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
3651 /* XXX replace with semaphores */
3652 if (obj->ring && ring != obj->ring) {
3653 ret = i915_gem_object_wait_rendering(&obj->base, true);
3654 if (ret)
3655 return ret;
3656 }
3657 }
3658
3659 return 0;
3660}
3661
Eric Anholt673a3942008-07-30 12:06:12 -07003662/* Throttle our rendering by waiting until the ring has completed our requests
3663 * emitted over 20 msec ago.
3664 *
Eric Anholtb9624422009-06-03 07:27:35 +00003665 * Note that if we were to use the current jiffies each time around the loop,
3666 * we wouldn't escape the function with any frames outstanding if the time to
3667 * render a frame was over 20ms.
3668 *
Eric Anholt673a3942008-07-30 12:06:12 -07003669 * This should get us reasonable parallelism between CPU and GPU but also
3670 * relatively low latency when blocking on a particular request to finish.
3671 */
3672static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003673i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003674{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003675 struct drm_i915_private *dev_priv = dev->dev_private;
3676 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003677 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003678 struct drm_i915_gem_request *request;
3679 struct intel_ring_buffer *ring = NULL;
3680 u32 seqno = 0;
3681 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003682
Chris Wilson1c255952010-09-26 11:03:27 +01003683 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003684 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003685 if (time_after_eq(request->emitted_jiffies, recent_enough))
3686 break;
3687
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003688 ring = request->ring;
3689 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003690 }
Chris Wilson1c255952010-09-26 11:03:27 +01003691 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003692
3693 if (seqno == 0)
3694 return 0;
3695
3696 ret = 0;
3697 if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
3698 /* And wait for the seqno passing without holding any locks and
3699 * causing extra latency for others. This is safe as the irq
3700 * generation is designed to be run atomically and so is
3701 * lockless.
3702 */
3703 ring->user_irq_get(dev, ring);
3704 ret = wait_event_interruptible(ring->irq_queue,
3705 i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
3706 || atomic_read(&dev_priv->mm.wedged));
3707 ring->user_irq_put(dev, ring);
3708
3709 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3710 ret = -EIO;
3711 }
3712
3713 if (ret == 0)
3714 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003715
Eric Anholt673a3942008-07-30 12:06:12 -07003716 return ret;
3717}
3718
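/* Sanity check the batch: start and length must be 8-byte aligned and the
 * batch must not begin at offset zero.
 */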
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003719static int
Chris Wilson2549d6c2010-10-14 12:10:41 +01003720i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
3721 uint64_t exec_offset)
Chris Wilson83d60792009-06-06 09:45:57 +01003722{
3723 uint32_t exec_start, exec_len;
3724
3725 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3726 exec_len = (uint32_t) exec->batch_len;
3727
3728 if ((exec_start | exec_len) & 0x7)
3729 return -EINVAL;
3730
3731 if (!exec_start)
3732 return -EINVAL;
3733
3734 return 0;
3735}
3736
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003737static int
Chris Wilson2549d6c2010-10-14 12:10:41 +01003738validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
3739 int count)
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003740{
Chris Wilson2549d6c2010-10-14 12:10:41 +01003741 int i;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003742
Chris Wilson2549d6c2010-10-14 12:10:41 +01003743 for (i = 0; i < count; i++) {
3744 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
Chris Wilsond1d78832010-11-21 09:23:48 +00003745 int length; /* limited by fault_in_pages_readable() */
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003746
Chris Wilsond1d78832010-11-21 09:23:48 +00003747 /* First check for malicious input causing overflow */
3748 if (exec[i].relocation_count >
3749 INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
3750 return -EINVAL;
3751
3752 length = exec[i].relocation_count *
3753 sizeof(struct drm_i915_gem_relocation_entry);
Chris Wilson2549d6c2010-10-14 12:10:41 +01003754 if (!access_ok(VERIFY_READ, ptr, length))
3755 return -EFAULT;
3756
Chris Wilsonb5dc6082010-10-20 20:59:57 +01003757 /* we may also need to update the presumed offsets */
3758 if (!access_ok(VERIFY_WRITE, ptr, length))
3759 return -EFAULT;
3760
Chris Wilson2549d6c2010-10-14 12:10:41 +01003761 if (fault_in_pages_readable(ptr, length))
3762 return -EFAULT;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003763 }
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003764
Chris Wilson2549d6c2010-10-14 12:10:41 +01003765 return 0;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003766}
3767
Chris Wilson2549d6c2010-10-14 12:10:41 +01003768static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003769i915_gem_do_execbuffer(struct drm_device *dev, void *data,
Chris Wilson9af90d12010-10-17 10:01:56 +01003770 struct drm_file *file,
Jesse Barnes76446ca2009-12-17 22:05:42 -05003771 struct drm_i915_gem_execbuffer2 *args,
3772 struct drm_i915_gem_exec_object2 *exec_list)
Eric Anholt673a3942008-07-30 12:06:12 -07003773{
3774 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003775 struct drm_gem_object **object_list = NULL;
3776 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003777 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003778 struct drm_clip_rect *cliprects = NULL;
Chris Wilson8dc5d142010-08-12 12:36:12 +01003779 struct drm_i915_gem_request *request = NULL;
Chris Wilson9af90d12010-10-17 10:01:56 +01003780 int ret, i, flips;
Eric Anholt673a3942008-07-30 12:06:12 -07003781 uint64_t exec_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003782
Zou Nan hai852835f2010-05-21 09:08:56 +08003783 struct intel_ring_buffer *ring = NULL;
3784
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003785 ret = i915_gem_check_is_wedged(dev);
3786 if (ret)
3787 return ret;
3788
Chris Wilson2549d6c2010-10-14 12:10:41 +01003789 ret = validate_exec_list(exec_list, args->buffer_count);
3790 if (ret)
3791 return ret;
3792
Eric Anholt673a3942008-07-30 12:06:12 -07003793#if WATCH_EXEC
3794 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3795 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3796#endif
Chris Wilson549f7362010-10-19 11:19:32 +01003797 switch (args->flags & I915_EXEC_RING_MASK) {
3798 case I915_EXEC_DEFAULT:
3799 case I915_EXEC_RENDER:
3800 ring = &dev_priv->render_ring;
3801 break;
3802 case I915_EXEC_BSD:
Zou Nan haid1b851f2010-05-21 09:08:57 +08003803 if (!HAS_BSD(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01003804 DRM_ERROR("execbuf with invalid ring (BSD)\n");
Zou Nan haid1b851f2010-05-21 09:08:57 +08003805 return -EINVAL;
3806 }
3807 ring = &dev_priv->bsd_ring;
Chris Wilson549f7362010-10-19 11:19:32 +01003808 break;
3809 case I915_EXEC_BLT:
3810 if (!HAS_BLT(dev)) {
3811 DRM_ERROR("execbuf with invalid ring (BLT)\n");
3812 return -EINVAL;
3813 }
3814 ring = &dev_priv->blt_ring;
3815 break;
3816 default:
3817 DRM_ERROR("execbuf with unknown ring: %d\n",
3818 (int)(args->flags & I915_EXEC_RING_MASK));
3819 return -EINVAL;
Zou Nan haid1b851f2010-05-21 09:08:57 +08003820 }
3821
Eric Anholt4f481ed2008-09-10 14:22:49 -07003822 if (args->buffer_count < 1) {
3823 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3824 return -EINVAL;
3825 }
Eric Anholtc8e0f932009-11-22 03:49:37 +01003826 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
Jesse Barnes76446ca2009-12-17 22:05:42 -05003827 if (object_list == NULL) {
3828 DRM_ERROR("Failed to allocate object list for %d buffers\n",
Eric Anholt673a3942008-07-30 12:06:12 -07003829 args->buffer_count);
3830 ret = -ENOMEM;
3831 goto pre_mutex_err;
3832 }
Eric Anholt673a3942008-07-30 12:06:12 -07003833
Eric Anholt201361a2009-03-11 12:30:04 -07003834 if (args->num_cliprects != 0) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003835 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3836 GFP_KERNEL);
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003837 if (cliprects == NULL) {
3838 ret = -ENOMEM;
Eric Anholt201361a2009-03-11 12:30:04 -07003839 goto pre_mutex_err;
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003840 }
Eric Anholt201361a2009-03-11 12:30:04 -07003841
3842 ret = copy_from_user(cliprects,
3843 (struct drm_clip_rect __user *)
3844 (uintptr_t) args->cliprects_ptr,
3845 sizeof(*cliprects) * args->num_cliprects);
3846 if (ret != 0) {
3847 DRM_ERROR("copy %d cliprects failed: %d\n",
3848 args->num_cliprects, ret);
Dan Carpenterc877cdce2010-06-23 19:03:01 +02003849 ret = -EFAULT;
Eric Anholt201361a2009-03-11 12:30:04 -07003850 goto pre_mutex_err;
3851 }
3852 }
3853
Chris Wilson8dc5d142010-08-12 12:36:12 +01003854 request = kzalloc(sizeof(*request), GFP_KERNEL);
3855 if (request == NULL) {
3856 ret = -ENOMEM;
Chris Wilsona198bc82009-02-06 16:55:20 +00003857 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003858 }
3859
Chris Wilson76c1dec2010-09-25 11:22:51 +01003860 ret = i915_mutex_lock_interruptible(dev);
3861 if (ret)
3862 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003863
Eric Anholt673a3942008-07-30 12:06:12 -07003864 if (dev_priv->mm.suspended) {
Eric Anholt673a3942008-07-30 12:06:12 -07003865 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003866 ret = -EBUSY;
3867 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003868 }
3869
Keith Packardac94a962008-11-20 23:30:27 -08003870 /* Look up object handles */
Eric Anholt673a3942008-07-30 12:06:12 -07003871 for (i = 0; i < args->buffer_count; i++) {
Chris Wilson9af90d12010-10-17 10:01:56 +01003872 object_list[i] = drm_gem_object_lookup(dev, file,
Eric Anholt673a3942008-07-30 12:06:12 -07003873 exec_list[i].handle);
3874 if (object_list[i] == NULL) {
3875 DRM_ERROR("Invalid object handle %d at index %d\n",
3876 exec_list[i].handle, i);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003877 /* prevent error path from reading uninitialized data */
3878 args->buffer_count = i + 1;
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003879 ret = -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07003880 goto err;
3881 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003882
Daniel Vetter23010e42010-03-08 13:35:02 +01003883 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003884 if (obj_priv->in_execbuffer) {
3885 DRM_ERROR("Object %p appears more than once in object list\n",
3886 object_list[i]);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003887 /* prevent error path from reading uninitialized data */
3888 args->buffer_count = i + 1;
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003889 ret = -EINVAL;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003890 goto err;
3891 }
3892 obj_priv->in_execbuffer = true;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003893 }
3894
Chris Wilson9af90d12010-10-17 10:01:56 +01003895	/* Move the objects en masse into the GTT, evicting if necessary. */
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003896 ret = i915_gem_execbuffer_reserve(dev, file,
3897 object_list, exec_list,
3898 args->buffer_count);
Chris Wilson9af90d12010-10-17 10:01:56 +01003899 if (ret)
3900 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003901
Chris Wilson9af90d12010-10-17 10:01:56 +01003902 /* The objects are in their final locations, apply the relocations. */
Chris Wilsonbcf50e22010-11-21 22:07:12 +00003903 ret = i915_gem_execbuffer_relocate(dev, file,
3904 object_list, exec_list,
3905 args->buffer_count);
3906 if (ret) {
3907 if (ret == -EFAULT) {
3908 ret = i915_gem_execbuffer_relocate_slow(dev, file,
3909 object_list,
3910 exec_list,
3911 args->buffer_count);
3912 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
3913 }
Eric Anholt673a3942008-07-30 12:06:12 -07003914 if (ret)
3915 goto err;
3916 }
3917
Eric Anholt673a3942008-07-30 12:06:12 -07003918 /* Set the pending read domains for the batch buffer to COMMAND */
3919 batch_obj = object_list[args->buffer_count-1];
Chris Wilson5f26a2c2009-06-06 09:45:58 +01003920 if (batch_obj->pending_write_domain) {
3921 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3922 ret = -EINVAL;
3923 goto err;
3924 }
3925 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
Eric Anholt673a3942008-07-30 12:06:12 -07003926
Chris Wilson9af90d12010-10-17 10:01:56 +01003927 /* Sanity check the batch buffer */
3928 exec_offset = to_intel_bo(batch_obj)->gtt_offset;
3929 ret = i915_gem_check_execbuffer(args, exec_offset);
Chris Wilson83d60792009-06-06 09:45:57 +01003930 if (ret != 0) {
3931 DRM_ERROR("execbuf with invalid offset/length\n");
3932 goto err;
3933 }
3934
Chris Wilsonc6afd652010-11-01 13:39:24 +00003935 ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
3936 object_list, args->buffer_count);
3937 if (ret)
3938 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003939
Eric Anholtefbeed92009-02-19 14:54:51 -08003940 for (i = 0; i < args->buffer_count; i++) {
3941 struct drm_gem_object *obj = object_list[i];
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003942 uint32_t old_write_domain = obj->write_domain;
Eric Anholtefbeed92009-02-19 14:54:51 -08003943 obj->write_domain = obj->pending_write_domain;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003944 trace_i915_gem_object_change_domain(obj,
3945 obj->read_domains,
3946 old_write_domain);
Eric Anholtefbeed92009-02-19 14:54:51 -08003947 }
3948
Eric Anholt673a3942008-07-30 12:06:12 -07003949#if WATCH_COHERENCY
3950 for (i = 0; i < args->buffer_count; i++) {
3951 i915_gem_object_check_coherency(object_list[i],
3952 exec_list[i].handle);
3953 }
3954#endif
3955
Eric Anholt673a3942008-07-30 12:06:12 -07003956#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07003957 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07003958 args->batch_len,
3959 __func__,
3960 ~0);
3961#endif
3962
Chris Wilsone59f2ba2010-10-07 17:28:15 +01003963 /* Check for any pending flips. As we only maintain a flip queue depth
3964 * of 1, we can simply insert a WAIT for the next display flip prior
3965 * to executing the batch and avoid stalling the CPU.
3966 */
3967 flips = 0;
3968 for (i = 0; i < args->buffer_count; i++) {
3969 if (object_list[i]->write_domain)
3970 flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
3971 }
3972 if (flips) {
3973 int plane, flip_mask;
3974
3975 for (plane = 0; flips >> plane; plane++) {
3976 if (((flips >> plane) & 1) == 0)
3977 continue;
3978
3979 if (plane)
3980 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
3981 else
3982 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
3983
3984 intel_ring_begin(dev, ring, 2);
3985 intel_ring_emit(dev, ring,
3986 MI_WAIT_FOR_EVENT | flip_mask);
3987 intel_ring_emit(dev, ring, MI_NOOP);
3988 intel_ring_advance(dev, ring);
3989 }
3990 }
3991
Eric Anholt673a3942008-07-30 12:06:12 -07003992 /* Exec the batchbuffer */
Zou Nan hai852835f2010-05-21 09:08:56 +08003993 ret = ring->dispatch_gem_execbuffer(dev, ring, args,
Chris Wilsone59f2ba2010-10-07 17:28:15 +01003994 cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003995 if (ret) {
3996 DRM_ERROR("dispatch failed %d\n", ret);
3997 goto err;
3998 }
3999
4000 /*
4001 * Ensure that the commands in the batch buffer are
4002 * finished before the interrupt fires
4003 */
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01004004 i915_retire_commands(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07004005
Eric Anholt673a3942008-07-30 12:06:12 -07004006 for (i = 0; i < args->buffer_count; i++) {
4007 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07004008
Daniel Vetter617dbe22010-02-11 22:16:02 +01004009 i915_gem_object_move_to_active(obj, ring);
Chris Wilson64193402010-10-24 12:38:05 +01004010 if (obj->write_domain)
4011 list_move_tail(&to_intel_bo(obj)->gpu_write_list,
4012 &ring->gpu_write_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004013 }
Eric Anholt673a3942008-07-30 12:06:12 -07004014
Chris Wilson9af90d12010-10-17 10:01:56 +01004015 i915_add_request(dev, file, request, ring);
Chris Wilson8dc5d142010-08-12 12:36:12 +01004016 request = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07004017
Eric Anholt673a3942008-07-30 12:06:12 -07004018err:
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05004019 for (i = 0; i < args->buffer_count; i++) {
4020 if (object_list[i]) {
Daniel Vetter23010e42010-03-08 13:35:02 +01004021 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05004022 obj_priv->in_execbuffer = false;
4023 }
Julia Lawallaad87df2008-12-21 16:28:47 +01004024 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05004025 }
Julia Lawallaad87df2008-12-21 16:28:47 +01004026
Eric Anholt673a3942008-07-30 12:06:12 -07004027 mutex_unlock(&dev->struct_mutex);
4028
Chris Wilson93533c22010-01-31 10:40:48 +00004029pre_mutex_err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07004030 drm_free_large(object_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07004031 kfree(cliprects);
Chris Wilson8dc5d142010-08-12 12:36:12 +01004032 kfree(request);
Eric Anholt673a3942008-07-30 12:06:12 -07004033
4034 return ret;
4035}
4036
Jesse Barnes76446ca2009-12-17 22:05:42 -05004037/*
4038 * Legacy execbuffer just creates an exec2 list from the original exec object
4039 * list array and passes it to the real function.
4040 */
4041int
4042i915_gem_execbuffer(struct drm_device *dev, void *data,
4043 struct drm_file *file_priv)
4044{
4045 struct drm_i915_gem_execbuffer *args = data;
4046 struct drm_i915_gem_execbuffer2 exec2;
4047 struct drm_i915_gem_exec_object *exec_list = NULL;
4048 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4049 int ret, i;
4050
4051#if WATCH_EXEC
4052 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4053 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4054#endif
4055
4056 if (args->buffer_count < 1) {
4057 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
4058 return -EINVAL;
4059 }
4060
4061 /* Copy in the exec list from userland */
4062 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
4063 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4064 if (exec_list == NULL || exec2_list == NULL) {
4065 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4066 args->buffer_count);
4067 drm_free_large(exec_list);
4068 drm_free_large(exec2_list);
4069 return -ENOMEM;
4070 }
4071 ret = copy_from_user(exec_list,
4072 (struct drm_i915_relocation_entry __user *)
4073 (uintptr_t) args->buffers_ptr,
4074 sizeof(*exec_list) * args->buffer_count);
4075 if (ret != 0) {
4076 DRM_ERROR("copy %d exec entries failed %d\n",
4077 args->buffer_count, ret);
4078 drm_free_large(exec_list);
4079 drm_free_large(exec2_list);
4080 return -EFAULT;
4081 }
4082
4083 for (i = 0; i < args->buffer_count; i++) {
4084 exec2_list[i].handle = exec_list[i].handle;
4085 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4086 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4087 exec2_list[i].alignment = exec_list[i].alignment;
4088 exec2_list[i].offset = exec_list[i].offset;
Chris Wilsona6c45cf2010-09-17 00:32:17 +01004089 if (INTEL_INFO(dev)->gen < 4)
Jesse Barnes76446ca2009-12-17 22:05:42 -05004090 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4091 else
4092 exec2_list[i].flags = 0;
4093 }
4094
4095 exec2.buffers_ptr = args->buffers_ptr;
4096 exec2.buffer_count = args->buffer_count;
4097 exec2.batch_start_offset = args->batch_start_offset;
4098 exec2.batch_len = args->batch_len;
4099 exec2.DR1 = args->DR1;
4100 exec2.DR4 = args->DR4;
4101 exec2.num_cliprects = args->num_cliprects;
4102 exec2.cliprects_ptr = args->cliprects_ptr;
Zou Nan hai852835f2010-05-21 09:08:56 +08004103 exec2.flags = I915_EXEC_RENDER;
Jesse Barnes76446ca2009-12-17 22:05:42 -05004104
4105 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4106 if (!ret) {
4107 /* Copy the new buffer offsets back to the user's exec list. */
4108 for (i = 0; i < args->buffer_count; i++)
4109 exec_list[i].offset = exec2_list[i].offset;
4110 /* ... and back out to userspace */
4111 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4112 (uintptr_t) args->buffers_ptr,
4113 exec_list,
4114 sizeof(*exec_list) * args->buffer_count);
4115 if (ret) {
4116 ret = -EFAULT;
4117 DRM_ERROR("failed to copy %d exec entries "
4118 "back to user (%d)\n",
4119 args->buffer_count, ret);
4120 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05004121 }
4122
4123 drm_free_large(exec_list);
4124 drm_free_large(exec2_list);
4125 return ret;
4126}
4127
4128int
4129i915_gem_execbuffer2(struct drm_device *dev, void *data,
4130 struct drm_file *file_priv)
4131{
4132 struct drm_i915_gem_execbuffer2 *args = data;
4133 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4134 int ret;
4135
4136#if WATCH_EXEC
4137 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4138 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4139#endif
4140
4141 if (args->buffer_count < 1) {
4142 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4143 return -EINVAL;
4144 }
4145
4146 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4147 if (exec2_list == NULL) {
4148 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4149 args->buffer_count);
4150 return -ENOMEM;
4151 }
4152 ret = copy_from_user(exec2_list,
4153 (struct drm_i915_relocation_entry __user *)
4154 (uintptr_t) args->buffers_ptr,
4155 sizeof(*exec2_list) * args->buffer_count);
4156 if (ret != 0) {
4157 DRM_ERROR("copy %d exec entries failed %d\n",
4158 args->buffer_count, ret);
4159 drm_free_large(exec2_list);
4160 return -EFAULT;
4161 }
4162
4163 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4164 if (!ret) {
4165 /* Copy the new buffer offsets back to the user's exec list. */
4166 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4167 (uintptr_t) args->buffers_ptr,
4168 exec2_list,
4169 sizeof(*exec2_list) * args->buffer_count);
4170 if (ret) {
4171 ret = -EFAULT;
4172 DRM_ERROR("failed to copy %d exec entries "
4173 "back to user (%d)\n",
4174 args->buffer_count, ret);
4175 }
4176 }
4177
4178 drm_free_large(exec2_list);
4179 return ret;
4180}
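
/*
 * Userspace usage sketch (illustrative only; nothing below is part of this
 * file).  drmIoctl() and the DRM_IOCTL_I915_GEM_EXECBUFFER2 definition are
 * assumed to come from libdrm/i915_drm.h.  Note that the batch buffer must
 * be the last entry in the buffer list, as i915_gem_do_execbuffer() above
 * expects:
 *
 *	struct drm_i915_gem_exec_object2 exec[2];
 *	struct drm_i915_gem_execbuffer2 execbuf;
 *	int ret;
 *
 *	memset(exec, 0, sizeof(exec));
 *	exec[0].handle = target_handle;
 *	exec[1].handle = batch_handle;		/* batch goes last */
 *	exec[1].relocation_count = nr_relocs;
 *	exec[1].relocs_ptr = (uintptr_t)relocs;	/* drm_i915_gem_relocation_entry[] */
 *
 *	memset(&execbuf, 0, sizeof(execbuf));
 *	execbuf.buffers_ptr = (uintptr_t)exec;
 *	execbuf.buffer_count = 2;
 *	execbuf.batch_len = batch_bytes;
 *	execbuf.flags = I915_EXEC_RENDER;	/* or _BSD/_BLT where supported */
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */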
4181
Eric Anholt673a3942008-07-30 12:06:12 -07004182int
4183i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4184{
4185 struct drm_device *dev = obj->dev;
Chris Wilsonf13d3f72010-09-20 17:36:15 +01004186 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01004187 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004188 int ret;
4189
Daniel Vetter778c3542010-05-13 11:49:44 +02004190 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
Chris Wilson23bc5982010-09-29 16:10:57 +01004191 WARN_ON(i915_verify_lists(dev));
Chris Wilsonac0c6b52010-05-27 13:18:18 +01004192
4193 if (obj_priv->gtt_space != NULL) {
4194 if (alignment == 0)
4195 alignment = i915_gem_get_gtt_alignment(obj);
4196 if (obj_priv->gtt_offset & (alignment - 1)) {
Chris Wilsonae7d49d2010-08-04 12:37:41 +01004197 WARN(obj_priv->pin_count,
Joe Perchesfce7d612010-10-30 21:08:30 +00004198 "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
Chris Wilsonae7d49d2010-08-04 12:37:41 +01004199 obj_priv->gtt_offset, alignment);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01004200 ret = i915_gem_object_unbind(obj);
4201 if (ret)
4202 return ret;
4203 }
4204 }
4205
Eric Anholt673a3942008-07-30 12:06:12 -07004206 if (obj_priv->gtt_space == NULL) {
4207 ret = i915_gem_object_bind_to_gtt(obj, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01004208 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07004209 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00004210 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05004211
Eric Anholt673a3942008-07-30 12:06:12 -07004212 obj_priv->pin_count++;
4213
4214 /* If the object is not active and not pending a flush,
4215 * remove it from the inactive list
4216 */
4217 if (obj_priv->pin_count == 1) {
Chris Wilson73aa8082010-09-30 11:46:12 +01004218 i915_gem_info_add_pin(dev_priv, obj->size);
Chris Wilsonf13d3f72010-09-20 17:36:15 +01004219 if (!obj_priv->active)
Chris Wilson69dc4982010-10-19 10:36:51 +01004220 list_move_tail(&obj_priv->mm_list,
Chris Wilsonf13d3f72010-09-20 17:36:15 +01004221 &dev_priv->mm.pinned_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004222 }
Eric Anholt673a3942008-07-30 12:06:12 -07004223
Chris Wilson23bc5982010-09-29 16:10:57 +01004224 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07004225 return 0;
4226}
4227
4228void
4229i915_gem_object_unpin(struct drm_gem_object *obj)
4230{
4231 struct drm_device *dev = obj->dev;
4232 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01004233 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004234
Chris Wilson23bc5982010-09-29 16:10:57 +01004235 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07004236 obj_priv->pin_count--;
4237 BUG_ON(obj_priv->pin_count < 0);
4238 BUG_ON(obj_priv->gtt_space == NULL);
4239
4240 /* If the object is no longer pinned, and is
4241 * neither active nor being flushed, then stick it on
4242 * the inactive list
4243 */
4244 if (obj_priv->pin_count == 0) {
Chris Wilsonf13d3f72010-09-20 17:36:15 +01004245 if (!obj_priv->active)
Chris Wilson69dc4982010-10-19 10:36:51 +01004246 list_move_tail(&obj_priv->mm_list,
Eric Anholt673a3942008-07-30 12:06:12 -07004247 &dev_priv->mm.inactive_list);
Chris Wilson73aa8082010-09-30 11:46:12 +01004248 i915_gem_info_remove_pin(dev_priv, obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07004249 }
Chris Wilson23bc5982010-09-29 16:10:57 +01004250 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07004251}
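
/*
 * Kernel-internal usage sketch (illustrative only): pin/unpin callers such
 * as the modesetting and ringbuffer code hold struct_mutex, pin a buffer
 * for as long as the hardware needs a fixed GTT address, and drop the pin
 * afterwards.  The names in this fragment are placeholders:
 *
 *	ret = i915_gem_object_pin(obj, 4096);
 *	if (ret)
 *		return ret;
 *	... program the hardware with to_intel_bo(obj)->gtt_offset ...
 *	i915_gem_object_unpin(obj);
 */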
4252
4253int
4254i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4255 struct drm_file *file_priv)
4256{
4257 struct drm_i915_gem_pin *args = data;
4258 struct drm_gem_object *obj;
4259 struct drm_i915_gem_object *obj_priv;
4260 int ret;
4261
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004262 ret = i915_mutex_lock_interruptible(dev);
4263 if (ret)
4264 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004265
4266 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4267 if (obj == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004268 ret = -ENOENT;
4269 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004270 }
Daniel Vetter23010e42010-03-08 13:35:02 +01004271 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004272
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004273 if (obj_priv->madv != I915_MADV_WILLNEED) {
4274 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004275 ret = -EINVAL;
4276 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004277 }
4278
Jesse Barnes79e53942008-11-07 14:24:08 -08004279 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4280 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4281 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004282 ret = -EINVAL;
4283 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08004284 }
4285
4286 obj_priv->user_pin_count++;
4287 obj_priv->pin_filp = file_priv;
4288 if (obj_priv->user_pin_count == 1) {
4289 ret = i915_gem_object_pin(obj, args->alignment);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004290 if (ret)
4291 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07004292 }
4293
4294 /* XXX - flush the CPU caches for pinned objects
4295 * as the X server doesn't manage domains yet
4296 */
Eric Anholte47c68e2008-11-14 13:35:19 -08004297 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004298 args->offset = obj_priv->gtt_offset;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004299out:
Eric Anholt673a3942008-07-30 12:06:12 -07004300 drm_gem_object_unreference(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004301unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004302 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004303 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004304}
4305
4306int
4307i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4308 struct drm_file *file_priv)
4309{
4310 struct drm_i915_gem_pin *args = data;
4311 struct drm_gem_object *obj;
Jesse Barnes79e53942008-11-07 14:24:08 -08004312 struct drm_i915_gem_object *obj_priv;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004313 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004314
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004315 ret = i915_mutex_lock_interruptible(dev);
4316 if (ret)
4317 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004318
4319 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4320 if (obj == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004321 ret = -ENOENT;
4322 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004323 }
Daniel Vetter23010e42010-03-08 13:35:02 +01004324 obj_priv = to_intel_bo(obj);
Chris Wilson76c1dec2010-09-25 11:22:51 +01004325
Jesse Barnes79e53942008-11-07 14:24:08 -08004326 if (obj_priv->pin_filp != file_priv) {
4327 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4328 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004329 ret = -EINVAL;
4330 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08004331 }
4332 obj_priv->user_pin_count--;
4333 if (obj_priv->user_pin_count == 0) {
4334 obj_priv->pin_filp = NULL;
4335 i915_gem_object_unpin(obj);
4336 }
Eric Anholt673a3942008-07-30 12:06:12 -07004337
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004338out:
Eric Anholt673a3942008-07-30 12:06:12 -07004339 drm_gem_object_unreference(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004340unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004341 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004342 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004343}
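
/*
 * Userspace usage sketch (illustrative only): these two ioctls are intended
 * for a privileged client such as the X server.  Both take a
 * struct drm_i915_gem_pin; drmIoctl() is assumed from libdrm:
 *
 *	struct drm_i915_gem_pin pin = { .handle = handle, .alignment = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin) == 0) {
 *		... pin.offset is now a stable GTT address ...
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &pin);
 *	}
 */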
4344
4345int
4346i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4347 struct drm_file *file_priv)
4348{
4349 struct drm_i915_gem_busy *args = data;
4350 struct drm_gem_object *obj;
4351 struct drm_i915_gem_object *obj_priv;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004352 int ret;
4353
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004354 ret = i915_mutex_lock_interruptible(dev);
4355 if (ret)
4356 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004357
Eric Anholt673a3942008-07-30 12:06:12 -07004358 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4359 if (obj == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004360 ret = -ENOENT;
4361 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004362 }
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004363 obj_priv = to_intel_bo(obj);
Zou Nan haid1b851f2010-05-21 09:08:57 +08004364
Chris Wilson0be555b2010-08-04 15:36:30 +01004365 /* Count all active objects as busy, even if they are currently not used
4366 * by the gpu. Users of this interface expect objects to eventually
4367 * become non-busy without any further actions, therefore emit any
4368 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08004369 */
Chris Wilson0be555b2010-08-04 15:36:30 +01004370 args->busy = obj_priv->active;
4371 if (args->busy) {
4372 /* Unconditionally flush objects, even when the gpu still uses this
4373 * object. Userspace calling this function indicates that it wants to
 4374		 * use this buffer sooner rather than later, so issuing the required
4375 * flush earlier is beneficial.
4376 */
Chris Wilsonc78ec302010-09-20 12:50:23 +01004377 if (obj->write_domain & I915_GEM_GPU_DOMAINS)
4378 i915_gem_flush_ring(dev, file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01004379 obj_priv->ring,
4380 0, obj->write_domain);
Chris Wilson0be555b2010-08-04 15:36:30 +01004381
4382 /* Update the active list for the hardware's current position.
4383 * Otherwise this only updates on a delayed timer or when irqs
4384 * are actually unmasked, and our working set ends up being
4385 * larger than required.
4386 */
4387 i915_gem_retire_requests_ring(dev, obj_priv->ring);
4388
4389 args->busy = obj_priv->active;
4390 }
Eric Anholt673a3942008-07-30 12:06:12 -07004391
4392 drm_gem_object_unreference(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004393unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004394 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004395 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004396}
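
/*
 * Userspace usage sketch (illustrative only): poll whether the GPU is still
 * using a buffer before touching it with the CPU.  drmIoctl() is assumed
 * from libdrm:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && !busy.busy)
 *		... the buffer is idle, reuse it ...
 */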
4397
4398int
4399i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4400 struct drm_file *file_priv)
4401{
4402 return i915_gem_ring_throttle(dev, file_priv);
4403}
4404
Chris Wilson3ef94da2009-09-14 16:50:29 +01004405int
4406i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4407 struct drm_file *file_priv)
4408{
4409 struct drm_i915_gem_madvise *args = data;
4410 struct drm_gem_object *obj;
4411 struct drm_i915_gem_object *obj_priv;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004412 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004413
4414 switch (args->madv) {
4415 case I915_MADV_DONTNEED:
4416 case I915_MADV_WILLNEED:
4417 break;
4418 default:
4419 return -EINVAL;
4420 }
4421
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004422 ret = i915_mutex_lock_interruptible(dev);
4423 if (ret)
4424 return ret;
4425
Chris Wilson3ef94da2009-09-14 16:50:29 +01004426 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4427 if (obj == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004428 ret = -ENOENT;
4429 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004430 }
Daniel Vetter23010e42010-03-08 13:35:02 +01004431 obj_priv = to_intel_bo(obj);
Chris Wilson3ef94da2009-09-14 16:50:29 +01004432
4433 if (obj_priv->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004434 ret = -EINVAL;
4435 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004436 }
4437
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004438 if (obj_priv->madv != __I915_MADV_PURGED)
4439 obj_priv->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004440
Chris Wilson2d7ef392009-09-20 23:13:10 +01004441 /* if the object is no longer bound, discard its backing storage */
4442 if (i915_gem_object_is_purgeable(obj_priv) &&
4443 obj_priv->gtt_space == NULL)
4444 i915_gem_object_truncate(obj);
4445
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004446 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4447
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004448out:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004449 drm_gem_object_unreference(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004450unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004451 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004452 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004453}
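
/*
 * Userspace usage sketch (illustrative only): a buffer-object cache marks
 * idle buffers purgeable and checks on reuse whether the kernel reclaimed
 * the backing storage in the meantime:
 *
 *	struct drm_i915_gem_madvise madv = { .handle = handle };
 *
 *	madv.madv = I915_MADV_DONTNEED;		/* buffer goes into the cache */
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;		/* buffer comes back out */
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0 &&
 *	    !madv.retained)
 *		... contents were purged, reinitialize the buffer ...
 */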
4454
Daniel Vetterac52bc52010-04-09 19:05:06 +00004455struct drm_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4456 size_t size)
4457{
Chris Wilson73aa8082010-09-30 11:46:12 +01004458 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterc397b902010-04-09 19:05:07 +00004459 struct drm_i915_gem_object *obj;
4460
4461 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4462 if (obj == NULL)
4463 return NULL;
4464
4465 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4466 kfree(obj);
4467 return NULL;
4468 }
4469
Chris Wilson73aa8082010-09-30 11:46:12 +01004470 i915_gem_info_add_obj(dev_priv, size);
4471
Daniel Vetterc397b902010-04-09 19:05:07 +00004472 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4473 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4474
4475 obj->agp_type = AGP_USER_MEMORY;
Daniel Vetter62b8b212010-04-09 19:05:08 +00004476 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00004477 obj->fence_reg = I915_FENCE_REG_NONE;
Chris Wilson69dc4982010-10-19 10:36:51 +01004478 INIT_LIST_HEAD(&obj->mm_list);
4479 INIT_LIST_HEAD(&obj->ring_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00004480 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00004481 obj->madv = I915_MADV_WILLNEED;
4482
Daniel Vetterc397b902010-04-09 19:05:07 +00004483 return &obj->base;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004484}
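
/*
 * Kernel-internal usage sketch (illustrative only): driver-private buffers
 * are allocated through this helper and released via the normal GEM
 * reference count, as i915_gem_init_pipe_control() below does for its
 * seqno page:
 *
 *	obj = i915_gem_alloc_object(dev, 4096);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	...
 *	drm_gem_object_unreference(obj);	/* under struct_mutex */
 */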
4485
Eric Anholt673a3942008-07-30 12:06:12 -07004486int i915_gem_init_object(struct drm_gem_object *obj)
4487{
Daniel Vetterc397b902010-04-09 19:05:07 +00004488 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08004489
Eric Anholt673a3942008-07-30 12:06:12 -07004490 return 0;
4491}
4492
Chris Wilsonbe726152010-07-23 23:18:50 +01004493static void i915_gem_free_object_tail(struct drm_gem_object *obj)
4494{
4495 struct drm_device *dev = obj->dev;
4496 drm_i915_private_t *dev_priv = dev->dev_private;
4497 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4498 int ret;
4499
4500 ret = i915_gem_object_unbind(obj);
4501 if (ret == -ERESTARTSYS) {
Chris Wilson69dc4982010-10-19 10:36:51 +01004502 list_move(&obj_priv->mm_list,
Chris Wilsonbe726152010-07-23 23:18:50 +01004503 &dev_priv->mm.deferred_free_list);
4504 return;
4505 }
4506
4507 if (obj_priv->mmap_offset)
4508 i915_gem_free_mmap_offset(obj);
4509
4510 drm_gem_object_release(obj);
Chris Wilson73aa8082010-09-30 11:46:12 +01004511 i915_gem_info_remove_obj(dev_priv, obj->size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004512
4513 kfree(obj_priv->page_cpu_valid);
4514 kfree(obj_priv->bit_17);
4515 kfree(obj_priv);
4516}
4517
Eric Anholt673a3942008-07-30 12:06:12 -07004518void i915_gem_free_object(struct drm_gem_object *obj)
4519{
Jesse Barnesde151cf2008-11-12 10:03:55 -08004520 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01004521 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004522
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004523 trace_i915_gem_object_destroy(obj);
4524
Eric Anholt673a3942008-07-30 12:06:12 -07004525 while (obj_priv->pin_count > 0)
4526 i915_gem_object_unpin(obj);
4527
Dave Airlie71acb5e2008-12-30 20:31:46 +10004528 if (obj_priv->phys_obj)
4529 i915_gem_detach_phys_object(dev, obj);
4530
Chris Wilsonbe726152010-07-23 23:18:50 +01004531 i915_gem_free_object_tail(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004532}
4533
Jesse Barnes5669fca2009-02-17 15:13:31 -08004534int
Eric Anholt673a3942008-07-30 12:06:12 -07004535i915_gem_idle(struct drm_device *dev)
4536{
4537 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00004538 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004539
Keith Packard6dbe2772008-10-14 21:41:13 -07004540 mutex_lock(&dev->struct_mutex);
4541
Chris Wilson87acb0a2010-10-19 10:13:00 +01004542 if (dev_priv->mm.suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07004543 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004544 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07004545 }
Eric Anholt673a3942008-07-30 12:06:12 -07004546
Chris Wilson29105cc2010-01-07 10:39:13 +00004547 ret = i915_gpu_idle(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004548 if (ret) {
4549 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004550 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07004551 }
Eric Anholt673a3942008-07-30 12:06:12 -07004552
Chris Wilson29105cc2010-01-07 10:39:13 +00004553 /* Under UMS, be paranoid and evict. */
4554 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01004555 ret = i915_gem_evict_inactive(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004556 if (ret) {
4557 mutex_unlock(&dev->struct_mutex);
4558 return ret;
4559 }
4560 }
4561
4562 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4563 * We need to replace this with a semaphore, or something.
4564 * And not confound mm.suspended!
4565 */
4566 dev_priv->mm.suspended = 1;
Daniel Vetterbc0c7f12010-08-20 18:18:48 +02004567 del_timer_sync(&dev_priv->hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004568
4569 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004570 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004571
Keith Packard6dbe2772008-10-14 21:41:13 -07004572 mutex_unlock(&dev->struct_mutex);
4573
Chris Wilson29105cc2010-01-07 10:39:13 +00004574 /* Cancel the retire work handler, which should be idle now. */
4575 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4576
Eric Anholt673a3942008-07-30 12:06:12 -07004577 return 0;
4578}
4579
Jesse Barnese552eb72010-04-21 11:39:23 -07004580/*
 4581 * 965+ chipsets support PIPE_CONTROL commands, which provide finer-grained control
4582 * over cache flushing.
4583 */
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004584static int
Jesse Barnese552eb72010-04-21 11:39:23 -07004585i915_gem_init_pipe_control(struct drm_device *dev)
4586{
4587 drm_i915_private_t *dev_priv = dev->dev_private;
4588 struct drm_gem_object *obj;
4589 struct drm_i915_gem_object *obj_priv;
4590 int ret;
4591
Eric Anholt34dc4d42010-05-07 14:30:03 -07004592 obj = i915_gem_alloc_object(dev, 4096);
Jesse Barnese552eb72010-04-21 11:39:23 -07004593 if (obj == NULL) {
4594 DRM_ERROR("Failed to allocate seqno page\n");
4595 ret = -ENOMEM;
4596 goto err;
4597 }
4598 obj_priv = to_intel_bo(obj);
4599 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4600
4601 ret = i915_gem_object_pin(obj, 4096);
4602 if (ret)
4603 goto err_unref;
4604
4605 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4606 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4607 if (dev_priv->seqno_page == NULL)
4608 goto err_unpin;
4609
4610 dev_priv->seqno_obj = obj;
4611 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4612
4613 return 0;
4614
4615err_unpin:
4616 i915_gem_object_unpin(obj);
4617err_unref:
4618 drm_gem_object_unreference(obj);
4619err:
4620 return ret;
4621}
4622
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004623
4624static void
Jesse Barnese552eb72010-04-21 11:39:23 -07004625i915_gem_cleanup_pipe_control(struct drm_device *dev)
4626{
4627 drm_i915_private_t *dev_priv = dev->dev_private;
4628 struct drm_gem_object *obj;
4629 struct drm_i915_gem_object *obj_priv;
4630
4631 obj = dev_priv->seqno_obj;
4632 obj_priv = to_intel_bo(obj);
4633 kunmap(obj_priv->pages[0]);
4634 i915_gem_object_unpin(obj);
4635 drm_gem_object_unreference(obj);
4636 dev_priv->seqno_obj = NULL;
4637
4638 dev_priv->seqno_page = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07004639}
4640
Eric Anholt673a3942008-07-30 12:06:12 -07004641int
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004642i915_gem_init_ringbuffer(struct drm_device *dev)
4643{
4644 drm_i915_private_t *dev_priv = dev->dev_private;
4645 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004646
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004647 if (HAS_PIPE_CONTROL(dev)) {
4648 ret = i915_gem_init_pipe_control(dev);
4649 if (ret)
4650 return ret;
4651 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004652
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004653 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004654 if (ret)
4655 goto cleanup_pipe_control;
4656
4657 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004658 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004659 if (ret)
4660 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004661 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004662
Chris Wilson549f7362010-10-19 11:19:32 +01004663 if (HAS_BLT(dev)) {
4664 ret = intel_init_blt_ring_buffer(dev);
4665 if (ret)
4666 goto cleanup_bsd_ring;
4667 }
4668
Chris Wilson6f392d52010-08-07 11:01:22 +01004669 dev_priv->next_seqno = 1;
4670
Chris Wilson68f95ba2010-05-27 13:18:22 +01004671 return 0;
4672
Chris Wilson549f7362010-10-19 11:19:32 +01004673cleanup_bsd_ring:
4674 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004675cleanup_render_ring:
4676 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4677cleanup_pipe_control:
4678 if (HAS_PIPE_CONTROL(dev))
4679 i915_gem_cleanup_pipe_control(dev);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004680 return ret;
4681}
4682
4683void
4684i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4685{
4686 drm_i915_private_t *dev_priv = dev->dev_private;
4687
4688 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
Chris Wilson87acb0a2010-10-19 10:13:00 +01004689 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
Chris Wilson549f7362010-10-19 11:19:32 +01004690 intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004691 if (HAS_PIPE_CONTROL(dev))
4692 i915_gem_cleanup_pipe_control(dev);
4693}
4694
4695int
Eric Anholt673a3942008-07-30 12:06:12 -07004696i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4697 struct drm_file *file_priv)
4698{
4699 drm_i915_private_t *dev_priv = dev->dev_private;
4700 int ret;
4701
Jesse Barnes79e53942008-11-07 14:24:08 -08004702 if (drm_core_check_feature(dev, DRIVER_MODESET))
4703 return 0;
4704
Ben Gamariba1234d2009-09-14 17:48:47 -04004705 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004706 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04004707 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004708 }
4709
Eric Anholt673a3942008-07-30 12:06:12 -07004710 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004711 dev_priv->mm.suspended = 0;
4712
4713 ret = i915_gem_init_ringbuffer(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004714 if (ret != 0) {
4715 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004716 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004717 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004718
Chris Wilson69dc4982010-10-19 10:36:51 +01004719 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Zou Nan hai852835f2010-05-21 09:08:56 +08004720 BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
Chris Wilson87acb0a2010-10-19 10:13:00 +01004721 BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
Chris Wilson549f7362010-10-19 11:19:32 +01004722 BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004723 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4724 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
Zou Nan hai852835f2010-05-21 09:08:56 +08004725 BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
Chris Wilson87acb0a2010-10-19 10:13:00 +01004726 BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
Chris Wilson549f7362010-10-19 11:19:32 +01004727 BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004728 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004729
Chris Wilson5f353082010-06-07 14:03:03 +01004730 ret = drm_irq_install(dev);
4731 if (ret)
4732 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004733
Eric Anholt673a3942008-07-30 12:06:12 -07004734 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004735
4736cleanup_ringbuffer:
4737 mutex_lock(&dev->struct_mutex);
4738 i915_gem_cleanup_ringbuffer(dev);
4739 dev_priv->mm.suspended = 1;
4740 mutex_unlock(&dev->struct_mutex);
4741
4742 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004743}
4744
4745int
4746i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4747 struct drm_file *file_priv)
4748{
Jesse Barnes79e53942008-11-07 14:24:08 -08004749 if (drm_core_check_feature(dev, DRIVER_MODESET))
4750 return 0;
4751
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004752 drm_irq_uninstall(dev);
Linus Torvaldse6890f62009-09-08 17:09:24 -07004753 return i915_gem_idle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004754}
4755
4756void
4757i915_gem_lastclose(struct drm_device *dev)
4758{
4759 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004760
Eric Anholte806b492009-01-22 09:56:58 -08004761 if (drm_core_check_feature(dev, DRIVER_MODESET))
4762 return;
4763
Keith Packard6dbe2772008-10-14 21:41:13 -07004764 ret = i915_gem_idle(dev);
4765 if (ret)
4766 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004767}
4768
Chris Wilson64193402010-10-24 12:38:05 +01004769static void
4770init_ring_lists(struct intel_ring_buffer *ring)
4771{
4772 INIT_LIST_HEAD(&ring->active_list);
4773 INIT_LIST_HEAD(&ring->request_list);
4774 INIT_LIST_HEAD(&ring->gpu_write_list);
4775}
4776
Eric Anholt673a3942008-07-30 12:06:12 -07004777void
4778i915_gem_load(struct drm_device *dev)
4779{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004780 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07004781 drm_i915_private_t *dev_priv = dev->dev_private;
4782
Chris Wilson69dc4982010-10-19 10:36:51 +01004783 INIT_LIST_HEAD(&dev_priv->mm.active_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004784 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4785 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
Chris Wilsonf13d3f72010-09-20 17:36:15 +01004786 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004787 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilsonbe726152010-07-23 23:18:50 +01004788 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
Chris Wilson64193402010-10-24 12:38:05 +01004789 init_ring_lists(&dev_priv->render_ring);
4790 init_ring_lists(&dev_priv->bsd_ring);
4791 init_ring_lists(&dev_priv->blt_ring);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02004792 for (i = 0; i < 16; i++)
4793 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004794 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4795 i915_gem_retire_work_handler);
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004796 init_completion(&dev_priv->error_completion);
Chris Wilson31169712009-09-14 16:50:28 +01004797 spin_lock(&shrink_list_lock);
4798 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4799 spin_unlock(&shrink_list_lock);
4800
Dave Airlie94400122010-07-20 13:15:31 +10004801 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4802 if (IS_GEN3(dev)) {
4803 u32 tmp = I915_READ(MI_ARB_STATE);
4804 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
4805 /* arb state is a masked write, so set bit + bit in mask */
4806 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
4807 I915_WRITE(MI_ARB_STATE, tmp);
4808 }
4809 }
4810
Jesse Barnesde151cf2008-11-12 10:03:55 -08004811 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08004812 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4813 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004814
Chris Wilsona6c45cf2010-09-17 00:32:17 +01004815 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004816 dev_priv->num_fence_regs = 16;
4817 else
4818 dev_priv->num_fence_regs = 8;
4819
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004820 /* Initialize fence registers to zero */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01004821 switch (INTEL_INFO(dev)->gen) {
4822 case 6:
4823 for (i = 0; i < 16; i++)
4824 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
4825 break;
4826 case 5:
4827 case 4:
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004828 for (i = 0; i < 16; i++)
4829 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01004830 break;
4831 case 3:
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004832 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4833 for (i = 0; i < 8; i++)
4834 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
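		/* fall through - gen3 also clears the low 8 fence registers below */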
Chris Wilsona6c45cf2010-09-17 00:32:17 +01004835 case 2:
4836 for (i = 0; i < 8; i++)
4837 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4838 break;
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004839 }
Eric Anholt673a3942008-07-30 12:06:12 -07004840 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004841 init_waitqueue_head(&dev_priv->pending_flip_queue);
Eric Anholt673a3942008-07-30 12:06:12 -07004842}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004843
4844/*
4845 * Create a physically contiguous memory object for this object
4846 * e.g. for cursor + overlay regs
4847 */
Chris Wilson995b6762010-08-20 13:23:26 +01004848static int i915_gem_init_phys_object(struct drm_device *dev,
4849 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004850{
4851 drm_i915_private_t *dev_priv = dev->dev_private;
4852 struct drm_i915_gem_phys_object *phys_obj;
4853 int ret;
4854
4855 if (dev_priv->mm.phys_objs[id - 1] || !size)
4856 return 0;
4857
Eric Anholt9a298b22009-03-24 12:23:04 -07004858 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004859 if (!phys_obj)
4860 return -ENOMEM;
4861
4862 phys_obj->id = id;
4863
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004864 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004865 if (!phys_obj->handle) {
4866 ret = -ENOMEM;
4867 goto kfree_obj;
4868 }
4869#ifdef CONFIG_X86
4870 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4871#endif
4872
4873 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4874
4875 return 0;
4876kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004877 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004878 return ret;
4879}
4880
Chris Wilson995b6762010-08-20 13:23:26 +01004881static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004882{
4883 drm_i915_private_t *dev_priv = dev->dev_private;
4884 struct drm_i915_gem_phys_object *phys_obj;
4885
4886 if (!dev_priv->mm.phys_objs[id - 1])
4887 return;
4888
4889 phys_obj = dev_priv->mm.phys_objs[id - 1];
4890 if (phys_obj->cur_obj) {
4891 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4892 }
4893
4894#ifdef CONFIG_X86
4895 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4896#endif
4897 drm_pci_free(dev, phys_obj->handle);
4898 kfree(phys_obj);
4899 dev_priv->mm.phys_objs[id - 1] = NULL;
4900}
4901
4902void i915_gem_free_all_phys_object(struct drm_device *dev)
4903{
4904 int i;
4905
Dave Airlie260883c2009-01-22 17:58:49 +10004906 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004907 i915_gem_free_phys_object(dev, i);
4908}
4909
4910void i915_gem_detach_phys_object(struct drm_device *dev,
4911 struct drm_gem_object *obj)
4912{
4913 struct drm_i915_gem_object *obj_priv;
4914 int i;
4915 int ret;
4916 int page_count;
4917
Daniel Vetter23010e42010-03-08 13:35:02 +01004918 obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004919 if (!obj_priv->phys_obj)
4920 return;
4921
Chris Wilson4bdadb92010-01-27 13:36:32 +00004922 ret = i915_gem_object_get_pages(obj, 0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004923 if (ret)
4924 goto out;
4925
4926 page_count = obj->size / PAGE_SIZE;
4927
4928 for (i = 0; i < page_count; i++) {
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004929 char *dst = kmap_atomic(obj_priv->pages[i]);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004930 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4931
4932 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004933 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004934 }
Eric Anholt856fa192009-03-19 14:10:50 -07004935 drm_clflush_pages(obj_priv->pages, page_count);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004936 drm_agp_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01004937
4938 i915_gem_object_put_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004939out:
4940 obj_priv->phys_obj->cur_obj = NULL;
4941 obj_priv->phys_obj = NULL;
4942}
4943
4944int
4945i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004946 struct drm_gem_object *obj,
4947 int id,
4948 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004949{
4950 drm_i915_private_t *dev_priv = dev->dev_private;
4951 struct drm_i915_gem_object *obj_priv;
4952 int ret = 0;
4953 int page_count;
4954 int i;
4955
4956 if (id > I915_MAX_PHYS_OBJECT)
4957 return -EINVAL;
4958
Daniel Vetter23010e42010-03-08 13:35:02 +01004959 obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004960
4961 if (obj_priv->phys_obj) {
4962 if (obj_priv->phys_obj->id == id)
4963 return 0;
4964 i915_gem_detach_phys_object(dev, obj);
4965 }
4966
Dave Airlie71acb5e2008-12-30 20:31:46 +10004967 /* create a new object */
4968 if (!dev_priv->mm.phys_objs[id - 1]) {
4969 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004970 obj->size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004971 if (ret) {
Linus Torvaldsaeb565d2009-01-26 10:01:53 -08004972 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004973 goto out;
4974 }
4975 }
4976
4977 /* bind to the object */
4978 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4979 obj_priv->phys_obj->cur_obj = obj;
4980
Chris Wilson4bdadb92010-01-27 13:36:32 +00004981 ret = i915_gem_object_get_pages(obj, 0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004982 if (ret) {
4983 DRM_ERROR("failed to get page list\n");
4984 goto out;
4985 }
4986
4987 page_count = obj->size / PAGE_SIZE;
4988
4989 for (i = 0; i < page_count; i++) {
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004990 char *src = kmap_atomic(obj_priv->pages[i]);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004991 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4992
4993 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004994 kunmap_atomic(src);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004995 }
4996
Chris Wilsond78b47b2009-06-17 21:52:49 +01004997 i915_gem_object_put_pages(obj);
4998
Dave Airlie71acb5e2008-12-30 20:31:46 +10004999 return 0;
5000out:
5001 return ret;
5002}
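
/*
 * Kernel-internal usage sketch (illustrative only): the cursor code binds a
 * small buffer to one of the contiguous physical slots when the hardware
 * cannot scan the cursor out of the GTT, roughly:
 *
 *	ret = i915_gem_attach_phys_object(dev, obj,
 *					  I915_GEM_PHYS_CURSOR_0, PAGE_SIZE);
 *
 * and releases it with i915_gem_detach_phys_object() when the cursor is
 * torn down.  The actual call sites live in the modesetting code, not here.
 */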
5003
5004static int
5005i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
5006 struct drm_i915_gem_pwrite *args,
5007 struct drm_file *file_priv)
5008{
Daniel Vetter23010e42010-03-08 13:35:02 +01005009 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilsonb47b30c2010-11-08 01:12:29 +00005010 void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
5011 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10005012
Chris Wilsonb47b30c2010-11-08 01:12:29 +00005013 DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005014
Chris Wilsonb47b30c2010-11-08 01:12:29 +00005015 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
5016 unsigned long unwritten;
5017
5018 /* The physical object once assigned is fixed for the lifetime
5019 * of the obj, so we can safely drop the lock and continue
5020 * to access vaddr.
5021 */
5022 mutex_unlock(&dev->struct_mutex);
5023 unwritten = copy_from_user(vaddr, user_data, args->size);
5024 mutex_lock(&dev->struct_mutex);
5025 if (unwritten)
5026 return -EFAULT;
5027 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10005028
5029 drm_agp_chipset_flush(dev);
5030 return 0;
5031}
Eric Anholtb9624422009-06-03 07:27:35 +00005032
Chris Wilsonf787a5f2010-09-24 16:02:42 +01005033void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00005034{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01005035 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00005036
5037 /* Clean up our request list when the client is going away, so that
5038 * later retire_requests won't dereference our soon-to-be-gone
5039 * file_priv.
5040 */
Chris Wilson1c255952010-09-26 11:03:27 +01005041 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01005042 while (!list_empty(&file_priv->mm.request_list)) {
5043 struct drm_i915_gem_request *request;
5044
5045 request = list_first_entry(&file_priv->mm.request_list,
5046 struct drm_i915_gem_request,
5047 client_list);
5048 list_del(&request->client_list);
5049 request->file_priv = NULL;
5050 }
Chris Wilson1c255952010-09-26 11:03:27 +01005051 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00005052}
Chris Wilson31169712009-09-14 16:50:28 +01005053
Chris Wilson31169712009-09-14 16:50:28 +01005054static int
Chris Wilson1637ef42010-04-20 17:10:35 +01005055i915_gpu_is_active(struct drm_device *dev)
5056{
5057 drm_i915_private_t *dev_priv = dev->dev_private;
5058 int lists_empty;
5059
Chris Wilson1637ef42010-04-20 17:10:35 +01005060 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson395b70b2010-10-28 21:28:46 +01005061 list_empty(&dev_priv->mm.active_list);
Chris Wilson1637ef42010-04-20 17:10:35 +01005062
5063 return !lists_empty;
5064}
5065
5066static int
Dave Chinner7f8275d2010-07-19 14:56:17 +10005067i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
Chris Wilson31169712009-09-14 16:50:28 +01005068{
5069 drm_i915_private_t *dev_priv, *next_dev;
5070 struct drm_i915_gem_object *obj_priv, *next_obj;
5071 int cnt = 0;
5072 int would_deadlock = 1;
5073
5074 /* "fast-path" to count number of available objects */
5075 if (nr_to_scan == 0) {
5076 spin_lock(&shrink_list_lock);
5077 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5078 struct drm_device *dev = dev_priv->dev;
5079
5080 if (mutex_trylock(&dev->struct_mutex)) {
5081 list_for_each_entry(obj_priv,
5082 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01005083 mm_list)
Chris Wilson31169712009-09-14 16:50:28 +01005084 cnt++;
5085 mutex_unlock(&dev->struct_mutex);
5086 }
5087 }
5088 spin_unlock(&shrink_list_lock);
5089
5090 return (cnt / 100) * sysctl_vfs_cache_pressure;
5091 }
5092
5093 spin_lock(&shrink_list_lock);
5094
Chris Wilson1637ef42010-04-20 17:10:35 +01005095rescan:
Chris Wilson31169712009-09-14 16:50:28 +01005096 /* first scan for clean buffers */
5097 list_for_each_entry_safe(dev_priv, next_dev,
5098 &shrink_list, mm.shrink_list) {
5099 struct drm_device *dev = dev_priv->dev;
5100
 5101		if (!mutex_trylock(&dev->struct_mutex))
5102 continue;
5103
5104 spin_unlock(&shrink_list_lock);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01005105 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08005106
Chris Wilson31169712009-09-14 16:50:28 +01005107 list_for_each_entry_safe(obj_priv, next_obj,
5108 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01005109 mm_list) {
Chris Wilson31169712009-09-14 16:50:28 +01005110 if (i915_gem_object_is_purgeable(obj_priv)) {
Daniel Vettera8089e82010-04-09 19:05:09 +00005111 i915_gem_object_unbind(&obj_priv->base);
Chris Wilson31169712009-09-14 16:50:28 +01005112 if (--nr_to_scan <= 0)
5113 break;
5114 }
5115 }
5116
5117 spin_lock(&shrink_list_lock);
5118 mutex_unlock(&dev->struct_mutex);
5119
Chris Wilson963b4832009-09-20 23:03:54 +01005120 would_deadlock = 0;
5121
Chris Wilson31169712009-09-14 16:50:28 +01005122 if (nr_to_scan <= 0)
5123 break;
5124 }
5125
5126 /* second pass, evict/count anything still on the inactive list */
5127 list_for_each_entry_safe(dev_priv, next_dev,
5128 &shrink_list, mm.shrink_list) {
5129 struct drm_device *dev = dev_priv->dev;
5130
 5131		if (!mutex_trylock(&dev->struct_mutex))
5132 continue;
5133
5134 spin_unlock(&shrink_list_lock);
5135
5136 list_for_each_entry_safe(obj_priv, next_obj,
5137 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01005138 mm_list) {
Chris Wilson31169712009-09-14 16:50:28 +01005139 if (nr_to_scan > 0) {
Daniel Vettera8089e82010-04-09 19:05:09 +00005140 i915_gem_object_unbind(&obj_priv->base);
Chris Wilson31169712009-09-14 16:50:28 +01005141 nr_to_scan--;
5142 } else
5143 cnt++;
5144 }
5145
5146 spin_lock(&shrink_list_lock);
5147 mutex_unlock(&dev->struct_mutex);
5148
5149 would_deadlock = 0;
5150 }
5151
Chris Wilson1637ef42010-04-20 17:10:35 +01005152 if (nr_to_scan) {
5153 int active = 0;
5154
5155 /*
5156 * We are desperate for pages, so as a last resort, wait
 5157		 * This dramatically reduces the number of OOM-killer
 5158		 * events whilst running the GPU aggressively.
5159 * OOM-killer events whilst running the GPU aggressively.
5160 */
5161 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5162 struct drm_device *dev = dev_priv->dev;
5163
5164 if (!mutex_trylock(&dev->struct_mutex))
5165 continue;
5166
5167 spin_unlock(&shrink_list_lock);
5168
5169 if (i915_gpu_is_active(dev)) {
5170 i915_gpu_idle(dev);
5171 active++;
5172 }
5173
5174 spin_lock(&shrink_list_lock);
5175 mutex_unlock(&dev->struct_mutex);
5176 }
5177
5178 if (active)
5179 goto rescan;
5180 }
5181
Chris Wilson31169712009-09-14 16:50:28 +01005182 spin_unlock(&shrink_list_lock);
5183
5184 if (would_deadlock)
5185 return -1;
5186 else if (cnt > 0)
5187 return (cnt / 100) * sysctl_vfs_cache_pressure;
5188 else
5189 return 0;
5190}
5191
5192static struct shrinker shrinker = {
5193 .shrink = i915_gem_shrink,
5194 .seeks = DEFAULT_SEEKS,
5195};
5196
5197__init void
5198i915_gem_shrinker_init(void)
5199{
5200 register_shrinker(&shrinker);
5201}
5202
5203__exit void
5204i915_gem_shrinker_exit(void)
5205{
5206 unregister_shrinker(&shrinker);
5207}