/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#define nouveau_gem_pushbuf_sync(chan) 0

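/*
 * GEM interface to nouveau's TTM-backed buffer objects: object
 * lifecycle, placement domains (VRAM/GART/CPU), and the pushbuf
 * submission ioctl used for command submission.
 */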
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
        return 0;
}

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo;

        if (!nvbo)
                return;
        bo = &nvbo->bo;
        nvbo->gem = NULL;

        /* leaked pin references would keep the bo pinned forever; collapse
         * them to a single reference and drop it before the final unref
         */
        if (unlikely(nvbo->pin_refcnt)) {
                nvbo->pin_refcnt = 1;
                nouveau_bo_unpin(nvbo);
        }

        ttm_bo_unref(&bo);

        drm_gem_object_release(gem);
        kfree(gem);
}

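/*
 * Per-client address space hooks.  Skeletons at this point: the
 * per-fd VM (fpriv->vm) is checked for, but nothing is mapped into
 * or out of it yet on object open/close.
 */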
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);

        if (!fpriv->vm)
                return 0;

        return 0;
}

void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);

        if (!fpriv->vm)
                return;
}

int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;
        if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
                flags |= TTM_PL_FLAG_SYSTEM;

        ret = nouveau_bo_new(dev, NULL, size, align, flags, tile_mode,
                             tile_flags, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (dev_priv->card_type >= NV_50)
                nvbo->valid_domains &= domain;

        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
        nvbo->gem->driver_private = nvbo;
        return 0;
}

static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);

        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->offset = nvbo->bo.offset;
        rep->map_handle = nvbo->bo.addr_space_offset;
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
}

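/*
 * GEM_NEW ioctl: check the requested tile flags, allocate a bo in the
 * requested domain(s) and create a userspace handle for it.  The
 * allocation reference is dropped once the handle holds its own.
 */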
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
                dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

        if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
                NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
                return -EINVAL;
        }

        ret = nouveau_gem_new(dev, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = nouveau_gem_info(nvbo->gem, &req->info);
        if (ret)
                goto out;

        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(nvbo->gem);
out:
        return ret;
}

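/*
 * Pick TTM placement flags for a bo from the domains userspace asked
 * for.  Write domains take priority over read domains, and the bo is
 * left where it currently sits whenever that placement is still among
 * the requested domains, to avoid a needless migration.
 */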
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
                return -EINVAL;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                valid_flags |= TTM_PL_FLAG_VRAM;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                valid_flags |= TTM_PL_FLAG_TT;

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
                pref_flags |= TTM_PL_FLAG_TT;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else
                pref_flags |= TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

        return 0;
}

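/*
 * Pushbuf buffer validation.  Buffers are reserved and sorted onto
 * three lists according to where they are allowed to live (VRAM only,
 * GART only, or either); validate_init() takes the reservations,
 * validate_list() places and syncs each list, and validate_fini()
 * backs everything off again, attaching the submission fence if one
 * is supplied.
 */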
struct validate_op {
        struct list_head vram_list;
        struct list_head gart_list;
        struct list_head both_list;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
        struct list_head *entry, *tmp;
        struct nouveau_bo *nvbo;

        list_for_each_safe(entry, tmp, list) {
                nvbo = list_entry(entry, struct nouveau_bo, entry);

                nouveau_bo_fence(nvbo, fence);

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_unreference_unlocked(nvbo->gem);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
        validate_fini_list(&op->vram_list, fence);
        validate_fini_list(&op->gart_list, fence);
        validate_fini_list(&op->both_list, fence);
}

static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t sequence;
        int trycnt = 0;
        int ret, i;

        sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
        if (++trycnt > 100000) {
                NV_ERROR(dev, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(dev, file_priv, b->handle);
                if (!gem) {
                        NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
                        validate_fini(op, NULL);
                        return -ENOENT;
                }
                nvbo = gem->driver_private;

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_ERROR(dev, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
                if (ret) {
                        validate_fini(op, NULL);
                        if (unlikely(ret == -EAGAIN))
                                ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
                        drm_gem_object_unreference_unlocked(gem);
                        if (unlikely(ret)) {
                                if (ret != -ERESTARTSYS)
                                        NV_ERROR(dev, "fail reserve\n");
                                return ret;
                        }
                        goto retry;
                }

                b->user_priv = (uint64_t)(unsigned long)nvbo;
                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &op->both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &op->vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &op->gart_list);
                else {
                        NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &op->both_list);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }
        }

        return 0;
}

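/*
 * For each bo on the list: sync with its last fence, apply the
 * requested domains, validate placement through TTM, then push any
 * changed "presumed" offset/domain back to userspace so it can fix up
 * future submissions.  Returns the number of buffers whose presumed
 * state went stale (i.e. that need relocations applied), or a
 * negative error code.
 */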
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
              struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                                (void __force __user *)(uintptr_t)user_pbbo_ptr;
        struct drm_device *dev = chan->dev;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail pre-validate sync\n");
                        return ret;
                }

                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail set_domain\n");
                        return ret;
                }

                nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
                ret = nouveau_bo_validate(nvbo, true, false, false);
                nvbo->channel = NULL;
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_ERROR(dev, "fail ttm_validate\n");
                        return ret;
                }

                ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail post-validate sync\n");
                        return ret;
                }

                if (nvbo->bo.offset == b->presumed.offset &&
                    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                        continue;

                if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                        b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                else
                        b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                b->presumed.offset = nvbo->bo.offset;
                b->presumed.valid = 0;
                relocs++;

                if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
                                     &b->presumed, sizeof(b->presumed)))
                        return -EFAULT;
        }

        return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             uint64_t user_buffers, int nr_buffers,
                             struct validate_op *op, int *apply_relocs)
{
        struct drm_device *dev = chan->dev;
        int ret, relocs = 0;

        INIT_LIST_HEAD(&op->vram_list);
        INIT_LIST_HEAD(&op->gart_list);
        INIT_LIST_HEAD(&op->both_list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate vram_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate gart_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate both_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        *apply_relocs = relocs;
        return 0;
}

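/*
 * Copy an array of nmemb elements of 'size' bytes from userspace into
 * a kmalloc'd buffer.  nmemb * size is not checked for overflow here;
 * callers are expected to bound nmemb first (see the NOUVEAU_GEM_MAX_*
 * checks in nouveau_gem_ioctl_pushbuf()).
 */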
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        mem = kmalloc(nmemb * size, GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
                kfree(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

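/*
 * Apply userspace-supplied relocations: for every entry whose presumed
 * bo state went stale during validation, compute the low or high dword
 * of the real offset (optionally OR'ing in a placement-dependent
 * value), wait for the containing bo to idle, and patch the word in
 * place through a CPU mapping.
 */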
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        int ret = 0;
        unsigned i;

        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
        if (IS_ERR(reloc))
                return PTR_ERR(reloc);

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_ERROR(dev, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_ERROR(dev, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_ERROR(dev, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_ERROR(dev, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                spin_lock(&nvbo->bo.bdev->fence_lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, false);
                spin_unlock(&nvbo->bo.bdev->fence_lock);
                if (ret) {
                        NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        kfree(reloc);
        return ret;
}

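/*
 * GEM_PUSHBUF ioctl (DRM_IOCTL_NOUVEAU_GEM_PUSHBUF): the command
 * submission path.  The push, buffer and reloc arrays are copied in
 * from userspace, every bo is validated and fenced, relocations are
 * applied if needed, and the push buffers are then submitted one of
 * three ways: as IB entries where the channel has an IB ring
 * (chan->dma.ib_max), via a pushbuf call on nv25 and later, or by
 * patching a jump into the pushbuf on earlier chips.  suffix0/suffix1
 * in the reply tell userspace what to append to its pushbuf before
 * the next submission.
 */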
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0, do_reloc = 0;

        chan = nouveau_channel_get(file_priv, req->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        req->vram_available = dev_priv->fb_aper_free;
        req->gart_available = dev_priv->gart_info.aper_free;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                nouveau_channel_put(&chan);
                return -EINVAL;
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                nouveau_channel_put(&chan);
                return -EINVAL;
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                nouveau_channel_put(&chan);
                return -EINVAL;
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push)) {
                nouveau_channel_put(&chan);
                return PTR_ERR(push);
        }

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                kfree(push);
                nouveau_channel_put(&chan);
                return PTR_ERR(bo);
        }

        /* Mark push buffers as being used on PFIFO; the validation code
         * will then ensure that any move of a pushbuf bo happens on the
         * kernel channel, which in turn forces a sync before we try to
         * submit the push buffer.
         */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_ERROR(dev, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }

                bo[push[i].bo_index].read_domains |= (1 << 31);
        }

        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
                if (ret) {
                        NV_ERROR(dev, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
                if (ret) {
                        NV_INFO(dev, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, nvbo, push[i].offset,
                                      push[i].length);
                }
        } else
        if (dev_priv->chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_ERROR(dev, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

                        OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
                                        push[i].offset) | 2);
                        OUT_RING(chan, 0);
                }
        } else {
                ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_ERROR(dev, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
                        uint32_t cmd;

                        cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
                                        push[i].offset) | 0x20000000);
                        OUT_RING(chan, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                OUT_RING(chan, 0);
                }
        }

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret) {
                NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

out:
        validate_fini(&op, fence);
        nouveau_fence_unref(&fence);

out_prevalid:
        kfree(bo);
        kfree(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (dev_priv->chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        nouveau_channel_put(&chan);
        return ret;
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
        uint32_t flags = 0;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;

        return flags;
}

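/*
 * CPU_PREP waits (or, with NOUVEAU_GEM_CPU_PREP_NOWAIT, just polls)
 * for the GPU to finish with a buffer before the CPU touches it.
 * CPU_FINI is currently a no-op; nothing needs undoing afterwards.
 */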
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        int ret = -EINVAL;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
        spin_unlock(&nvbo->bo.bdev->fence_lock);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(gem, req);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}