/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);

u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	/* locate the page of the IB chunk that holds dword idx */
	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	/* fast path: the two most recently used pages are kept mapped */
	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	/* slow path: map the needed page into one of the two cached slots */
	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

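/*
 * Worked example of the page math above (illustrative values only): with
 * 4KB pages, dword index idx = 1500 gives a byte offset of 6000, so
 * pg_idx = 6000 / 4096 = 1 and pg_offset = 6000 % 4096 = 1904; the dword
 * is then read from kpage[slot][1904/4] = kpage[slot][476].
 */
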
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

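/*
 * Note on the wraparound above: ptr_mask is (ring_size / 4) - 1, set up in
 * radeon_ring_init(), so for an illustrative 64KB ring (16384 dwords) a
 * write at wptr == 16383 advances wptr to 16384, which the mask wraps
 * back to 0.
 */
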
void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
	struct radeon_ib *ib, *n;

	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
		list_del(&ib->list);
		vfree(ib->ptr);
		kfree(ib);
	}
}

/* Keep a copy of a bad IB on the bogus list so debugfs can dump it later. */
void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ib *bib;

	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
	if (bib == NULL)
		return;
	bib->ptr = vmalloc(ib->length_dw * 4);
	if (bib->ptr == NULL) {
		kfree(bib);
		return;
	}
	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
	bib->length_dw = ib->length_dw;
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
	mutex_unlock(&rdev->ib_pool.mutex);
}

/*
 * IB.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen: it means every IB in the pool is
		 * allocated and none has been scheduled yet. Return -EBUSY to
		 * userspace in the hope that a retry of the ioctl has better
		 * luck.
		 */
		dev_err(rdev->dev, "no free indirect buffer!\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting for fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	if (!tmp->fence->emitted)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* Nothing in the IB, or the ring isn't ready: nothing to do. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled, an IB is considered free and protected by its fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

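/*
 * Typical IB lifecycle for a caller (a minimal sketch; the command
 * submission path in radeon_cs.c follows this shape):
 *
 *	struct radeon_ib *ib;
 *	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib);
 *	if (r)
 *		return r;
 *	...copy packets into ib->ptr and set ib->length_dw...
 *	r = radeon_ib_schedule(rdev, ib);
 *	radeon_ib_free(rdev, &ib);
 *
 * radeon_ib_free() is safe to call whether or not the schedule succeeded.
 */
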
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
	/* Allocate 1M object buffer */
	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	if (radeon_debugfs_ring_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	return r;
}

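/*
 * Layout of the pool buffer (illustrative): one pinned GTT object split
 * into RADEON_IB_POOL_SIZE fixed 64KB slots, so slot i lives at
 * gpu_addr + i * 64 * 1024 and is CPU-visible at ptr + the same offset.
 * A pool size of 16 gives the 1MB mentioned in the comment above.
 */
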
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *robj;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	radeon_ib_bogus_cleanup(rdev);
	robj = rdev->ib_pool.robj;
	rdev->ib_pool.robj = NULL;
	mutex_unlock(&rdev->ib_pool.mutex);

	if (robj) {
		r = radeon_bo_reserve(robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(robj);
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
		radeon_bo_unref(&robj);
	}
}

/*
 * Ring.
 */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

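/*
 * Worked example of the free-size computation (illustrative numbers): for
 * a 16384-dword ring with rptr = 100 and wptr = 200,
 *	free = ((100 + 16384) - 200) & 16383 = 16284
 * and with wptr = 50 (the writer has wrapped past the reader),
 *	free = ((100 + 16384) - 50) & 16383 = 50.
 * A result of 0 is treated above as a completely empty ring, which is safe
 * because radeon_ring_alloc() always leaves at least one dword unused.
 */
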
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

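/*
 * Example of the alignment above (illustrative): with align_mask = 15
 * (a 16-dword fetch granularity), a request for ndw = 3 is rounded up to
 * 16, guaranteeing radeon_ring_commit() can always pad out to a fetch
 * boundary within the reserved space.
 */
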
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&ring->mutex);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&ring->mutex);
		return r;
	}
	return 0;
}

void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	/* read back to flush the posted register write */
	(void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&ring->mutex);
}

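/*
 * Typical ring usage for a caller (a minimal sketch; header and payload
 * stand in for real packet dwords):
 *
 *	r = radeon_ring_lock(rdev, ring, 4);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, header);
 *	radeon_ring_write(ring, payload);
 *	radeon_ring_unlock_commit(rdev, ring);
 *
 * On an error between lock and commit, radeon_ring_unlock_undo() below
 * rolls wptr back to wptr_old so the partially written dwords are never
 * fetched by the GPU.
 */
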
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
	mutex_unlock(&ring->mutex);
}

int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&ring->mutex);
	ring_obj = ring->ring_obj;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&ring->mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_device *rdev = node->info_ent->data;
	struct radeon_ib *ib;
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (list_empty(&rdev->ib_pool.bogus_ib)) {
		mutex_unlock(&rdev->ib_pool.mutex);
		seq_printf(m, "no bogus IB recorded\n");
		return 0;
	}
	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
	list_del_init(&ib->list);
	mutex_unlock(&rdev->ib_pool.mutex);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	vfree(ib->ptr);
	kfree(ib);
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
					ARRAY_SIZE(radeon_debugfs_ring_info_list));
#else
	return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
	if (r)
		return r;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}