/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);

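/**
 * radeon_get_ib_value - fetch one dword from the IB chunk being parsed
 * @p: parser structure holding the chunk list
 * @idx: dword index into the IB chunk
 *
 * The chunk is mapped one page at a time through a two-entry page cache
 * (kpage[0]/kpage[1]); on a miss radeon_cs_update_pages() maps the needed
 * page.  On failure p->parser_error is set and 0 is returned.
 */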
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

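/**
 * radeon_ring_write - copy one dword into the ring buffer
 * @ring: ring to write to
 * @v: dword to write
 *
 * The caller must already have reserved enough space with
 * radeon_ring_lock()/radeon_ring_alloc(); count_dw tracks what is left
 * of that reservation.
 */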
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

/*
 * IB.
 */
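/* Free an IB back to the pool if its fence has been emitted and signaled.
 * Returns true if the fence and suballocation were actually released.
 */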
static bool radeon_ib_try_free(struct radeon_device *rdev,
			       struct radeon_ib *ib)
{
	bool done = false;

	/* only free IBs which have been emitted */
	if (ib->fence && ib->fence->emitted) {
		if (radeon_fence_signaled(ib->fence)) {
			radeon_fence_unref(&ib->fence);
			radeon_sa_bo_free(rdev, &ib->sa_bo);
			done = true;
		}
	}
	return done;
}

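/**
 * radeon_ib_get - grab a free IB from the pool
 * @rdev: radeon device
 * @ring: index of the ring the IB's fence will be emitted on
 * @ib: on success, set to the allocated pool entry
 *
 * Scans the pool starting at head_id for an entry whose fence has signaled
 * and suballocates 64KB of IB space for it.  If every IB is still in
 * flight, the oldest fence is waited on and the scan retried, up to 5
 * times, before giving up with -ENOMEM.
 */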
int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an IB after 5 retries\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     64*1024, 64);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
				(*ib)->fence = fence;
				/* IBs are most likely to be allocated in a ring
				 * fashion, thus rdev->ib_pool.head_id should be
				 * the id of the oldest IB
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
				mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* This should be a rare event, i.e. all IBs are scheduled but none
	 * has signaled yet.
	 */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		if (rdev->ib_pool.ibs[idx].fence) {
			r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
			if (!r) {
				goto retry;
			}
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}

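/* Return an IB to the pool.  If the IB was never emitted, its storage and
 * fence are released immediately; otherwise radeon_ib_try_free() reclaims
 * it later, once the fence has signaled.
 */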
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	if (tmp->fence && !tmp->fence->emitted) {
		radeon_sa_bo_free(rdev, &tmp->sa_bo);
		radeon_fence_unref(&tmp->fence);
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}

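/**
 * radeon_ib_schedule - hand an IB over to the hardware
 *
 * Reserves 64 ring dwords (enough for the IB dispatch plus its fence),
 * lets the ASIC-specific code emit the IB packet, emits the fence and
 * commits the ring.
 */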
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing in the IB we should report. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

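/* Set up the suballocator backing the IB pool: one GTT buffer of
 * RADEON_IB_POOL_SIZE * 64KB out of which individual IBs are carved on
 * demand.  Calling this twice is harmless; the second call is a no-op.
 */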
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int i, r;

	mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		mutex_unlock(&rdev->ib_pool.mutex);
		return 0;
	}

	r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		mutex_unlock(&rdev->ib_pool.mutex);
		return r;
	}

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		rdev->ib_pool.ibs[i].fence = NULL;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");

	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	if (radeon_debugfs_ring_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}

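/* Free every pool entry and tear down the backing suballocator. */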
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
		}
		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
		rdev->ib_pool.ready = false;
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}

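/* These two helpers simply forward to the suballocator manager, which
 * presumably makes the pool's backing buffer GPU-accessible on start and
 * releases it on suspend so the GPU never fetches from an unpinned IB.
 */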
int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

/*
 * Ring.
 */
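/* Map a ring pointer back to its index in rdev->ring[]. */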
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only have the CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

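/* Refresh the cached read pointer (from the writeback page when enabled,
 * otherwise from the rptr register) and recompute the free dword count.
 * rptr == wptr means the ring is empty, which is why a computed free size
 * of 0 is corrected to the full ring size.
 */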
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

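/* Reserve ndw dwords in the ring, waiting for fences to retire ring space
 * if necessary.  One dword is always kept free so the write pointer can
 * never catch up with the read pointer, which would look like an empty
 * ring.
 */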
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

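/* Take the ring mutex and reserve space.  A typical sequence (a sketch;
 * real dword counts and packets are ASIC specific):
 *
 *	r = radeon_ring_lock(rdev, ring, 64);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, ...);
 *	radeon_ring_unlock_commit(rdev, ring);
 */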
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&ring->mutex);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&ring->mutex);
		return r;
	}
	return 0;
}

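/* Pad the ring to the fetch size and publish the new write pointer.  The
 * memory barrier orders the ring writes before the wptr update; the dummy
 * read-back of wptr_reg flushes the posted register write.
 */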
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

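/* radeon_ring_unlock_commit()/radeon_ring_unlock_undo() close a
 * radeon_ring_lock() section, either committing the written dwords or
 * rolling the write pointer back to where it was.
 */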
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&ring->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
	mutex_unlock(&ring->mutex);
}

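/* Allocate, pin and map the ring buffer object in GTT.  ring_size must be
 * a power of two, since ptr_mask is derived from it (see the free size
 * computation above).
 */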
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	return 0;
}

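/* Unmap, unpin and free the ring buffer object.  The pointers are cleared
 * under the mutex so concurrent users see the ring disappear atomically;
 * the BO itself is released outside the lock.
 */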
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&ring->mutex);
	ring_obj = ring->ring_obj;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&ring->mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

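/* Dump a ring's registers, driver-side pointers and pending dwords. */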
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
					ARRAY_SIZE(radeon_debugfs_ring_info_list));
#else
	return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}