/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static const u32 crtc_offsets[6] =
{
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET
};

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
				     int ring, u32 cp_int_cntl);

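/**
 * evergreen_tiling_fields - unpack tiling flags into ADDR_SURF register fields
 *
 * @tiling_flags: combined radeon tiling flags
 * @bankw: bank width, returned as its ADDR_SURF encoding
 * @bankh: bank height, returned as its ADDR_SURF encoding
 * @mtaspect: macro tile aspect, returned as its ADDR_SURF encoding
 * @tile_split: returned tile split selection
 *
 * Extract the bank width/height, macro tile aspect and tile split fields
 * from the tiling flags and convert the raw 1/2/4/8 values into the
 * EVERGREEN_ADDR_SURF_* register encodings (evergreen+).
 */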
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}

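/**
 * sumo_set_uvd_clock - program a single UVD clock and wait for it to lock
 *
 * @rdev: radeon_device pointer
 * @clock: requested clock frequency
 * @cntl_reg: clock control register to program
 * @status_reg: status register carrying the DCLK_STATUS lock bit
 *
 * Look up the PLL dividers for the requested frequency in the atom
 * tables, program the post divider and poll (up to ~1s) until the
 * status register reports the clock as locked.
 * Returns 0 on success, -ETIMEDOUT if the clock never locks.
 */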
static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
			      u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					   clock, false, &dividers);
	if (r)
		return r;

	WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));

	for (i = 0; i < 100; i++) {
		if (RREG32(status_reg) & DCLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

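/**
 * sumo_set_uvd_clocks - set the UVD vclk and dclk (sumo)
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock
 * @dclk: requested UVD decoder clock
 *
 * Program both UVD clocks and record the resulting frequencies (in MHz)
 * in the CG_SCRATCH1 register for later readback.
 * Returns 0 on success, error on failure.
 */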
int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	int r = 0;
	u32 cg_scratch = RREG32(CG_SCRATCH1);

	r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
	if (r)
		goto done;
	cg_scratch &= 0xffff0000;
	cg_scratch |= vclk / 100; /* MHz */

	r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
	if (r)
		goto done;
	cg_scratch &= 0x0000ffff;
	cg_scratch |= (dclk / 100) << 16; /* MHz */

done:
	WREG32(CG_SCRATCH1, cg_scratch);

	return r;
}

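/**
 * evergreen_uvd_calc_post_div - calculate a UPLL post divider
 *
 * @target_freq: frequency the divided clock must not exceed
 * @vco_freq: VCO frequency to divide down
 * @div: returned post divider
 *
 * Pick the smallest divider that brings the VCO output to or below the
 * target frequency, rounding up to an even divider above 5.
 * Returns the resulting frequency, or -1 if the target is unreachable
 * (VCO too low or divider out of range).
 */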
static int evergreen_uvd_calc_post_div(unsigned target_freq,
				       unsigned vco_freq,
				       unsigned *div)
{
	/* target larger than vco frequency ? */
	if (vco_freq < target_freq)
		return -1; /* forget it */

	/* Fclk = Fvco / PDIV */
	*div = vco_freq / target_freq;

	/* we always need a frequency less than or equal to the target */
	if ((vco_freq / *div) > target_freq)
		*div += 1;

	/* dividers above 5 must be even */
	if (*div > 5 && *div % 2)
		*div += 1;

	/* out of range ? */
	if (*div >= 128)
		return -1; /* forget it */

	return vco_freq / *div;
}

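/**
 * evergreen_uvd_send_upll_ctlreq - handshake a UPLL change request
 *
 * @rdev: radeon_device pointer
 *
 * Assert UPLL_CTLREQ, wait (up to ~1s) for the hardware to raise both
 * CTLACK bits, then deassert the request again.
 * Returns 0 on success, -ETIMEDOUT if the acks never show up.
 */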
static int evergreen_uvd_send_upll_ctlreq(struct radeon_device *rdev)
{
	unsigned i;

	/* assert UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* deassert UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);

	return 0;
}

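/**
 * evergreen_set_uvd_clocks - set the UVD vclk and dclk (evergreen+)
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock
 * @dclk: requested UVD decoder clock
 *
 * Search the VCO range for the feedback/post divider combination that
 * best matches both requested clocks, then walk the UPLL through
 * bypass, reprogramming and relock.
 * Returns 0 on success, error on failure.
 */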
int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	/* start off with something large */
	int optimal_diff_score = 0x7FFFFFF;
	unsigned optimal_fb_div = 0, optimal_vclk_div = 0;
	unsigned optimal_dclk_div = 0, optimal_vco_freq = 0;
	unsigned vco_freq;
	int r;

	/* loop through vco from low to high */
	for (vco_freq = 125000; vco_freq <= 250000; vco_freq += 100) {
		unsigned fb_div = vco_freq / rdev->clock.spll.reference_freq * 16384;
		int calc_clk, diff_score, diff_vclk, diff_dclk;
		unsigned vclk_div, dclk_div;

		/* fb div out of range ? */
		if (fb_div > 0x03FFFFFF)
			break; /* it can only get worse */

		/* calc vclk with current vco freq. */
		calc_clk = evergreen_uvd_calc_post_div(vclk, vco_freq, &vclk_div);
		if (calc_clk == -1)
			break; /* vco is too big, it has to stop. */
		diff_vclk = vclk - calc_clk;

		/* calc dclk with current vco freq. */
		calc_clk = evergreen_uvd_calc_post_div(dclk, vco_freq, &dclk_div);
		if (calc_clk == -1)
			break; /* vco is too big, it has to stop. */
		diff_dclk = dclk - calc_clk;

		/* determine if this vco setting is better than current optimal settings */
		diff_score = abs(diff_vclk) + abs(diff_dclk);
		if (diff_score < optimal_diff_score) {
			optimal_fb_div = fb_div;
			optimal_vclk_div = vclk_div;
			optimal_dclk_div = dclk_div;
			optimal_vco_freq = vco_freq;
			optimal_diff_score = diff_score;
			if (optimal_diff_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* set VCO_MODE to 1 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);

	/* toggle UPLL_SLEEP to 1 then back to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);

	/* deassert UPLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(1);

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);

	r = evergreen_uvd_send_upll_ctlreq(rdev);
	if (r)
		return r;

	/* assert UPLL_RESET again */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* disable spread spectrum. */
	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div), ~UPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);

	if (optimal_vco_freq < 187500)
		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
	else
		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);

	/* set PDIV_A and PDIV_B */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_PDIV_A(optimal_vclk_div) | UPLL_PDIV_B(optimal_dclk_div),
		 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	r = evergreen_uvd_send_upll_ctlreq(rdev);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}

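/**
 * evergreen_fix_pci_max_read_req_size - fix up the PCIE max read request size
 *
 * @rdev: radeon_device pointer
 *
 * Read MAX_READ_REQUEST_SIZE from the PCIE device control register and
 * force it to a valid setting (512 bytes) if the BIOS or OS left an
 * invalid value behind (evergreen+).
 */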
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int err;

	err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
	if (err)
		return;

	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
	}
}

/**
 * dce4_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	int i;

	if (crtc >= rdev->num_crtc)
		return;

	if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
				break;
			udelay(1);
		}
	}
}

/**
 * evergreen_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (evergreen+).
 * Enables the pageflip irq (vblank irq).
 */
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

/**
 * evergreen_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (evergreen+).
 * Disables the pageflip irq (vblank irq).
 */
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

/**
 * evergreen_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}

/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}

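/* get temperature in millidegrees (sumo) */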
int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}

/**
 * sumo_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (sumo, trinity, SI).
 * Used for profile mode only.
 */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

/**
 * btc_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (BTC, cayman).
 * Used for profile mode only.
 */
void btc_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
	/* starting with BTC, there is one state that is used for both
	 * MH and SH.  Difference is that we always use the high clock index for
	 * mclk.
	 */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}

/**
 * evergreen_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, etc.) (evergreen+).
 */
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			voltage = &rdev->pm.power_state[req_ps_idx].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;

		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}

/**
 * evergreen_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (evergreen+).
 */
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/**
 * evergreen_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (evergreen+).
 */
void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/**
 * evergreen_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * evergreen_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * evergreen_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enabled = 0;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; this avoids
			 * breaking the aux dp channel on imac and helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			continue;
		}
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
		enabled |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_enable_hpd(rdev, enabled);
}

/**
 * evergreen_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disabled = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		disabled |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disabled);
}

/* watermark setup */

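/**
 * evergreen_line_buffer_adjust - set up the line buffer split for a crtc pair
 *
 * @rdev: radeon_device pointer
 * @radeon_crtc: the crtc to set up
 * @mode: the mode currently set on this crtc
 * @other_mode: the mode set on the paired crtc
 *
 * Program DC_LB_MEMORY_SPLIT for the crtc and return the line buffer
 * size allocated to it, or 0 if the crtc is disabled (evergreen+).
 */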
static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

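/**
 * evergreen_get_number_of_dram_channels - look up the dram channel count
 *
 * @rdev: radeon_device pointer
 *
 * Decode the NOOFCHAN field of MC_SHARED_CHMAP into the number of dram
 * channels (1/2/4/8) used by the bandwidth calculations (evergreen+).
 */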
u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}

struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth.  Display can use this temporarily but not in average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

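/**
 * evergreen_program_watermarks - program display watermarks for a crtc
 *
 * @rdev: radeon_device pointer
 * @radeon_crtc: the crtc to program
 * @lb_size: line buffer size allocated to this crtc
 * @num_heads: number of active display heads
 *
 * Calculate the latency watermarks for the A (high clock) and B (low
 * clock) arbitration sets, program both sets for the pipe and write
 * the resulting priority marks (evergreen+).
 */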
static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}

/**
 * evergreen_bandwidth_update - update display watermarks callback.
 *
 * @rdev: radeon_device pointer
 *
 * Update the display watermarks based on the requested mode(s)
 * (evergreen+).
 */
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

/**
 * evergreen_mc_wait_for_idle - wait for MC idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * (evergreen+).
 * Returns 0 if the MC is idle, -1 if not.
 */
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * GART
 */
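/**
 * evergreen_pcie_gart_tlb_flush - flush the GART TLB
 *
 * @rdev: radeon_device pointer
 *
 * Flush the HDP cache, request a VM context 0 TLB flush and poll the
 * response register until the flush completes or times out (evergreen+).
 */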
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

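/**
 * evergreen_pcie_gart_enable - set up and enable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in VRAM, program the L2 cache and TLB
 * control registers, set up VM context 0 over the GTT aperture and
 * flush the TLB (evergreen+).
 * Returns 0 on success, error on failure.
 */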
static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	if (rdev->flags & RADEON_IS_IGP) {
		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
	} else {
		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
		if ((rdev->family == CHIP_JUNIPER) ||
		    (rdev->family == CHIP_CYPRESS) ||
		    (rdev->family == CHIP_HEMLOCK) ||
		    (rdev->family == CHIP_BARTS))
			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
	}
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001497{
1498 evergreen_pcie_gart_disable(rdev);
1499 radeon_gart_table_vram_free(rdev);
1500 radeon_gart_fini(rdev);
1501}
1502
1503
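/**
 * evergreen_agp_enable - MC setup for the AGP (non-GART) path
 * @rdev: radeon_device pointer
 *
 * Programs the same L2/TLB defaults as the GART path but leaves
 * both VM contexts disabled, so no on-chip page table is consulted.
 */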
static void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}

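/**
 * evergreen_mc_stop - stop MC access to VRAM and blank the displays
 * @rdev: radeon_device pointer
 * @save: state to hand back to evergreen_mc_resume()
 *
 * Saves the VGA render state, blanks every active CRTC (waiting a
 * full frame so in-flight scanout requests can drain), then puts
 * the MC into blackout mode and blocks CPU framebuffer access.
 */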
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count, blackout;
	int i, j;

	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(VGA_RENDER_CONTROL, 0);
	/* blank the display controllers */
	for (i = 0; i < rdev->num_crtc; i++) {
		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			if (ASIC_IS_DCE6(rdev)) {
				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
					radeon_wait_for_vblank(rdev, i);
					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
				}
			} else {
				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
					radeon_wait_for_vblank(rdev, i);
					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
				}
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}
		} else {
			save->crtc_enabled[i] = false;
		}
	}

	radeon_mc_wait_for_idle(rdev);

	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
		/* Block CPU access */
		WREG32(BIF_FB_EN, 0);
		/* blackout the MC */
		blackout &= ~BLACKOUT_MODE_MASK;
		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

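/**
 * evergreen_mc_resume - undo evergreen_mc_stop()
 * @rdev: radeon_device pointer
 * @save: state saved by evergreen_mc_stop()
 *
 * Repoints all CRTC and VGA base addresses at the (possibly moved)
 * VRAM aperture, lifts the MC blackout, re-allows CPU framebuffer
 * access, then walks the CRTCs that were enabled on entry and
 * reprograms their blank/read-request controls under update lock.
 */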
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < rdev->num_crtc; i++) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
	}
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);

	/* unblackout the MC */
	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
	tmp &= ~BLACKOUT_MODE_MASK;
	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			if (ASIC_IS_DCE6(rdev)) {
				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			} else {
				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}
	/* Unlock vga access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

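/**
 * evergreen_mc_program - program the memory-controller apertures
 * @rdev: radeon_device pointer
 *
 * With the displays stopped, programs the system/VRAM/AGP aperture
 * ranges and the HDP non-surface window so the MC's view of memory
 * matches the layout the driver placed in rdev->mc.
 */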
void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	/* llano/ontario only */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2)) {
		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
	}
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/*
 * CP.
 */
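/**
 * evergreen_ring_ib_execute - schedule an indirect buffer on the gfx ring
 * @rdev: radeon_device pointer
 * @ib: the IB to schedule
 *
 * Switches the CP to DX10/11 mode, arranges for the next read
 * pointer to be reported (via the rptr save register, or the
 * writeback page when enabled) and emits the INDIRECT_BUFFER packet.
 */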
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}

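/**
 * evergreen_cp_load_microcode - load the PFP and ME microcode
 * @rdev: radeon_device pointer
 *
 * Halts the CP, then streams the big-endian firmware images fetched
 * at init time into the PFP and ME ucode RAMs and resets the ucode
 * read/write addresses.
 */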
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

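/**
 * evergreen_cp_start - bootstrap the command processor
 * @rdev: radeon_device pointer
 *
 * Emits ME_INITIALIZE, un-halts the CP, then pushes the
 * evergreen_default_state table between PREAMBLE begin/end clear-state
 * markers so contexts start from a known register baseline.
 */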
static int evergreen_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(ring, evergreen_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	return 0;
}

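/**
 * evergreen_cp_resume - (re)start the gfx ring
 * @rdev: radeon_device pointer
 *
 * Soft-resets the CP front end, programs the ring buffer size,
 * read/write pointers and writeback addresses, then starts the CP
 * and ring-tests it before declaring the ring ready.
 */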
static int evergreen_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	ring->rptr = RREG32(CP_RB_RPTR);

	evergreen_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	return 0;
}

/*
 * Core functions
 */
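/**
 * evergreen_gpu_init - golden-register setup for the 3D engine
 * @rdev: radeon_device pointer
 *
 * Fills rdev->config.evergreen with the per-ASIC limits, derives the
 * tiling configuration and render-backend map from GB_ADDR_CONFIG and
 * the efuse/disable straps, and programs the SQ/SX/PA/VGT defaults.
 */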
static void evergreen_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 sq_config;
	u32 sq_lds_resource_mgmt;
	u32 sq_gpr_resource_mgmt_1;
	u32 sq_gpr_resource_mgmt_2;
	u32 sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt;
	u32 sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1;
	u32 sq_stack_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_3;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_cntl, tmp;
	u32 disabled_rb_mask;
	int i, j, num_shader_engines, ps_thread_count;

	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_PALM:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_SUMO:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		if (rdev->pdev->device == 0x9648)
			rdev->config.evergreen.max_simds = 3;
		else if ((rdev->pdev->device == 0x9647) ||
			 (rdev->pdev->device == 0x964a))
			rdev->config.evergreen.max_simds = 4;
		else
			rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_SUMO2:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_BARTS:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 7;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_TURKS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 6;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CAICOS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
	else
		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	/* setup tiling info dword. gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.evergreen.tile_config = 0;
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		rdev->config.evergreen.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.evergreen.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.evergreen.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.evergreen.tile_config |= (3 << 0);
		break;
	}
	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.evergreen.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.evergreen.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.evergreen.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.evergreen.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.evergreen.tile_config |= 0 << 8;
	rdev->config.evergreen.tile_config |=
		((gb_addr_config & 0x30000000) >> 28) << 12;

	num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;

	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;

		WREG32(RCU_IND_INDEX, 0x204);
		efuse_straps_4 = RREG32(RCU_IND_DATA);
		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		tmp = (((efuse_straps_4 & 0xf) << 4) |
		      ((efuse_straps_3 & 0xf0000000) >> 28));
	} else {
		tmp = 0;
		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
			u32 rb_disable_bitmap;

			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
			tmp <<= 4;
			tmp |= rb_disable_bitmap;
		}
	}
	/* enabled rb are just the one not disabled :) */
	disabled_rb_mask = tmp;
	tmp = 0;
	for (i = 0; i < rdev->config.evergreen.max_backends; i++)
		tmp |= (1 << i);
	/* if all the backends are disabled, fix it up here */
	if ((disabled_rb_mask & tmp) == tmp) {
		for (i = 0; i < rdev->config.evergreen.max_backends; i++)
			disabled_rb_mask &= ~(1 << i);
	}

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

	if ((rdev->config.evergreen.max_backends == 1) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		if ((disabled_rb_mask & 3) == 1) {
			/* RB0 disabled, RB1 enabled */
			tmp = 0x11111111;
		} else {
			/* RB1 disabled, RB0 enabled */
			tmp = 0x00000000;
		}
	} else {
		tmp = gb_addr_config & NUM_PIPES_MASK;
		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
	}
	WREG32(GB_BACKEND_MAP, tmp);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
			     SYNC_GRADIENT |
			     SYNC_WALKER |
			     SYNC_ALIGNER));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	if (rdev->family <= CHIP_SUMO2)
		WREG32(SMX_SAR_CTL0, 0x00010000);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);
	WREG32(SPI_CONFIG_CNTL, 0);
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		break;
	default:
		break;
	}

	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
		ps_thread_count = 96;
		break;
	default:
		ps_thread_count = 128;
		break;
	}

	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);

	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
	WREG32(VGT_OUT_DEALLOC_CNTL, 16);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR8_BASE, 0);
	WREG32(CB_COLOR9_BASE, 0);
	WREG32(CB_COLOR10_BASE, 0);
	WREG32(CB_COLOR11_BASE, 0);

	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
		WREG32(i, 0);
	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
		WREG32(i, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

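/**
 * evergreen_mc_init - read out the VRAM configuration
 * @rdev: radeon_device pointer
 *
 * Derives the memory bus width from the channel size and count,
 * reads the VRAM size (bytes on fusion parts, MB elsewhere) and
 * places the VRAM and GTT apertures in the GPU address space.
 */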
int evergreen_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		tmp = RREG32(FUS_MC_ARB_RAMCFG);
	else
		tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could the aperture size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2)) {
		/* size in bytes on fusion */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	} else {
		/* size in MB on evergreen/cayman/tn */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	}
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}

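/**
 * evergreen_print_gpu_status_regs - dump the engine status registers
 * @rdev: radeon_device pointer
 *
 * Logs the GRBM/SRBM/CP/DMA status registers to aid reset debugging;
 * the second DMA engine's status is also dumped on cayman and newer.
 */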
void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, "  SRBM_STATUS2              = 0x%08X\n",
		RREG32(SRBM_STATUS2));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		RREG32(CP_STAT));
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		RREG32(DMA_STATUS_REG));
	if (rdev->family >= CHIP_CAYMAN) {
		dev_info(rdev->dev, "  R_00D834_DMA_STATUS_REG   = 0x%08X\n",
			 RREG32(DMA_STATUS_REG + 0x800));
	}
}

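/**
 * evergreen_is_display_hung - check whether the display controllers advance
 * @rdev: radeon_device pointer
 *
 * Samples the HV counter of every enabled CRTC and re-reads it up to
 * ten times, 100us apart; a CRTC whose counter never changes is
 * considered hung.
 */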
bool evergreen_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
			crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

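/**
 * evergreen_gpu_check_soft_reset - work out which blocks need a reset
 * @rdev: radeon_device pointer
 *
 * Translates the busy bits in GRBM_STATUS, DMA_STATUS_REG, SRBM_STATUS,
 * SRBM_STATUS2 and VM_L2_STATUS into a RADEON_RESET_* mask, treating a
 * busy MC as not actually hung.
 */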
static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   SH_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   SPI_BUSY | VGT_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	if (tmp & GRBM_EE_BUSY)
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);
	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

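/**
 * evergreen_gpu_soft_reset - soft-reset the requested blocks
 * @rdev: radeon_device pointer
 * @reset_mask: mask of RADEON_RESET_* blocks to reset
 *
 * Halts the CP and DMA engines, stops the MC, pulses the matching
 * GRBM/SRBM soft-reset bits and restores MC access afterwards.
 */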
2682static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
Alex Deucher0ecebb92013-01-03 12:40:13 -05002683{
2684 struct evergreen_mc_save save;
Alex Deucherb7630472013-01-18 14:28:41 -05002685 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
2686 u32 tmp;
Alex Deucher19fc42e2013-01-14 11:04:39 -05002687
Alex Deucher0ecebb92013-01-03 12:40:13 -05002688 if (reset_mask == 0)
Alex Deuchera65a4362013-01-18 18:55:54 -05002689 return;
Alex Deucher0ecebb92013-01-03 12:40:13 -05002690
2691 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
2692
Alex Deucherb7630472013-01-18 14:28:41 -05002693 evergreen_print_gpu_status_regs(rdev);
2694
Alex Deucherb7630472013-01-18 14:28:41 -05002695 /* Disable CP parsing/prefetching */
2696 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2697
2698 if (reset_mask & RADEON_RESET_DMA) {
2699 /* Disable DMA */
2700 tmp = RREG32(DMA_RB_CNTL);
2701 tmp &= ~DMA_RB_ENABLE;
2702 WREG32(DMA_RB_CNTL, tmp);
2703 }
2704
Alex Deucherb21b6e72013-01-23 18:57:56 -05002705 udelay(50);
2706
2707 evergreen_mc_stop(rdev, &save);
2708 if (evergreen_mc_wait_for_idle(rdev)) {
 2709 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2710 }
2711
Alex Deucherb7630472013-01-18 14:28:41 -05002712 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
2713 grbm_soft_reset |= SOFT_RESET_DB |
2714 SOFT_RESET_CB |
2715 SOFT_RESET_PA |
2716 SOFT_RESET_SC |
2717 SOFT_RESET_SPI |
2718 SOFT_RESET_SX |
2719 SOFT_RESET_SH |
2720 SOFT_RESET_TC |
2721 SOFT_RESET_TA |
2722 SOFT_RESET_VC |
2723 SOFT_RESET_VGT;
2724 }
2725
2726 if (reset_mask & RADEON_RESET_CP) {
2727 grbm_soft_reset |= SOFT_RESET_CP |
2728 SOFT_RESET_VGT;
2729
2730 srbm_soft_reset |= SOFT_RESET_GRBM;
2731 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002732
2733 if (reset_mask & RADEON_RESET_DMA)
Alex Deucherb7630472013-01-18 14:28:41 -05002734 srbm_soft_reset |= SOFT_RESET_DMA;
2735
Alex Deuchera65a4362013-01-18 18:55:54 -05002736 if (reset_mask & RADEON_RESET_DISPLAY)
2737 srbm_soft_reset |= SOFT_RESET_DC;
2738
2739 if (reset_mask & RADEON_RESET_RLC)
2740 srbm_soft_reset |= SOFT_RESET_RLC;
2741
2742 if (reset_mask & RADEON_RESET_SEM)
2743 srbm_soft_reset |= SOFT_RESET_SEM;
2744
2745 if (reset_mask & RADEON_RESET_IH)
2746 srbm_soft_reset |= SOFT_RESET_IH;
2747
2748 if (reset_mask & RADEON_RESET_GRBM)
2749 srbm_soft_reset |= SOFT_RESET_GRBM;
2750
2751 if (reset_mask & RADEON_RESET_VMC)
2752 srbm_soft_reset |= SOFT_RESET_VMC;
2753
Alex Deucher24178ec2013-01-24 15:00:17 -05002754 if (!(rdev->flags & RADEON_IS_IGP)) {
2755 if (reset_mask & RADEON_RESET_MC)
2756 srbm_soft_reset |= SOFT_RESET_MC;
2757 }
Alex Deuchera65a4362013-01-18 18:55:54 -05002758
Alex Deucherb7630472013-01-18 14:28:41 -05002759 if (grbm_soft_reset) {
2760 tmp = RREG32(GRBM_SOFT_RESET);
2761 tmp |= grbm_soft_reset;
2762 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2763 WREG32(GRBM_SOFT_RESET, tmp);
2764 tmp = RREG32(GRBM_SOFT_RESET);
2765
2766 udelay(50);
2767
2768 tmp &= ~grbm_soft_reset;
2769 WREG32(GRBM_SOFT_RESET, tmp);
2770 tmp = RREG32(GRBM_SOFT_RESET);
2771 }
2772
2773 if (srbm_soft_reset) {
2774 tmp = RREG32(SRBM_SOFT_RESET);
2775 tmp |= srbm_soft_reset;
2776 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2777 WREG32(SRBM_SOFT_RESET, tmp);
2778 tmp = RREG32(SRBM_SOFT_RESET);
2779
2780 udelay(50);
2781
2782 tmp &= ~srbm_soft_reset;
2783 WREG32(SRBM_SOFT_RESET, tmp);
2784 tmp = RREG32(SRBM_SOFT_RESET);
2785 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002786
2787 /* Wait a little for things to settle down */
2788 udelay(50);
2789
Alex Deucher747943e2010-03-24 13:26:36 -04002790 evergreen_mc_resume(rdev, &save);
Alex Deucherb7630472013-01-18 14:28:41 -05002791 udelay(50);
Alex Deucher410a3412013-01-18 13:05:39 -05002792
Alex Deucherb7630472013-01-18 14:28:41 -05002793 evergreen_print_gpu_status_regs(rdev);
Alex Deucher747943e2010-03-24 13:26:36 -04002794}
2795
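/**
 * evergreen_asic_reset - soft reset the GPU
 *
 * @rdev: radeon_device pointer
 *
 * Determine which blocks are hung, soft reset them, and flag the
 * engine as hung in the BIOS scratch registers while the reset is
 * in progress (evergreen). Returns 0.
 */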
Jerome Glissea2d07b72010-03-09 14:45:11 +00002796int evergreen_asic_reset(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002797{
Alex Deuchera65a4362013-01-18 18:55:54 -05002798 u32 reset_mask;
2799
2800 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2801
2802 if (reset_mask)
2803 r600_set_bios_scratch_engine_hung(rdev, true);
2804
2805 evergreen_gpu_soft_reset(rdev, reset_mask);
2806
2807 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2808
2809 if (!reset_mask)
2810 r600_set_bios_scratch_engine_hung(rdev, false);
2811
2812 return 0;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002813}
2814
Alex Deucher123bc182013-01-24 11:37:19 -05002815/**
2816 * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
2817 *
2818 * @rdev: radeon_device pointer
2819 * @ring: radeon_ring structure holding ring information
2820 *
2821 * Check if the GFX engine is locked up.
2822 * Returns true if the engine appears to be locked up, false if not.
2823 */
2824bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2825{
2826 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2827
2828 if (!(reset_mask & (RADEON_RESET_GFX |
2829 RADEON_RESET_COMPUTE |
2830 RADEON_RESET_CP))) {
2831 radeon_ring_lockup_update(ring);
2832 return false;
2833 }
2834 /* force CP activities */
2835 radeon_ring_force_activity(rdev, ring);
2836 return radeon_ring_test_lockup(rdev, ring);
2837}
2838
2839/**
2840 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
2841 *
2842 * @rdev: radeon_device pointer
2843 * @ring: radeon_ring structure holding ring information
2844 *
2845 * Check if the async DMA engine is locked up.
2846 * Returns true if the engine appears to be locked up, false if not.
2847 */
2848bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2849{
2850 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2851
2852 if (!(reset_mask & RADEON_RESET_DMA)) {
2853 radeon_ring_lockup_update(ring);
2854 return false;
2855 }
2856 /* force ring activities */
2857 radeon_ring_force_activity(rdev, ring);
2858 return radeon_ring_test_lockup(rdev, ring);
2859}
2860
Alex Deucher45f9a392010-03-24 13:55:51 -04002861/* Interrupts */
2862
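/**
 * evergreen_get_vblank_counter - get the frame count for a crtc
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to query
 *
 * Returns the hw frame counter of the requested crtc, or 0 if the
 * crtc index is out of range (evergreen).
 */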
2863u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
2864{
Alex Deucher46437052012-08-15 17:10:32 -04002865 if (crtc >= rdev->num_crtc)
Alex Deucher45f9a392010-03-24 13:55:51 -04002866 return 0;
Alex Deucher46437052012-08-15 17:10:32 -04002867 else
2868 return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
Alex Deucher45f9a392010-03-24 13:55:51 -04002869}
2870
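/**
 * evergreen_disable_interrupt_state - force all interrupt sources off
 *
 * @rdev: radeon_device pointer
 *
 * Clear the enable bits for the CP, DMA, GRBM, crtc, DAC autodetect
 * and HPD interrupt sources so that no new interrupts are generated
 * (evergreen).
 */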
2871void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2872{
2873 u32 tmp;
2874
Alex Deucher1b370782011-11-17 20:13:28 -05002875 if (rdev->family >= CHIP_CAYMAN) {
2876 cayman_cp_int_cntl_setup(rdev, 0,
2877 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2878 cayman_cp_int_cntl_setup(rdev, 1, 0);
2879 cayman_cp_int_cntl_setup(rdev, 2, 0);
Alex Deucherf60cbd12012-12-04 15:27:33 -05002880 tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2881 WREG32(CAYMAN_DMA1_CNTL, tmp);
Alex Deucher1b370782011-11-17 20:13:28 -05002882 } else
2883 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
Alex Deucher233d1ad2012-12-04 15:25:59 -05002884 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2885 WREG32(DMA_CNTL, tmp);
Alex Deucher45f9a392010-03-24 13:55:51 -04002886 WREG32(GRBM_INT_CNTL, 0);
2887 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2888 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002889 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05002890 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2891 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002892 }
2893 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05002894 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2895 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2896 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002897
2898 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2899 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002900 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05002901 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2902 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002903 }
2904 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05002905 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2906 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2907 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002908
Alex Deucher05b3ef62012-03-20 17:18:37 -04002909 /* only one DAC on DCE6 */
2910 if (!ASIC_IS_DCE6(rdev))
2911 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
Alex Deucher45f9a392010-03-24 13:55:51 -04002912 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2913
2914 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2915 WREG32(DC_HPD1_INT_CONTROL, tmp);
2916 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2917 WREG32(DC_HPD2_INT_CONTROL, tmp);
2918 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2919 WREG32(DC_HPD3_INT_CONTROL, tmp);
2920 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2921 WREG32(DC_HPD4_INT_CONTROL, tmp);
2922 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2923 WREG32(DC_HPD5_INT_CONTROL, tmp);
2924 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2925 WREG32(DC_HPD6_INT_CONTROL, tmp);
2926
2927}
2928
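/**
 * evergreen_irq_set - enable the requested interrupt sources
 *
 * @rdev: radeon_device pointer
 *
 * Program the CP, DMA, vblank/pageflip, hotplug and HDMI interrupt
 * enable registers from the state tracked in rdev->irq (evergreen).
 * Returns 0 for success, -EINVAL if no irq handler is installed.
 */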
2929int evergreen_irq_set(struct radeon_device *rdev)
2930{
2931 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
Alex Deucher1b370782011-11-17 20:13:28 -05002932 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
Alex Deucher45f9a392010-03-24 13:55:51 -04002933 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
2934 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
Alex Deucher2031f772010-04-22 12:52:11 -04002935 u32 grbm_int_cntl = 0;
Alex Deucher6f34be52010-11-21 10:59:01 -05002936 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
Alex Deucherf122c612012-03-30 08:59:57 -04002937 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
Alex Deucherf60cbd12012-12-04 15:27:33 -05002938 u32 dma_cntl, dma_cntl1 = 0;
Alex Deucher45f9a392010-03-24 13:55:51 -04002939
2940 if (!rdev->irq.installed) {
Joe Perchesfce7d612010-10-30 21:08:30 +00002941 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
Alex Deucher45f9a392010-03-24 13:55:51 -04002942 return -EINVAL;
2943 }
2944 /* don't enable anything if the ih is disabled */
2945 if (!rdev->ih.enabled) {
2946 r600_disable_interrupts(rdev);
2947 /* force the active interrupt state to all disabled */
2948 evergreen_disable_interrupt_state(rdev);
2949 return 0;
2950 }
2951
2952 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2953 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2954 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2955 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2956 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2957 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2958
Alex Deucherf122c612012-03-30 08:59:57 -04002959 afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2960 afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2961 afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2962 afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2963 afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2964 afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2965
Alex Deucher233d1ad2012-12-04 15:25:59 -05002966 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2967
Alex Deucher1b370782011-11-17 20:13:28 -05002968 if (rdev->family >= CHIP_CAYMAN) {
2969 /* enable CP interrupts on all rings */
Christian Koenig736fc372012-05-17 19:52:00 +02002970 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05002971 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
2972 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2973 }
Christian Koenig736fc372012-05-17 19:52:00 +02002974 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05002975 DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
2976 cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
2977 }
Christian Koenig736fc372012-05-17 19:52:00 +02002978 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05002979 DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
2980 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
2981 }
2982 } else {
Christian Koenig736fc372012-05-17 19:52:00 +02002983 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05002984 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
2985 cp_int_cntl |= RB_INT_ENABLE;
2986 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2987 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002988 }
Alex Deucher1b370782011-11-17 20:13:28 -05002989
Alex Deucher233d1ad2012-12-04 15:25:59 -05002990 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
 2991 DRM_DEBUG("evergreen_irq_set: sw int dma\n");
2992 dma_cntl |= TRAP_ENABLE;
2993 }
2994
Alex Deucherf60cbd12012-12-04 15:27:33 -05002995 if (rdev->family >= CHIP_CAYMAN) {
2996 dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2997 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
 2998 DRM_DEBUG("evergreen_irq_set: sw int dma1\n");
2999 dma_cntl1 |= TRAP_ENABLE;
3000 }
3001 }
3002
Alex Deucher6f34be52010-11-21 10:59:01 -05003003 if (rdev->irq.crtc_vblank_int[0] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003004 atomic_read(&rdev->irq.pflip[0])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003005 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
3006 crtc1 |= VBLANK_INT_MASK;
3007 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003008 if (rdev->irq.crtc_vblank_int[1] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003009 atomic_read(&rdev->irq.pflip[1])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003010 DRM_DEBUG("evergreen_irq_set: vblank 1\n");
3011 crtc2 |= VBLANK_INT_MASK;
3012 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003013 if (rdev->irq.crtc_vblank_int[2] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003014 atomic_read(&rdev->irq.pflip[2])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003015 DRM_DEBUG("evergreen_irq_set: vblank 2\n");
3016 crtc3 |= VBLANK_INT_MASK;
3017 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003018 if (rdev->irq.crtc_vblank_int[3] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003019 atomic_read(&rdev->irq.pflip[3])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003020 DRM_DEBUG("evergreen_irq_set: vblank 3\n");
3021 crtc4 |= VBLANK_INT_MASK;
3022 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003023 if (rdev->irq.crtc_vblank_int[4] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003024 atomic_read(&rdev->irq.pflip[4])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003025 DRM_DEBUG("evergreen_irq_set: vblank 4\n");
3026 crtc5 |= VBLANK_INT_MASK;
3027 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003028 if (rdev->irq.crtc_vblank_int[5] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003029 atomic_read(&rdev->irq.pflip[5])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003030 DRM_DEBUG("evergreen_irq_set: vblank 5\n");
3031 crtc6 |= VBLANK_INT_MASK;
3032 }
3033 if (rdev->irq.hpd[0]) {
3034 DRM_DEBUG("evergreen_irq_set: hpd 1\n");
3035 hpd1 |= DC_HPDx_INT_EN;
3036 }
3037 if (rdev->irq.hpd[1]) {
3038 DRM_DEBUG("evergreen_irq_set: hpd 2\n");
3039 hpd2 |= DC_HPDx_INT_EN;
3040 }
3041 if (rdev->irq.hpd[2]) {
3042 DRM_DEBUG("evergreen_irq_set: hpd 3\n");
3043 hpd3 |= DC_HPDx_INT_EN;
3044 }
3045 if (rdev->irq.hpd[3]) {
3046 DRM_DEBUG("evergreen_irq_set: hpd 4\n");
3047 hpd4 |= DC_HPDx_INT_EN;
3048 }
3049 if (rdev->irq.hpd[4]) {
3050 DRM_DEBUG("evergreen_irq_set: hpd 5\n");
3051 hpd5 |= DC_HPDx_INT_EN;
3052 }
3053 if (rdev->irq.hpd[5]) {
3054 DRM_DEBUG("evergreen_irq_set: hpd 6\n");
3055 hpd6 |= DC_HPDx_INT_EN;
3056 }
Alex Deucherf122c612012-03-30 08:59:57 -04003057 if (rdev->irq.afmt[0]) {
3058 DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
3059 afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3060 }
3061 if (rdev->irq.afmt[1]) {
3062 DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
3063 afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3064 }
3065 if (rdev->irq.afmt[2]) {
3066 DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
3067 afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3068 }
3069 if (rdev->irq.afmt[3]) {
3070 DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
3071 afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3072 }
3073 if (rdev->irq.afmt[4]) {
3074 DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
3075 afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3076 }
3077 if (rdev->irq.afmt[5]) {
3078 DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
3079 afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3080 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003081
Alex Deucher1b370782011-11-17 20:13:28 -05003082 if (rdev->family >= CHIP_CAYMAN) {
3083 cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
3084 cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
3085 cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
3086 } else
3087 WREG32(CP_INT_CNTL, cp_int_cntl);
Alex Deucher233d1ad2012-12-04 15:25:59 -05003088
3089 WREG32(DMA_CNTL, dma_cntl);
3090
Alex Deucherf60cbd12012-12-04 15:27:33 -05003091 if (rdev->family >= CHIP_CAYMAN)
3092 WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
3093
Alex Deucher2031f772010-04-22 12:52:11 -04003094 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
Alex Deucher45f9a392010-03-24 13:55:51 -04003095
3096 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
3097 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
Alex Deucherb7eff392011-07-08 11:44:56 -04003098 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05003099 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
3100 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
Alex Deucherb7eff392011-07-08 11:44:56 -04003101 }
3102 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05003103 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
3104 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
3105 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003106
Alex Deucher6f34be52010-11-21 10:59:01 -05003107 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
3108 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
Alex Deucherb7eff392011-07-08 11:44:56 -04003109 if (rdev->num_crtc >= 4) {
3110 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
3111 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
3112 }
3113 if (rdev->num_crtc >= 6) {
3114 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
3115 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
3116 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003117
Alex Deucher45f9a392010-03-24 13:55:51 -04003118 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3119 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3120 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3121 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3122 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3123 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3124
Alex Deucherf122c612012-03-30 08:59:57 -04003125 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
3126 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
3127 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
3128 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
3129 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
3130 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
3131
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003132 return 0;
3133}
3134
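/**
 * evergreen_irq_ack - snapshot and acknowledge display interrupts
 *
 * @rdev: radeon_device pointer
 *
 * Latch the display interrupt status registers into
 * rdev->irq.stat_regs and acknowledge any pending pageflip, vblank,
 * vline, hotplug and HDMI interrupts (evergreen).
 */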
Andi Kleencbdd4502011-10-13 16:08:46 -07003135static void evergreen_irq_ack(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003136{
3137 u32 tmp;
3138
Alex Deucher6f34be52010-11-21 10:59:01 -05003139 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3140 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3141 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
3142 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
3143 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
3144 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
3145 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
3146 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
Alex Deucherb7eff392011-07-08 11:44:56 -04003147 if (rdev->num_crtc >= 4) {
3148 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
3149 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
3150 }
3151 if (rdev->num_crtc >= 6) {
3152 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
3153 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
3154 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003155
Alex Deucherf122c612012-03-30 08:59:57 -04003156 rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
3157 rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
3158 rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
3159 rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
3160 rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
3161 rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
3162
Alex Deucher6f34be52010-11-21 10:59:01 -05003163 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
3164 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3165 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
3166 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
Alex Deucher6f34be52010-11-21 10:59:01 -05003167 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003168 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003169 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003170 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003171 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003172 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003173 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003174 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
3175
Alex Deucherb7eff392011-07-08 11:44:56 -04003176 if (rdev->num_crtc >= 4) {
3177 if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
3178 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3179 if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
3180 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3181 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
3182 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
3183 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
3184 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
3185 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
3186 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
3187 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
3188 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
3189 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003190
Alex Deucherb7eff392011-07-08 11:44:56 -04003191 if (rdev->num_crtc >= 6) {
3192 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
3193 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3194 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
3195 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3196 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
3197 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
3198 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
3199 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
3200 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
3201 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
3202 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
3203 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
3204 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003205
Alex Deucher6f34be52010-11-21 10:59:01 -05003206 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003207 tmp = RREG32(DC_HPD1_INT_CONTROL);
3208 tmp |= DC_HPDx_INT_ACK;
3209 WREG32(DC_HPD1_INT_CONTROL, tmp);
3210 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003211 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003212 tmp = RREG32(DC_HPD2_INT_CONTROL);
3213 tmp |= DC_HPDx_INT_ACK;
3214 WREG32(DC_HPD2_INT_CONTROL, tmp);
3215 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003216 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003217 tmp = RREG32(DC_HPD3_INT_CONTROL);
3218 tmp |= DC_HPDx_INT_ACK;
3219 WREG32(DC_HPD3_INT_CONTROL, tmp);
3220 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003221 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003222 tmp = RREG32(DC_HPD4_INT_CONTROL);
3223 tmp |= DC_HPDx_INT_ACK;
3224 WREG32(DC_HPD4_INT_CONTROL, tmp);
3225 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003226 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003227 tmp = RREG32(DC_HPD5_INT_CONTROL);
3228 tmp |= DC_HPDx_INT_ACK;
3229 WREG32(DC_HPD5_INT_CONTROL, tmp);
3230 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003231 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003232 tmp = RREG32(DC_HPD6_INT_CONTROL);
3233 tmp |= DC_HPDx_INT_ACK;
3234 WREG32(DC_HPD6_INT_CONTROL, tmp);
3235 }
Alex Deucherf122c612012-03-30 08:59:57 -04003236 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
3237 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
3238 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3239 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
3240 }
3241 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
3242 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
3243 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3244 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
3245 }
3246 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
3247 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
3248 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3249 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
3250 }
3251 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
3252 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
3253 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3254 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
3255 }
3256 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
3257 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
3258 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3259 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
3260 }
3261 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
3262 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
3263 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3264 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
3265 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003266}
3267
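/**
 * evergreen_irq_disable - disable interrupt generation and state
 *
 * @rdev: radeon_device pointer
 *
 * Disable the IH, acknowledge anything still pending, and force the
 * interrupt enable state to all disabled (evergreen).
 */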
Lauri Kasanen1109ca02012-08-31 13:43:50 -04003268static void evergreen_irq_disable(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003269{
Alex Deucher45f9a392010-03-24 13:55:51 -04003270 r600_disable_interrupts(rdev);
3271 /* Wait and acknowledge irq */
3272 mdelay(1);
Alex Deucher6f34be52010-11-21 10:59:01 -05003273 evergreen_irq_ack(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04003274 evergreen_disable_interrupt_state(rdev);
3275}
3276
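/**
 * evergreen_irq_suspend - disable interrupts for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Disable interrupts and stop the RLC (evergreen).
 * Used for suspend.
 */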
Alex Deucher755d8192011-03-02 20:07:34 -05003277void evergreen_irq_suspend(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003278{
3279 evergreen_irq_disable(rdev);
3280 r600_rlc_stop(rdev);
3281}
3282
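/**
 * evergreen_get_ih_wptr - get the current IH ring buffer wptr
 *
 * @rdev: radeon_device pointer
 *
 * Fetch the IH write pointer from the writeback buffer if enabled,
 * otherwise from the register, and recover from a ring overflow by
 * advancing the rptr past the oldest overwritten vector (evergreen).
 * Returns the write pointer masked to the ring size.
 */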
Andi Kleencbdd4502011-10-13 16:08:46 -07003283static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003284{
3285 u32 wptr, tmp;
3286
Alex Deucher724c80e2010-08-27 18:25:25 -04003287 if (rdev->wb.enabled)
Cédric Cano204ae242011-04-19 11:07:13 -04003288 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
Alex Deucher724c80e2010-08-27 18:25:25 -04003289 else
3290 wptr = RREG32(IH_RB_WPTR);
Alex Deucher45f9a392010-03-24 13:55:51 -04003291
3292 if (wptr & RB_OVERFLOW) {
 3293 /* When a ring buffer overflow happens, start parsing interrupts
 3294 * from the last not-overwritten vector (wptr + 16). Hopefully
 3295 * this should allow us to catch up.
3296 */
3297 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3298 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
3299 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3300 tmp = RREG32(IH_RB_CNTL);
3301 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3302 WREG32(IH_RB_CNTL, tmp);
3303 }
3304 return (wptr & rdev->ih.ptr_mask);
3305}
3306
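/**
 * evergreen_irq_process - process the IH ring buffer
 *
 * @rdev: radeon_device pointer
 *
 * Walk the IH ring from rptr to wptr, decoding each interrupt vector
 * and dispatching vblank, pageflip, hotplug, fence, VM fault and DMA
 * events to their handlers (evergreen).
 * Returns IRQ_HANDLED if interrupts were processed, IRQ_NONE otherwise.
 */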
3307int evergreen_irq_process(struct radeon_device *rdev)
3308{
Dave Airlie682f1a52011-06-18 03:59:51 +00003309 u32 wptr;
3310 u32 rptr;
Alex Deucher45f9a392010-03-24 13:55:51 -04003311 u32 src_id, src_data;
3312 u32 ring_index;
Alex Deucher45f9a392010-03-24 13:55:51 -04003313 bool queue_hotplug = false;
Alex Deucherf122c612012-03-30 08:59:57 -04003314 bool queue_hdmi = false;
Alex Deucher45f9a392010-03-24 13:55:51 -04003315
Dave Airlie682f1a52011-06-18 03:59:51 +00003316 if (!rdev->ih.enabled || rdev->shutdown)
Alex Deucher45f9a392010-03-24 13:55:51 -04003317 return IRQ_NONE;
3318
Dave Airlie682f1a52011-06-18 03:59:51 +00003319 wptr = evergreen_get_ih_wptr(rdev);
Christian Koenigc20dc362012-05-16 21:45:24 +02003320
3321restart_ih:
3322 /* is somebody else already processing irqs? */
3323 if (atomic_xchg(&rdev->ih.lock, 1))
3324 return IRQ_NONE;
3325
Dave Airlie682f1a52011-06-18 03:59:51 +00003326 rptr = rdev->ih.rptr;
 3327 DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
Alex Deucher45f9a392010-03-24 13:55:51 -04003328
Benjamin Herrenschmidt964f6642011-07-13 16:28:19 +10003329 /* Order reading of wptr vs. reading of IH ring data */
3330 rmb();
3331
Alex Deucher45f9a392010-03-24 13:55:51 -04003332 /* display interrupts */
Alex Deucher6f34be52010-11-21 10:59:01 -05003333 evergreen_irq_ack(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04003334
Alex Deucher45f9a392010-03-24 13:55:51 -04003335 while (rptr != wptr) {
3336 /* wptr/rptr are in bytes! */
3337 ring_index = rptr / 4;
Alex Deucher0f234f5f2011-02-13 19:06:33 -05003338 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3339 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
Alex Deucher45f9a392010-03-24 13:55:51 -04003340
3341 switch (src_id) {
3342 case 1: /* D1 vblank/vline */
3343 switch (src_data) {
3344 case 0: /* D1 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003345 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003346 if (rdev->irq.crtc_vblank_int[0]) {
3347 drm_handle_vblank(rdev->ddev, 0);
3348 rdev->pm.vblank_sync = true;
3349 wake_up(&rdev->irq.vblank_queue);
3350 }
Christian Koenig736fc372012-05-17 19:52:00 +02003351 if (atomic_read(&rdev->irq.pflip[0]))
Mario Kleiner3e4ea742010-11-21 10:59:02 -05003352 radeon_crtc_handle_flip(rdev, 0);
Alex Deucher6f34be52010-11-21 10:59:01 -05003353 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003354 DRM_DEBUG("IH: D1 vblank\n");
3355 }
3356 break;
3357 case 1: /* D1 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003358 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
3359 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003360 DRM_DEBUG("IH: D1 vline\n");
3361 }
3362 break;
3363 default:
3364 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3365 break;
3366 }
3367 break;
3368 case 2: /* D2 vblank/vline */
3369 switch (src_data) {
3370 case 0: /* D2 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003371 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003372 if (rdev->irq.crtc_vblank_int[1]) {
3373 drm_handle_vblank(rdev->ddev, 1);
3374 rdev->pm.vblank_sync = true;
3375 wake_up(&rdev->irq.vblank_queue);
3376 }
Christian Koenig736fc372012-05-17 19:52:00 +02003377 if (atomic_read(&rdev->irq.pflip[1]))
Mario Kleiner3e4ea742010-11-21 10:59:02 -05003378 radeon_crtc_handle_flip(rdev, 1);
Alex Deucher6f34be52010-11-21 10:59:01 -05003379 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003380 DRM_DEBUG("IH: D2 vblank\n");
3381 }
3382 break;
3383 case 1: /* D2 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003384 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
3385 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003386 DRM_DEBUG("IH: D2 vline\n");
3387 }
3388 break;
3389 default:
3390 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3391 break;
3392 }
3393 break;
3394 case 3: /* D3 vblank/vline */
3395 switch (src_data) {
3396 case 0: /* D3 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003397 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
3398 if (rdev->irq.crtc_vblank_int[2]) {
3399 drm_handle_vblank(rdev->ddev, 2);
3400 rdev->pm.vblank_sync = true;
3401 wake_up(&rdev->irq.vblank_queue);
3402 }
Christian Koenig736fc372012-05-17 19:52:00 +02003403 if (atomic_read(&rdev->irq.pflip[2]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003404 radeon_crtc_handle_flip(rdev, 2);
3405 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003406 DRM_DEBUG("IH: D3 vblank\n");
3407 }
3408 break;
3409 case 1: /* D3 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003410 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
3411 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003412 DRM_DEBUG("IH: D3 vline\n");
3413 }
3414 break;
3415 default:
3416 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3417 break;
3418 }
3419 break;
3420 case 4: /* D4 vblank/vline */
3421 switch (src_data) {
3422 case 0: /* D4 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003423 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
3424 if (rdev->irq.crtc_vblank_int[3]) {
3425 drm_handle_vblank(rdev->ddev, 3);
3426 rdev->pm.vblank_sync = true;
3427 wake_up(&rdev->irq.vblank_queue);
3428 }
Christian Koenig736fc372012-05-17 19:52:00 +02003429 if (atomic_read(&rdev->irq.pflip[3]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003430 radeon_crtc_handle_flip(rdev, 3);
3431 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003432 DRM_DEBUG("IH: D4 vblank\n");
3433 }
3434 break;
3435 case 1: /* D4 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003436 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
3437 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003438 DRM_DEBUG("IH: D4 vline\n");
3439 }
3440 break;
3441 default:
3442 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3443 break;
3444 }
3445 break;
3446 case 5: /* D5 vblank/vline */
3447 switch (src_data) {
3448 case 0: /* D5 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003449 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
3450 if (rdev->irq.crtc_vblank_int[4]) {
3451 drm_handle_vblank(rdev->ddev, 4);
3452 rdev->pm.vblank_sync = true;
3453 wake_up(&rdev->irq.vblank_queue);
3454 }
Christian Koenig736fc372012-05-17 19:52:00 +02003455 if (atomic_read(&rdev->irq.pflip[4]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003456 radeon_crtc_handle_flip(rdev, 4);
3457 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003458 DRM_DEBUG("IH: D5 vblank\n");
3459 }
3460 break;
3461 case 1: /* D5 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003462 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
3463 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003464 DRM_DEBUG("IH: D5 vline\n");
3465 }
3466 break;
3467 default:
3468 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3469 break;
3470 }
3471 break;
3472 case 6: /* D6 vblank/vline */
3473 switch (src_data) {
3474 case 0: /* D6 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003475 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
3476 if (rdev->irq.crtc_vblank_int[5]) {
3477 drm_handle_vblank(rdev->ddev, 5);
3478 rdev->pm.vblank_sync = true;
3479 wake_up(&rdev->irq.vblank_queue);
3480 }
Christian Koenig736fc372012-05-17 19:52:00 +02003481 if (atomic_read(&rdev->irq.pflip[5]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003482 radeon_crtc_handle_flip(rdev, 5);
3483 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003484 DRM_DEBUG("IH: D6 vblank\n");
3485 }
3486 break;
3487 case 1: /* D6 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003488 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
3489 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003490 DRM_DEBUG("IH: D6 vline\n");
3491 }
3492 break;
3493 default:
3494 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3495 break;
3496 }
3497 break;
3498 case 42: /* HPD hotplug */
3499 switch (src_data) {
3500 case 0:
Alex Deucher6f34be52010-11-21 10:59:01 -05003501 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
3502 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003503 queue_hotplug = true;
3504 DRM_DEBUG("IH: HPD1\n");
3505 }
3506 break;
3507 case 1:
Alex Deucher6f34be52010-11-21 10:59:01 -05003508 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
3509 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003510 queue_hotplug = true;
3511 DRM_DEBUG("IH: HPD2\n");
3512 }
3513 break;
3514 case 2:
Alex Deucher6f34be52010-11-21 10:59:01 -05003515 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3516 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003517 queue_hotplug = true;
3518 DRM_DEBUG("IH: HPD3\n");
3519 }
3520 break;
3521 case 3:
Alex Deucher6f34be52010-11-21 10:59:01 -05003522 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3523 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003524 queue_hotplug = true;
3525 DRM_DEBUG("IH: HPD4\n");
3526 }
3527 break;
3528 case 4:
Alex Deucher6f34be52010-11-21 10:59:01 -05003529 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3530 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003531 queue_hotplug = true;
3532 DRM_DEBUG("IH: HPD5\n");
3533 }
3534 break;
3535 case 5:
Alex Deucher6f34be52010-11-21 10:59:01 -05003536 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3537 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003538 queue_hotplug = true;
3539 DRM_DEBUG("IH: HPD6\n");
3540 }
3541 break;
3542 default:
3543 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3544 break;
3545 }
3546 break;
Alex Deucherf122c612012-03-30 08:59:57 -04003547 case 44: /* hdmi */
3548 switch (src_data) {
3549 case 0:
3550 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
3551 rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
3552 queue_hdmi = true;
3553 DRM_DEBUG("IH: HDMI0\n");
3554 }
3555 break;
3556 case 1:
3557 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
3558 rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
3559 queue_hdmi = true;
3560 DRM_DEBUG("IH: HDMI1\n");
3561 }
3562 break;
3563 case 2:
3564 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
3565 rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
3566 queue_hdmi = true;
3567 DRM_DEBUG("IH: HDMI2\n");
3568 }
3569 break;
3570 case 3:
3571 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
3572 rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
3573 queue_hdmi = true;
3574 DRM_DEBUG("IH: HDMI3\n");
3575 }
3576 break;
3577 case 4:
3578 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
3579 rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
3580 queue_hdmi = true;
3581 DRM_DEBUG("IH: HDMI4\n");
3582 }
3583 break;
3584 case 5:
3585 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
3586 rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
3587 queue_hdmi = true;
3588 DRM_DEBUG("IH: HDMI5\n");
3589 }
3590 break;
3591 default:
3592 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3593 break;
3594 }
 break;
Christian Königf2ba57b2013-04-08 12:41:29 +02003595 case 124: /* UVD */
3596 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
3597 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
Alex Deucherf122c612012-03-30 08:59:57 -04003598 break;
Christian Königae133a12012-09-18 15:30:44 -04003599 case 146:
3600 case 147:
3601 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
3602 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
3603 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3604 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3605 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3606 /* reset addr and status */
3607 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
3608 break;
Alex Deucher45f9a392010-03-24 13:55:51 -04003609 case 176: /* CP_INT in ring buffer */
3610 case 177: /* CP_INT in IB1 */
3611 case 178: /* CP_INT in IB2 */
3612 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
Alex Deucher74652802011-08-25 13:39:48 -04003613 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
Alex Deucher45f9a392010-03-24 13:55:51 -04003614 break;
3615 case 181: /* CP EOP event */
3616 DRM_DEBUG("IH: CP EOP\n");
Alex Deucher1b370782011-11-17 20:13:28 -05003617 if (rdev->family >= CHIP_CAYMAN) {
3618 switch (src_data) {
3619 case 0:
3620 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3621 break;
3622 case 1:
3623 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
3624 break;
3625 case 2:
3626 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
3627 break;
3628 }
3629 } else
3630 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
Alex Deucher45f9a392010-03-24 13:55:51 -04003631 break;
Alex Deucher233d1ad2012-12-04 15:25:59 -05003632 case 224: /* DMA trap event */
3633 DRM_DEBUG("IH: DMA trap\n");
3634 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3635 break;
Alex Deucher2031f772010-04-22 12:52:11 -04003636 case 233: /* GUI IDLE */
Ilija Hadzic303c8052011-06-07 14:54:48 -04003637 DRM_DEBUG("IH: GUI idle\n");
Alex Deucher2031f772010-04-22 12:52:11 -04003638 break;
Alex Deucherf60cbd12012-12-04 15:27:33 -05003639 case 244: /* DMA1 trap event */
3640 if (rdev->family >= CHIP_CAYMAN) {
3641 DRM_DEBUG("IH: DMA1 trap\n");
3642 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
3643 }
3644 break;
Alex Deucher45f9a392010-03-24 13:55:51 -04003645 default:
3646 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3647 break;
3648 }
3649
3650 /* wptr/rptr are in bytes! */
3651 rptr += 16;
3652 rptr &= rdev->ih.ptr_mask;
3653 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003654 if (queue_hotplug)
Tejun Heo32c87fc2011-01-03 14:49:32 +01003655 schedule_work(&rdev->hotplug_work);
Alex Deucherf122c612012-03-30 08:59:57 -04003656 if (queue_hdmi)
3657 schedule_work(&rdev->audio_work);
Alex Deucher45f9a392010-03-24 13:55:51 -04003658 rdev->ih.rptr = rptr;
3659 WREG32(IH_RB_RPTR, rdev->ih.rptr);
Christian Koenigc20dc362012-05-16 21:45:24 +02003660 atomic_set(&rdev->ih.lock, 0);
3661
3662 /* make sure wptr hasn't changed while processing */
3663 wptr = evergreen_get_ih_wptr(rdev);
3664 if (wptr != rptr)
3665 goto restart_ih;
3666
Alex Deucher45f9a392010-03-24 13:55:51 -04003667 return IRQ_HANDLED;
3668}
3669
Alex Deucher233d1ad2012-12-04 15:25:59 -05003670/**
3671 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
3672 *
3673 * @rdev: radeon_device pointer
3674 * @fence: radeon fence object
3675 *
 3676 * Add a DMA fence packet to the ring to write the fence
 3677 * sequence number, followed by a DMA trap packet to generate
 3678 * an interrupt if needed (evergreen-SI).
3679 */
3680void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
3681 struct radeon_fence *fence)
3682{
3683 struct radeon_ring *ring = &rdev->ring[fence->ring];
3684 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3685 /* write the fence */
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003686 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003687 radeon_ring_write(ring, addr & 0xfffffffc);
3688 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
3689 radeon_ring_write(ring, fence->seq);
3690 /* generate an interrupt */
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003691 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003692 /* flush HDP */
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003693 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
Alex Deucher4b681c22013-01-03 19:54:34 -05003694 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003695 radeon_ring_write(ring, 1);
3696}
3697
3698/**
3699 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
3700 *
3701 * @rdev: radeon_device pointer
3702 * @ib: IB object to schedule
3703 *
3704 * Schedule an IB in the DMA ring (evergreen).
3705 */
3706void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
3707 struct radeon_ib *ib)
3708{
3709 struct radeon_ring *ring = &rdev->ring[ib->ring];
3710
3711 if (rdev->wb.enabled) {
3712 u32 next_rptr = ring->wptr + 4;
3713 while ((next_rptr & 7) != 5)
3714 next_rptr++;
3715 next_rptr += 3;
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003716 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003717 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3718 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3719 radeon_ring_write(ring, next_rptr);
3720 }
3721
3722 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3723 * Pad as necessary with NOPs.
3724 */
3725 while ((ring->wptr & 7) != 5)
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003726 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
3727 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003728 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3729 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
3730
3731}
3732
3733/**
3734 * evergreen_copy_dma - copy pages using the DMA engine
3735 *
3736 * @rdev: radeon_device pointer
3737 * @src_offset: src GPU address
3738 * @dst_offset: dst GPU address
3739 * @num_gpu_pages: number of GPU pages to xfer
3740 * @fence: radeon fence object
3741 *
 3742 * Copy pages using the DMA engine (evergreen-cayman).
3743 * Used by the radeon ttm implementation to move pages if
3744 * registered as the asic copy callback.
3745 */
3746int evergreen_copy_dma(struct radeon_device *rdev,
3747 uint64_t src_offset, uint64_t dst_offset,
3748 unsigned num_gpu_pages,
3749 struct radeon_fence **fence)
3750{
3751 struct radeon_semaphore *sem = NULL;
3752 int ring_index = rdev->asic->copy.dma_ring_index;
3753 struct radeon_ring *ring = &rdev->ring[ring_index];
3754 u32 size_in_dw, cur_size_in_dw;
3755 int i, num_loops;
3756 int r = 0;
3757
3758 r = radeon_semaphore_create(rdev, &sem);
3759 if (r) {
3760 DRM_ERROR("radeon: moving bo (%d).\n", r);
3761 return r;
3762 }
3763
3764 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
3765 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
3766 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
3767 if (r) {
3768 DRM_ERROR("radeon: moving bo (%d).\n", r);
3769 radeon_semaphore_free(rdev, &sem, NULL);
3770 return r;
3771 }
3772
3773 if (radeon_fence_need_sync(*fence, ring->idx)) {
3774 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3775 ring->idx);
3776 radeon_fence_note_sync(*fence, ring->idx);
3777 } else {
3778 radeon_semaphore_free(rdev, &sem, NULL);
3779 }
3780
3781 for (i = 0; i < num_loops; i++) {
3782 cur_size_in_dw = size_in_dw;
3783 if (cur_size_in_dw > 0xFFFFF)
3784 cur_size_in_dw = 0xFFFFF;
3785 size_in_dw -= cur_size_in_dw;
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003786 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003787 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3788 radeon_ring_write(ring, src_offset & 0xfffffffc);
3789 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3790 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
3791 src_offset += cur_size_in_dw * 4;
3792 dst_offset += cur_size_in_dw * 4;
3793 }
3794
3795 r = radeon_fence_emit(rdev, fence, ring->idx);
3796 if (r) {
3797 radeon_ring_unlock_undo(rdev, ring);
3798 return r;
3799 }
3800
3801 radeon_ring_unlock_commit(rdev, ring);
3802 radeon_semaphore_free(rdev, &sem, *fence);
3803
3804 return r;
3805}
3806
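/**
 * evergreen_startup - program the asic to a functional state
 *
 * @rdev: radeon_device pointer
 *
 * Load the microcode, program the MC and GART, initialize the rings,
 * interrupt handler, IB pool and audio (evergreen).
 * Returns 0 for success, error for failure.
 */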
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003807static int evergreen_startup(struct radeon_device *rdev)
3808{
Christian Königf2ba57b2013-04-08 12:41:29 +02003809 struct radeon_ring *ring;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003810 int r;
3811
Alex Deucher9e46a482011-01-06 18:49:35 -05003812 /* enable pcie gen2 link */
Ilija Hadziccd540332011-09-20 10:22:57 -04003813 evergreen_pcie_gen2_enable(rdev);
Alex Deucher9e46a482011-01-06 18:49:35 -05003814
Alex Deucher0af62b02011-01-06 21:19:31 -05003815 if (ASIC_IS_DCE5(rdev)) {
3816 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
3817 r = ni_init_microcode(rdev);
3818 if (r) {
3819 DRM_ERROR("Failed to load firmware!\n");
3820 return r;
3821 }
3822 }
Alex Deucher755d8192011-03-02 20:07:34 -05003823 r = ni_mc_load_microcode(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003824 if (r) {
Alex Deucher0af62b02011-01-06 21:19:31 -05003825 DRM_ERROR("Failed to load MC firmware!\n");
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003826 return r;
3827 }
Alex Deucher0af62b02011-01-06 21:19:31 -05003828 } else {
3829 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3830 r = r600_init_microcode(rdev);
3831 if (r) {
3832 DRM_ERROR("Failed to load firmware!\n");
3833 return r;
3834 }
3835 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003836 }
Alex Deucherfe251e22010-03-24 13:36:43 -04003837
Alex Deucher16cdf042011-10-28 10:30:02 -04003838 r = r600_vram_scratch_init(rdev);
3839 if (r)
3840 return r;
3841
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003842 evergreen_mc_program(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003843 if (rdev->flags & RADEON_IS_AGP) {
Alex Deucher0fcdb612010-03-24 13:20:41 -04003844 evergreen_agp_enable(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003845 } else {
3846 r = evergreen_pcie_gart_enable(rdev);
3847 if (r)
3848 return r;
3849 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003850 evergreen_gpu_init(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003851
Alex Deucherd7ccd8f2010-09-09 11:33:36 -04003852 r = evergreen_blit_init(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003853 if (r) {
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04003854 r600_blit_fini(rdev);
Alex Deucher27cd7762012-02-23 17:53:42 -05003855 rdev->asic->copy.copy = NULL;
Alex Deucherd7ccd8f2010-09-09 11:33:36 -04003856 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003857 }
3858
Alex Deucher724c80e2010-08-27 18:25:25 -04003859 /* allocate wb buffer */
3860 r = radeon_wb_init(rdev);
3861 if (r)
3862 return r;
3863
Jerome Glisse30eb77f2011-11-20 20:45:34 +00003864 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3865 if (r) {
3866 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3867 return r;
3868 }
3869
Alex Deucher233d1ad2012-12-04 15:25:59 -05003870 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
3871 if (r) {
3872 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
3873 return r;
3874 }
3875
Christian Königf2ba57b2013-04-08 12:41:29 +02003876 r = rv770_uvd_resume(rdev);
3877 if (!r) {
3878 r = radeon_fence_driver_start_ring(rdev,
3879 R600_RING_TYPE_UVD_INDEX);
3880 if (r)
3881 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
3882 }
3883
3884 if (r)
3885 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
3886
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003887 /* Enable IRQ */
3888 r = r600_irq_init(rdev);
3889 if (r) {
3890 DRM_ERROR("radeon: IH init failed (%d).\n", r);
3891 radeon_irq_kms_fini(rdev);
3892 return r;
3893 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003894 evergreen_irq_set(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003895
Christian Königf2ba57b2013-04-08 12:41:29 +02003896 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Christian Könige32eb502011-10-23 12:56:27 +02003897 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
Alex Deucher78c55602011-11-17 14:25:56 -05003898 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
3899 0, 0xfffff, RADEON_CP_PACKET2);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003900 if (r)
3901 return r;
Alex Deucher233d1ad2012-12-04 15:25:59 -05003902
3903 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
3904 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
3905 DMA_RB_RPTR, DMA_RB_WPTR,
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003906 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003907 if (r)
3908 return r;
3909
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003910 r = evergreen_cp_load_microcode(rdev);
3911 if (r)
3912 return r;
Alex Deucherfe251e22010-03-24 13:36:43 -04003913 r = evergreen_cp_resume(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003914 if (r)
3915 return r;
Alex Deucher233d1ad2012-12-04 15:25:59 -05003916 r = r600_dma_resume(rdev);
3917 if (r)
3918 return r;
Alex Deucherfe251e22010-03-24 13:36:43 -04003919
Christian Königf2ba57b2013-04-08 12:41:29 +02003920 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
3921 if (ring->ring_size) {
3922 r = radeon_ring_init(rdev, ring, ring->ring_size,
3923 R600_WB_UVD_RPTR_OFFSET,
3924 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
3925 0, 0xfffff, RADEON_CP_PACKET2);
3926 if (!r)
3927 r = r600_uvd_init(rdev);
3928
3929 if (r)
3930 DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
3931 }
3932
	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

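/**
 * evergreen_resume - resume the asic
 *
 * @rdev: radeon_device pointer
 *
 * Re-post the asic via the ATOM BIOS init tables and re-run the
 * common startup sequence (evergreen_startup()).
 * Returns 0 for success, error for failure.
 */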
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Do not reset the GPU before posting; on this hw, unlike on r500
	 * hw, posting performs the tasks needed to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return 0;
}

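/**
 * evergreen_suspend - suspend the asic
 *
 * @rdev: radeon_device pointer
 *
 * Quiesce the engines (CP, DMA, UVD), then disable interrupts,
 * writeback, and the GART mapping.
 * Always returns 0.
 */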
3976int evergreen_suspend(struct radeon_device *rdev)
3977{
Rafał Miłecki69d2ae52011-12-07 23:32:24 +01003978 r600_audio_fini(rdev);
Christian Königf2ba57b2013-04-08 12:41:29 +02003979 radeon_uvd_suspend(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003980 r700_cp_stop(rdev);
Alex Deucher233d1ad2012-12-04 15:25:59 -05003981 r600_dma_stop(rdev);
Christian Königf2ba57b2013-04-08 12:41:29 +02003982 r600_uvd_rbc_stop(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04003983 evergreen_irq_suspend(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04003984 radeon_wb_disable(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003985 evergreen_pcie_gart_disable(rdev);
Alex Deucherd7ccd8f2010-09-09 11:33:36 -04003986
3987 return 0;
3988}
3989
/* The plan is to move initialization into this function and to use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic-specific functions. This should
 * also allow us to remove a bunch of callback functions like
 * vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

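	/* Allocate the rings: 1 MB for the GFX ring, 64 KB for the async
	 * DMA ring, and a small 4 KB ring for UVD when the block is
	 * present.
	 */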
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);

	r = radeon_uvd_init(rdev);
	if (!r) {
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
		r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
			       4096);
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_dma_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing on BTC parts.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
			DRM_ERROR("radeon: MC ucode required for NI+.\n");
			return -EINVAL;
		}
	}

	return 0;
}

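/**
 * evergreen_fini - tear down the driver for the asic
 *
 * @rdev: radeon_device pointer
 *
 * Unwinds everything set up by evergreen_init(), in roughly the
 * reverse order.
 */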
4104void evergreen_fini(struct radeon_device *rdev)
4105{
Rafał Miłecki69d2ae52011-12-07 23:32:24 +01004106 r600_audio_fini(rdev);
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04004107 r600_blit_fini(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04004108 r700_cp_fini(rdev);
Alex Deucher233d1ad2012-12-04 15:25:59 -05004109 r600_dma_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05004110 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04004111 radeon_wb_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02004112 radeon_ib_pool_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05004113 radeon_irq_kms_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05004114 evergreen_pcie_gart_fini(rdev);
Christian Königf2ba57b2013-04-08 12:41:29 +02004115 radeon_uvd_fini(rdev);
Alex Deucher16cdf042011-10-28 10:30:02 -04004116 r600_vram_scratch_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05004117 radeon_gem_fini(rdev);
4118 radeon_fence_driver_fini(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05004119 radeon_agp_fini(rdev);
4120 radeon_bo_fini(rdev);
4121 radeon_atombios_fini(rdev);
4122 kfree(rdev->bios);
4123 rdev->bios = NULL;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05004124}
Alex Deucher9e46a482011-01-06 18:49:35 -05004125
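/**
 * evergreen_pcie_gen2_enable - try to switch the PCIE link to gen 2 speeds
 *
 * @rdev: radeon_device pointer
 *
 * Does nothing on IGP, non-PCIE, and dual-GPU (X2) boards, when the
 * kernel reports no gen 2 capability, or when gen 2 speeds are
 * already enabled.  Can be disabled with the radeon.pcie_gen2=0
 * module parameter.
 */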
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl, mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

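	/* If the other end of the link has ever advertised gen 2 support,
	 * retrain the link to gen 2 speeds; otherwise just pin the current
	 * link width by disabling upconfiguration.
	 */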
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}