Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090026#include <linux/slab.h>
David Howells760285e2012-10-02 18:01:07 +010027#include <drm/drmP.h>
Alex Deucherbcc1c2a2010-01-12 17:54:34 -050028#include "radeon.h"
Daniel Vettere6990372010-03-11 21:19:17 +000029#include "radeon_asic.h"
David Howells760285e2012-10-02 18:01:07 +010030#include <drm/radeon_drm.h>
Alex Deucher0fcdb612010-03-24 13:20:41 -040031#include "evergreend.h"
Alex Deucherbcc1c2a2010-01-12 17:54:34 -050032#include "atom.h"
33#include "avivod.h"
34#include "evergreen_reg.h"
Alex Deucher2281a372010-10-21 13:31:38 -040035#include "evergreen_blit_shaders.h"
Alex Deucherbcc1c2a2010-01-12 17:54:34 -050036
Alex Deucherfe251e22010-03-24 13:36:43 -040037#define EVERGREEN_PFP_UCODE_SIZE 1120
38#define EVERGREEN_PM4_UCODE_SIZE 1376
39
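/* register block offsets for the 6 display controllers (crtcs), indexed by crtc number */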
Alex Deucher4a159032012-08-15 17:13:53 -040040static const u32 crtc_offsets[6] =
41{
42 EVERGREEN_CRTC0_REGISTER_OFFSET,
43 EVERGREEN_CRTC1_REGISTER_OFFSET,
44 EVERGREEN_CRTC2_REGISTER_OFFSET,
45 EVERGREEN_CRTC3_REGISTER_OFFSET,
46 EVERGREEN_CRTC4_REGISTER_OFFSET,
47 EVERGREEN_CRTC5_REGISTER_OFFSET
48};
49
Alex Deucherbcc1c2a2010-01-12 17:54:34 -050050static void evergreen_gpu_init(struct radeon_device *rdev);
51void evergreen_fini(struct radeon_device *rdev);
Ilija Hadzicb07759b2011-09-20 10:22:58 -040052void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
Alex Deucher1b370782011-11-17 20:13:28 -050053extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
54 int ring, u32 cp_int_cntl);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -050055
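/**
 * evergreen_tiling_fields - decode tiling parameters from the tiling flags
 *
 * @tiling_flags: packed tiling flags from the buffer object
 * @bankw: returns the bank width as an ADDR_SURF_BANK_WIDTH_* encoding
 * @bankh: returns the bank height as an ADDR_SURF_BANK_HEIGHT_* encoding
 * @mtaspect: returns the macro tile aspect as an ADDR_SURF_MACRO_TILE_ASPECT_* encoding
 * @tile_split: returns the tile split select field
 *
 * Extracts the raw power-of-two tiling parameters from the flags and
 * converts them into the register field encodings used by the hardware.
 */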
Jerome Glisse285484e2011-12-16 17:03:42 -050056void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
57 unsigned *bankh, unsigned *mtaspect,
58 unsigned *tile_split)
59{
60 *bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
61 *bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
62 *mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
63 *tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
64 switch (*bankw) {
65 default:
66 case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
67 case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
68 case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
69 case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
70 }
71 switch (*bankh) {
72 default:
73 case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
74 case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
75 case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
76 case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
77 }
78 switch (*mtaspect) {
79 default:
80 case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
81 case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
82 case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
83 case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
84 }
85}
86
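/**
 * sumo_set_uvd_clock - program one UVD clock divider
 *
 * @rdev: radeon_device pointer
 * @clock: requested clock frequency
 * @cntl_reg: divider control register to program
 * @status_reg: status register to poll for completion
 *
 * Look up the post divider for the requested clock via the atom
 * tables, program it, and wait for the hardware to signal that the
 * new divider has taken effect.
 * Returns 0 on success, -ETIMEDOUT if the status bit never asserts.
 */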
Alex Deucher23d33ba2013-04-08 12:41:32 +020087static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
88 u32 cntl_reg, u32 status_reg)
89{
90 int r, i;
91 struct atom_clock_dividers dividers;
92
93 r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
94 clock, false, &dividers);
95 if (r)
96 return r;
97
98 WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
99
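	/* wait up to ~1 second (100 * 10 ms) for the divider change to take effect */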
100 for (i = 0; i < 100; i++) {
101 if (RREG32(status_reg) & DCLK_STATUS)
102 break;
103 mdelay(10);
104 }
105 if (i == 100)
106 return -ETIMEDOUT;
107
108 return 0;
109}
110
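/**
 * sumo_set_uvd_clocks - set UVD vclk and dclk (sumo)
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock
 * @dclk: requested UVD decoder clock
 *
 * Program the VCLK and DCLK dividers and record the resulting
 * frequencies (in MHz) in the CG_SCRATCH1 register.
 * Returns 0 on success, error on failure.
 */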
111int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
112{
113 int r = 0;
114 u32 cg_scratch = RREG32(CG_SCRATCH1);
115
116 r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
117 if (r)
118 goto done;
119 cg_scratch &= 0xffff0000;
120 cg_scratch |= vclk / 100; /* Mhz */
121
122 r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
123 if (r)
124 goto done;
125 cg_scratch &= 0x0000ffff;
126 cg_scratch |= (dclk / 100) << 16; /* Mhz */
127
128done:
129 WREG32(CG_SCRATCH1, cg_scratch);
130
131 return r;
132}
133
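/**
 * evergreen_uvd_calc_post_div - calculate the UPLL post divider
 *
 * @target_freq: desired output frequency
 * @vco_freq: VCO frequency being considered
 * @div: where to store the calculated post divider
 *
 * Pick the smallest post divider that yields an output at or below
 * the target, keeping dividers above 5 even as the hardware requires.
 * For example, vco_freq = 160000 and target_freq = 54000 gives
 * *div = 3 and a return value of 53333.
 * Returns the achievable frequency, or -1 if the target cannot be
 * reached with this VCO frequency.
 */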
Alex Deuchera8b49252013-04-08 12:41:33 +0200134static int evergreen_uvd_calc_post_div(unsigned target_freq,
135 unsigned vco_freq,
136 unsigned *div)
137{
138 /* target larger than vco frequency ? */
139 if (vco_freq < target_freq)
140 return -1; /* forget it */
141
142 /* Fclk = Fvco / PDIV */
143 *div = vco_freq / target_freq;
144
 145 /* we always need a frequency less than or equal to the target */
146 if ((vco_freq / *div) > target_freq)
147 *div += 1;
148
149 /* dividers above 5 must be even */
150 if (*div > 5 && *div % 2)
151 *div += 1;
152
153 /* out of range ? */
154 if (*div >= 128)
155 return -1; /* forget it */
156
157 return vco_freq / *div;
158}
159
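/**
 * evergreen_uvd_send_upll_ctlreq - issue a UPLL control request
 *
 * @rdev: radeon_device pointer
 *
 * Assert UPLL_CTLREQ, wait (up to ~1 second) for the hardware to
 * acknowledge via CTLACK and CTLACK2, then deassert the request.
 * Returns 0 on success, -ETIMEDOUT if the acks never arrive.
 */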
160static int evergreen_uvd_send_upll_ctlreq(struct radeon_device *rdev)
161{
162 unsigned i;
163
164 /* assert UPLL_CTLREQ */
165 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
166
167 /* wait for CTLACK and CTLACK2 to get asserted */
168 for (i = 0; i < 100; ++i) {
169 uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
170 if ((RREG32(CG_UPLL_FUNC_CNTL) & mask) == mask)
171 break;
172 mdelay(10);
173 }
174 if (i == 100)
175 return -ETIMEDOUT;
176
177 /* deassert UPLL_CTLREQ */
178 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
179
180 return 0;
181}
182
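/**
 * evergreen_set_uvd_clocks - set UVD vclk and dclk (evergreen)
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock (0 puts the PLL to sleep)
 * @dclk: requested UVD decoder clock (0 puts the PLL to sleep)
 *
 * Search for the VCO frequency and post dividers that best match the
 * requested clocks, then reprogram the UPLL with the result: bypass
 * the PLL, program the feedback and post dividers, let the PLL
 * settle, and switch VCLK/DCLK back to the PLL outputs.
 * Returns 0 on success, error on failure.
 */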
183int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
184{
185 /* start off with something large */
186 int optimal_diff_score = 0x7FFFFFF;
187 unsigned optimal_fb_div = 0, optimal_vclk_div = 0;
188 unsigned optimal_dclk_div = 0, optimal_vco_freq = 0;
189 unsigned vco_freq;
190 int r;
191
Christian König4ed10832013-04-18 15:25:58 +0200192 /* bypass vclk and dclk with bclk */
193 WREG32_P(CG_UPLL_FUNC_CNTL_2,
194 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
195 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
196
197 /* put PLL in bypass mode */
198 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
199
200 if (!vclk || !dclk) {
201 /* keep the Bypass mode, put PLL to sleep */
202 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
203 return 0;
204 }
205
Alex Deuchera8b49252013-04-08 12:41:33 +0200206 /* loop through vco from low to high */
207 for (vco_freq = 125000; vco_freq <= 250000; vco_freq += 100) {
208 unsigned fb_div = vco_freq / rdev->clock.spll.reference_freq * 16384;
209 int calc_clk, diff_score, diff_vclk, diff_dclk;
210 unsigned vclk_div, dclk_div;
211
212 /* fb div out of range ? */
213 if (fb_div > 0x03FFFFFF)
 214 break; /* it can only get worse */
215
216 /* calc vclk with current vco freq. */
217 calc_clk = evergreen_uvd_calc_post_div(vclk, vco_freq, &vclk_div);
218 if (calc_clk == -1)
 219 break; /* vco is too big, stop searching */
220 diff_vclk = vclk - calc_clk;
221
222 /* calc dclk with current vco freq. */
223 calc_clk = evergreen_uvd_calc_post_div(dclk, vco_freq, &dclk_div);
224 if (calc_clk == -1)
 225 break; /* vco is too big, stop searching */
226 diff_dclk = dclk - calc_clk;
227
228 /* determine if this vco setting is better than current optimal settings */
229 diff_score = abs(diff_vclk) + abs(diff_dclk);
230 if (diff_score < optimal_diff_score) {
231 optimal_fb_div = fb_div;
232 optimal_vclk_div = vclk_div;
233 optimal_dclk_div = dclk_div;
234 optimal_vco_freq = vco_freq;
235 optimal_diff_score = diff_score;
236 if (optimal_diff_score == 0)
237 break; /* it can't get better than this */
238 }
239 }
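	/* optimal_* now hold the closest achievable feedback/post divider combination; program the UPLL with them below */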
240
241 /* set VCO_MODE to 1 */
242 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
243
244 /* toggle UPLL_SLEEP to 1 then back to 0 */
245 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
246 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
247
248 /* deassert UPLL_RESET */
249 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
250
251 mdelay(1);
252
Alex Deuchera8b49252013-04-08 12:41:33 +0200253 r = evergreen_uvd_send_upll_ctlreq(rdev);
254 if (r)
255 return r;
256
257 /* assert UPLL_RESET again */
258 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
259
260 /* disable spread spectrum. */
261 WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
262
263 /* set feedback divider */
264 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(optimal_fb_div), ~UPLL_FB_DIV_MASK);
265
266 /* set ref divider to 0 */
267 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
268
269 if (optimal_vco_freq < 187500)
270 WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
271 else
272 WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
273
274 /* set PDIV_A and PDIV_B */
275 WREG32_P(CG_UPLL_FUNC_CNTL_2,
276 UPLL_PDIV_A(optimal_vclk_div) | UPLL_PDIV_B(optimal_dclk_div),
277 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
278
279 /* give the PLL some time to settle */
280 mdelay(15);
281
282 /* deassert PLL_RESET */
283 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
284
285 mdelay(15);
286
287 /* switch from bypass mode to normal mode */
288 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
289
290 r = evergreen_uvd_send_upll_ctlreq(rdev);
291 if (r)
292 return r;
293
294 /* switch VCLK and DCLK selection */
295 WREG32_P(CG_UPLL_FUNC_CNTL_2,
296 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
297 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
298
299 mdelay(100);
300
301 return 0;
302}
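/*
 * Illustrative usage only: a caller such as the UVD start code might do
 * something like evergreen_set_uvd_clocks(rdev, 54000, 40000), i.e. roughly
 * 540 MHz vclk and 400 MHz dclk assuming the usual radeon 10 kHz clock
 * units; the actual values are chosen by the UVD code.
 */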
303
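/**
 * evergreen_fix_pci_max_read_req_size - sanitize the PCIE max read request size
 *
 * @rdev: radeon_device pointer
 *
 * If the BIOS or OS left MAX_READ_REQUEST_SIZE at a value the GPU cannot
 * handle, reprogram it to 512 bytes to avoid hangs or performance issues.
 */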
Alex Deucherd054ac12011-09-01 17:46:15 +0000304void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
305{
306 u16 ctl, v;
Jiang Liu32195ae2012-07-24 17:20:30 +0800307 int err;
Alex Deucherd054ac12011-09-01 17:46:15 +0000308
Jiang Liu32195ae2012-07-24 17:20:30 +0800309 err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
Alex Deucherd054ac12011-09-01 17:46:15 +0000310 if (err)
311 return;
312
313 v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
314
 315 /* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
 316 * to avoid hangs or performance issues
 317 */
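	/* the READRQ field encodes the request size as 128 << v bytes; 6 and 7
	 * are reserved encodings and 0 (128 bytes) is known to cause problems
	 * here, so force a safe 512 byte setting (v = 2)
	 */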
318 if ((v == 0) || (v == 6) || (v == 7)) {
319 ctl &= ~PCI_EXP_DEVCTL_READRQ;
320 ctl |= (2 << 12);
Jiang Liu32195ae2012-07-24 17:20:30 +0800321 pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
Alex Deucherd054ac12011-09-01 17:46:15 +0000322 }
323}
324
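/* helper: check whether the given crtc is currently inside the vertical blanking period */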
Alex Deucher10257a62013-04-09 18:49:59 -0400325static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
326{
327 if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
328 return true;
329 else
330 return false;
331}
332
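/* helper: check whether the crtc position counter is advancing, i.e. the crtc timing is actually running */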
333static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
334{
335 u32 pos1, pos2;
336
337 pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
338 pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
339
340 if (pos1 != pos2)
341 return true;
342 else
343 return false;
344}
345
Alex Deucher377edc82012-07-17 14:02:42 -0400346/**
347 * dce4_wait_for_vblank - vblank wait asic callback.
348 *
349 * @rdev: radeon_device pointer
350 * @crtc: crtc to wait for vblank on
351 *
352 * Wait for vblank on the requested crtc (evergreen+).
353 */
Alex Deucher3ae19b72012-02-23 17:53:37 -0500354void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
355{
Alex Deucher10257a62013-04-09 18:49:59 -0400356 unsigned i = 0;
Alex Deucher3ae19b72012-02-23 17:53:37 -0500357
Alex Deucher4a159032012-08-15 17:13:53 -0400358 if (crtc >= rdev->num_crtc)
359 return;
360
Alex Deucher10257a62013-04-09 18:49:59 -0400361 if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
362 return;
363
364 /* depending on when we hit vblank, we may be close to active; if so,
365 * wait for another frame.
366 */
367 while (dce4_is_in_vblank(rdev, crtc)) {
368 if (i++ % 100 == 0) {
369 if (!dce4_is_counter_moving(rdev, crtc))
Alex Deucher3ae19b72012-02-23 17:53:37 -0500370 break;
Alex Deucher3ae19b72012-02-23 17:53:37 -0500371 }
Alex Deucher10257a62013-04-09 18:49:59 -0400372 }
373
374 while (!dce4_is_in_vblank(rdev, crtc)) {
375 if (i++ % 100 == 0) {
376 if (!dce4_is_counter_moving(rdev, crtc))
Alex Deucher3ae19b72012-02-23 17:53:37 -0500377 break;
Alex Deucher3ae19b72012-02-23 17:53:37 -0500378 }
379 }
380}
381
Alex Deucher377edc82012-07-17 14:02:42 -0400382/**
383 * radeon_irq_kms_pflip_irq_get - pre-pageflip callback.
384 *
385 * @rdev: radeon_device pointer
386 * @crtc: crtc to prepare for pageflip on
387 *
388 * Pre-pageflip callback (evergreen+).
389 * Enables the pageflip irq (vblank irq).
390 */
Alex Deucher6f34be52010-11-21 10:59:01 -0500391void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
392{
Alex Deucher6f34be52010-11-21 10:59:01 -0500393 /* enable the pflip int */
394 radeon_irq_kms_pflip_irq_get(rdev, crtc);
395}
396
Alex Deucher377edc82012-07-17 14:02:42 -0400397/**
 398 * evergreen_post_page_flip - post-pageflip callback.
399 *
400 * @rdev: radeon_device pointer
401 * @crtc: crtc to cleanup pageflip on
402 *
403 * Post-pageflip callback (evergreen+).
404 * Disables the pageflip irq (vblank irq).
405 */
Alex Deucher6f34be52010-11-21 10:59:01 -0500406void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
407{
408 /* disable the pflip int */
409 radeon_irq_kms_pflip_irq_put(rdev, crtc);
410}
411
Alex Deucher377edc82012-07-17 14:02:42 -0400412/**
413 * evergreen_page_flip - pageflip callback.
414 *
415 * @rdev: radeon_device pointer
 416 * @crtc_id: crtc to do the pageflip on
417 * @crtc_base: new address of the crtc (GPU MC address)
418 *
419 * Does the actual pageflip (evergreen+).
420 * During vblank we take the crtc lock and wait for the update_pending
421 * bit to go high, when it does, we release the lock, and allow the
422 * double buffered update to take place.
423 * Returns the current update pending status.
424 */
Alex Deucher6f34be52010-11-21 10:59:01 -0500425u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
426{
427 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
428 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
Alex Deucherf6496472011-11-28 14:49:26 -0500429 int i;
Alex Deucher6f34be52010-11-21 10:59:01 -0500430
431 /* Lock the graphics update lock */
432 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
433 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
434
435 /* update the scanout addresses */
436 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
437 upper_32_bits(crtc_base));
438 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
439 (u32)crtc_base);
440
441 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
442 upper_32_bits(crtc_base));
443 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
444 (u32)crtc_base);
445
446 /* Wait for update_pending to go high. */
Alex Deucherf6496472011-11-28 14:49:26 -0500447 for (i = 0; i < rdev->usec_timeout; i++) {
448 if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
449 break;
450 udelay(1);
451 }
Alex Deucher6f34be52010-11-21 10:59:01 -0500452 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
453
454 /* Unlock the lock, so double-buffering can take place inside vblank */
455 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
456 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
457
458 /* Return current update_pending status: */
459 return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
460}
461
Alex Deucher21a81222010-07-02 12:58:16 -0400462/* get temperature in millidegrees */
Alex Deucher20d391d2011-02-01 16:12:34 -0500463int evergreen_get_temp(struct radeon_device *rdev)
Alex Deucher21a81222010-07-02 12:58:16 -0400464{
Alex Deucher1c88d742011-06-14 19:15:53 +0000465 u32 temp, toffset;
466 int actual_temp = 0;
Alex Deucher21a81222010-07-02 12:58:16 -0400467
Alex Deucher67b3f822011-05-25 18:45:37 -0400468 if (rdev->family == CHIP_JUNIPER) {
469 toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
470 TOFFSET_SHIFT;
471 temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
472 TS0_ADC_DOUT_SHIFT;
Alex Deucher21a81222010-07-02 12:58:16 -0400473
Alex Deucher67b3f822011-05-25 18:45:37 -0400474 if (toffset & 0x100)
475 actual_temp = temp / 2 - (0x200 - toffset);
476 else
477 actual_temp = temp / 2 + toffset;
478
479 actual_temp = actual_temp * 1000;
480
481 } else {
482 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
483 ASIC_T_SHIFT;
484
485 if (temp & 0x400)
486 actual_temp = -256;
487 else if (temp & 0x200)
488 actual_temp = 255;
489 else if (temp & 0x100) {
490 actual_temp = temp & 0x1ff;
491 actual_temp |= ~0x1ff;
492 } else
493 actual_temp = temp & 0xff;
494
495 actual_temp = (actual_temp * 1000) / 2;
496 }
497
498 return actual_temp;
Alex Deucher21a81222010-07-02 12:58:16 -0400499}
500
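/* get temperature in millidegrees */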
Alex Deucher20d391d2011-02-01 16:12:34 -0500501int sumo_get_temp(struct radeon_device *rdev)
Alex Deuchere33df252010-11-22 17:56:32 -0500502{
503 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
Alex Deucher20d391d2011-02-01 16:12:34 -0500504 int actual_temp = temp - 49;
Alex Deuchere33df252010-11-22 17:56:32 -0500505
506 return actual_temp * 1000;
507}
508
Alex Deucher377edc82012-07-17 14:02:42 -0400509/**
510 * sumo_pm_init_profile - Initialize power profiles callback.
511 *
512 * @rdev: radeon_device pointer
513 *
514 * Initialize the power states used in profile mode
515 * (sumo, trinity, SI).
516 * Used for profile mode only.
517 */
Alex Deuchera4c9e2e2011-11-04 10:09:41 -0400518void sumo_pm_init_profile(struct radeon_device *rdev)
519{
520 int idx;
521
522 /* default */
523 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
524 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
525 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
526 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
527
528 /* low,mid sh/mh */
529 if (rdev->flags & RADEON_IS_MOBILITY)
530 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
531 else
532 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
533
534 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
535 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
536 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
537 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
538
539 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
540 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
541 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
542 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
543
544 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
545 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
546 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
547 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
548
549 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
550 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
551 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
552 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
553
554 /* high sh/mh */
555 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
556 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
557 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
558 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
559 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
560 rdev->pm.power_state[idx].num_clock_modes - 1;
561
562 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
563 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
564 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
565 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
566 rdev->pm.power_state[idx].num_clock_modes - 1;
567}
568
Alex Deucher377edc82012-07-17 14:02:42 -0400569/**
Alex Deucher27810fb2012-10-01 19:25:11 -0400570 * btc_pm_init_profile - Initialize power profiles callback.
571 *
572 * @rdev: radeon_device pointer
573 *
574 * Initialize the power states used in profile mode
575 * (BTC, cayman).
576 * Used for profile mode only.
577 */
578void btc_pm_init_profile(struct radeon_device *rdev)
579{
580 int idx;
581
582 /* default */
583 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
584 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
585 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
586 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
587 /* starting with BTC, there is one state that is used for both
588 * MH and SH. Difference is that we always use the high clock index for
589 * mclk.
590 */
591 if (rdev->flags & RADEON_IS_MOBILITY)
592 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
593 else
594 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
595 /* low sh */
596 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
597 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
598 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
599 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
600 /* mid sh */
601 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
602 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
603 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
604 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
605 /* high sh */
606 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
607 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
608 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
609 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
610 /* low mh */
611 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
612 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
613 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
614 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
615 /* mid mh */
616 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
617 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
618 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
619 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
620 /* high mh */
621 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
622 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
623 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
624 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
625}
626
627/**
Alex Deucher377edc82012-07-17 14:02:42 -0400628 * evergreen_pm_misc - set additional pm hw parameters callback.
629 *
630 * @rdev: radeon_device pointer
631 *
632 * Set non-clock parameters associated with a power state
633 * (voltage, etc.) (evergreen+).
634 */
Alex Deucher49e02b72010-04-23 17:57:27 -0400635void evergreen_pm_misc(struct radeon_device *rdev)
636{
Rafał Miłeckia081a9d2010-06-07 18:20:25 -0400637 int req_ps_idx = rdev->pm.requested_power_state_index;
638 int req_cm_idx = rdev->pm.requested_clock_mode_index;
639 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
640 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
Alex Deucher49e02b72010-04-23 17:57:27 -0400641
Alex Deucher2feea492011-04-12 14:49:24 -0400642 if (voltage->type == VOLTAGE_SW) {
Alex Deuchera377e182011-06-20 13:00:31 -0400643 /* 0xff01 is a flag rather than an actual voltage */
644 if (voltage->voltage == 0xff01)
645 return;
Alex Deucher2feea492011-04-12 14:49:24 -0400646 if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
Alex Deucher8a83ec52011-04-12 14:49:23 -0400647 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
Alex Deucher4d601732010-06-07 18:15:18 -0400648 rdev->pm.current_vddc = voltage->voltage;
Alex Deucher2feea492011-04-12 14:49:24 -0400649 DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
650 }
Alex Deucher7ae764b2013-02-11 08:44:48 -0500651
652 /* starting with BTC, there is one state that is used for both
653 * MH and SH. Difference is that we always use the high clock index for
654 * mclk and vddci.
655 */
656 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
657 (rdev->family >= CHIP_BARTS) &&
658 rdev->pm.active_crtc_count &&
659 ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
660 (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
661 voltage = &rdev->pm.power_state[req_ps_idx].
662 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
663
Alex Deuchera377e182011-06-20 13:00:31 -0400664 /* 0xff01 is a flag rather than an actual voltage */
665 if (voltage->vddci == 0xff01)
666 return;
Alex Deucher2feea492011-04-12 14:49:24 -0400667 if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
668 radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
669 rdev->pm.current_vddci = voltage->vddci;
670 DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
Alex Deucher4d601732010-06-07 18:15:18 -0400671 }
672 }
Alex Deucher49e02b72010-04-23 17:57:27 -0400673}
674
Alex Deucher377edc82012-07-17 14:02:42 -0400675/**
676 * evergreen_pm_prepare - pre-power state change callback.
677 *
678 * @rdev: radeon_device pointer
679 *
680 * Prepare for a power state change (evergreen+).
681 */
Alex Deucher49e02b72010-04-23 17:57:27 -0400682void evergreen_pm_prepare(struct radeon_device *rdev)
683{
684 struct drm_device *ddev = rdev->ddev;
685 struct drm_crtc *crtc;
686 struct radeon_crtc *radeon_crtc;
687 u32 tmp;
688
689 /* disable any active CRTCs */
690 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
691 radeon_crtc = to_radeon_crtc(crtc);
692 if (radeon_crtc->enabled) {
693 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
694 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
695 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
696 }
697 }
698}
699
Alex Deucher377edc82012-07-17 14:02:42 -0400700/**
701 * evergreen_pm_finish - post-power state change callback.
702 *
703 * @rdev: radeon_device pointer
704 *
705 * Clean up after a power state change (evergreen+).
706 */
Alex Deucher49e02b72010-04-23 17:57:27 -0400707void evergreen_pm_finish(struct radeon_device *rdev)
708{
709 struct drm_device *ddev = rdev->ddev;
710 struct drm_crtc *crtc;
711 struct radeon_crtc *radeon_crtc;
712 u32 tmp;
713
714 /* enable any active CRTCs */
715 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
716 radeon_crtc = to_radeon_crtc(crtc);
717 if (radeon_crtc->enabled) {
718 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
719 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
720 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
721 }
722 }
723}
724
Alex Deucher377edc82012-07-17 14:02:42 -0400725/**
726 * evergreen_hpd_sense - hpd sense callback.
727 *
728 * @rdev: radeon_device pointer
729 * @hpd: hpd (hotplug detect) pin
730 *
731 * Checks if a digital monitor is connected (evergreen+).
732 * Returns true if connected, false if not connected.
733 */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -0500734bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
735{
736 bool connected = false;
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500737
738 switch (hpd) {
739 case RADEON_HPD_1:
740 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
741 connected = true;
742 break;
743 case RADEON_HPD_2:
744 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
745 connected = true;
746 break;
747 case RADEON_HPD_3:
748 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
749 connected = true;
750 break;
751 case RADEON_HPD_4:
752 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
753 connected = true;
754 break;
755 case RADEON_HPD_5:
756 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
757 connected = true;
758 break;
759 case RADEON_HPD_6:
760 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
761 connected = true;
762 break;
763 default:
764 break;
765 }
766
Alex Deucherbcc1c2a2010-01-12 17:54:34 -0500767 return connected;
768}
769
Alex Deucher377edc82012-07-17 14:02:42 -0400770/**
771 * evergreen_hpd_set_polarity - hpd set polarity callback.
772 *
773 * @rdev: radeon_device pointer
774 * @hpd: hpd (hotplug detect) pin
775 *
776 * Set the polarity of the hpd pin (evergreen+).
777 */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -0500778void evergreen_hpd_set_polarity(struct radeon_device *rdev,
779 enum radeon_hpd_id hpd)
780{
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500781 u32 tmp;
782 bool connected = evergreen_hpd_sense(rdev, hpd);
783
784 switch (hpd) {
785 case RADEON_HPD_1:
786 tmp = RREG32(DC_HPD1_INT_CONTROL);
787 if (connected)
788 tmp &= ~DC_HPDx_INT_POLARITY;
789 else
790 tmp |= DC_HPDx_INT_POLARITY;
791 WREG32(DC_HPD1_INT_CONTROL, tmp);
792 break;
793 case RADEON_HPD_2:
794 tmp = RREG32(DC_HPD2_INT_CONTROL);
795 if (connected)
796 tmp &= ~DC_HPDx_INT_POLARITY;
797 else
798 tmp |= DC_HPDx_INT_POLARITY;
799 WREG32(DC_HPD2_INT_CONTROL, tmp);
800 break;
801 case RADEON_HPD_3:
802 tmp = RREG32(DC_HPD3_INT_CONTROL);
803 if (connected)
804 tmp &= ~DC_HPDx_INT_POLARITY;
805 else
806 tmp |= DC_HPDx_INT_POLARITY;
807 WREG32(DC_HPD3_INT_CONTROL, tmp);
808 break;
809 case RADEON_HPD_4:
810 tmp = RREG32(DC_HPD4_INT_CONTROL);
811 if (connected)
812 tmp &= ~DC_HPDx_INT_POLARITY;
813 else
814 tmp |= DC_HPDx_INT_POLARITY;
815 WREG32(DC_HPD4_INT_CONTROL, tmp);
816 break;
817 case RADEON_HPD_5:
818 tmp = RREG32(DC_HPD5_INT_CONTROL);
819 if (connected)
820 tmp &= ~DC_HPDx_INT_POLARITY;
821 else
822 tmp |= DC_HPDx_INT_POLARITY;
823 WREG32(DC_HPD5_INT_CONTROL, tmp);
824 break;
825 case RADEON_HPD_6:
826 tmp = RREG32(DC_HPD6_INT_CONTROL);
827 if (connected)
828 tmp &= ~DC_HPDx_INT_POLARITY;
829 else
830 tmp |= DC_HPDx_INT_POLARITY;
831 WREG32(DC_HPD6_INT_CONTROL, tmp);
832 break;
833 default:
834 break;
835 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -0500836}
837
Alex Deucher377edc82012-07-17 14:02:42 -0400838/**
839 * evergreen_hpd_init - hpd setup callback.
840 *
841 * @rdev: radeon_device pointer
842 *
843 * Setup the hpd pins used by the card (evergreen+).
844 * Enable the pin, set the polarity, and enable the hpd interrupts.
845 */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -0500846void evergreen_hpd_init(struct radeon_device *rdev)
847{
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500848 struct drm_device *dev = rdev->ddev;
849 struct drm_connector *connector;
Christian Koenigfb982572012-05-17 01:33:30 +0200850 unsigned enabled = 0;
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500851 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
852 DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -0500853
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500854 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
855 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
Alex Deucher2e97be72013-04-11 12:45:34 -0400856
857 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
858 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
859 /* don't try to enable hpd on eDP or LVDS avoid breaking the
860 * aux dp channel on imac and help (but not completely fix)
861 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
862 * also avoid interrupt storms during dpms.
863 */
864 continue;
865 }
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500866 switch (radeon_connector->hpd.hpd) {
867 case RADEON_HPD_1:
868 WREG32(DC_HPD1_CONTROL, tmp);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500869 break;
870 case RADEON_HPD_2:
871 WREG32(DC_HPD2_CONTROL, tmp);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500872 break;
873 case RADEON_HPD_3:
874 WREG32(DC_HPD3_CONTROL, tmp);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500875 break;
876 case RADEON_HPD_4:
877 WREG32(DC_HPD4_CONTROL, tmp);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500878 break;
879 case RADEON_HPD_5:
880 WREG32(DC_HPD5_CONTROL, tmp);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500881 break;
882 case RADEON_HPD_6:
883 WREG32(DC_HPD6_CONTROL, tmp);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500884 break;
885 default:
886 break;
887 }
Alex Deucher64912e92011-11-03 11:21:39 -0400888 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
Christian Koenigfb982572012-05-17 01:33:30 +0200889 enabled |= 1 << radeon_connector->hpd.hpd;
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500890 }
Christian Koenigfb982572012-05-17 01:33:30 +0200891 radeon_irq_kms_enable_hpd(rdev, enabled);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -0500892}
893
Alex Deucher377edc82012-07-17 14:02:42 -0400894/**
895 * evergreen_hpd_fini - hpd tear down callback.
896 *
897 * @rdev: radeon_device pointer
898 *
899 * Tear down the hpd pins used by the card (evergreen+).
900 * Disable the hpd interrupts.
901 */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -0500902void evergreen_hpd_fini(struct radeon_device *rdev)
903{
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500904 struct drm_device *dev = rdev->ddev;
905 struct drm_connector *connector;
Christian Koenigfb982572012-05-17 01:33:30 +0200906 unsigned disabled = 0;
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500907
908 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
909 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
910 switch (radeon_connector->hpd.hpd) {
911 case RADEON_HPD_1:
912 WREG32(DC_HPD1_CONTROL, 0);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500913 break;
914 case RADEON_HPD_2:
915 WREG32(DC_HPD2_CONTROL, 0);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500916 break;
917 case RADEON_HPD_3:
918 WREG32(DC_HPD3_CONTROL, 0);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500919 break;
920 case RADEON_HPD_4:
921 WREG32(DC_HPD4_CONTROL, 0);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500922 break;
923 case RADEON_HPD_5:
924 WREG32(DC_HPD5_CONTROL, 0);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500925 break;
926 case RADEON_HPD_6:
927 WREG32(DC_HPD6_CONTROL, 0);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500928 break;
929 default:
930 break;
931 }
Christian Koenigfb982572012-05-17 01:33:30 +0200932 disabled |= 1 << radeon_connector->hpd.hpd;
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500933 }
Christian Koenigfb982572012-05-17 01:33:30 +0200934 radeon_irq_kms_disable_hpd(rdev, disabled);
Alex Deucher0ca2ab52010-02-26 13:57:45 -0500935}
936
Alex Deucherf9d9c362010-10-22 02:51:05 -0400937/* watermark setup */
938
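/**
 * evergreen_line_buffer_adjust - set up the line buffer split for a crtc
 *
 * @rdev: radeon_device pointer
 * @radeon_crtc: the crtc to program
 * @mode: mode on this crtc (NULL or disabled means no allocation needed)
 * @other_mode: mode on the crtc sharing this line buffer
 *
 * Program DC_LB_MEMORY_SPLIT for the crtc and return the size of the
 * line buffer allocated to it (0 if the crtc is disabled).
 */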
939static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
940 struct radeon_crtc *radeon_crtc,
941 struct drm_display_mode *mode,
942 struct drm_display_mode *other_mode)
943{
Alex Deucher12dfc842011-04-14 19:07:34 -0400944 u32 tmp;
Alex Deucherf9d9c362010-10-22 02:51:05 -0400945 /*
946 * Line Buffer Setup
947 * There are 3 line buffers, each one shared by 2 display controllers.
948 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
 949 * the display controllers. The partitioning is done via one of four
950 * preset allocations specified in bits 2:0:
951 * first display controller
952 * 0 - first half of lb (3840 * 2)
953 * 1 - first 3/4 of lb (5760 * 2)
Alex Deucher12dfc842011-04-14 19:07:34 -0400954 * 2 - whole lb (7680 * 2), other crtc must be disabled
Alex Deucherf9d9c362010-10-22 02:51:05 -0400955 * 3 - first 1/4 of lb (1920 * 2)
956 * second display controller
957 * 4 - second half of lb (3840 * 2)
958 * 5 - second 3/4 of lb (5760 * 2)
Alex Deucher12dfc842011-04-14 19:07:34 -0400959 * 6 - whole lb (7680 * 2), other crtc must be disabled
Alex Deucherf9d9c362010-10-22 02:51:05 -0400960 * 7 - last 1/4 of lb (1920 * 2)
961 */
Alex Deucher12dfc842011-04-14 19:07:34 -0400962 /* this can get tricky if we have two large displays on a paired group
963 * of crtcs. Ideally for multiple large displays we'd assign them to
964 * non-linked crtcs for maximum line buffer allocation.
965 */
966 if (radeon_crtc->base.enabled && mode) {
967 if (other_mode)
Alex Deucherf9d9c362010-10-22 02:51:05 -0400968 tmp = 0; /* 1/2 */
Alex Deucher12dfc842011-04-14 19:07:34 -0400969 else
970 tmp = 2; /* whole */
971 } else
972 tmp = 0;
Alex Deucherf9d9c362010-10-22 02:51:05 -0400973
974 /* second controller of the pair uses second half of the lb */
975 if (radeon_crtc->crtc_id % 2)
976 tmp += 4;
977 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
978
Alex Deucher12dfc842011-04-14 19:07:34 -0400979 if (radeon_crtc->base.enabled && mode) {
980 switch (tmp) {
981 case 0:
982 case 4:
983 default:
984 if (ASIC_IS_DCE5(rdev))
985 return 4096 * 2;
986 else
987 return 3840 * 2;
988 case 1:
989 case 5:
990 if (ASIC_IS_DCE5(rdev))
991 return 6144 * 2;
992 else
993 return 5760 * 2;
994 case 2:
995 case 6:
996 if (ASIC_IS_DCE5(rdev))
997 return 8192 * 2;
998 else
999 return 7680 * 2;
1000 case 3:
1001 case 7:
1002 if (ASIC_IS_DCE5(rdev))
1003 return 2048 * 2;
1004 else
1005 return 1920 * 2;
1006 }
Alex Deucherf9d9c362010-10-22 02:51:05 -04001007 }
Alex Deucher12dfc842011-04-14 19:07:34 -04001008
1009 /* controller not enabled, so no lb used */
1010 return 0;
Alex Deucherf9d9c362010-10-22 02:51:05 -04001011}
1012
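/* read the number of dram channels (1, 2, 4 or 8) from MC_SHARED_CHMAP */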
Alex Deucherca7db222012-03-20 17:18:30 -04001013u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
Alex Deucherf9d9c362010-10-22 02:51:05 -04001014{
1015 u32 tmp = RREG32(MC_SHARED_CHMAP);
1016
1017 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1018 case 0:
1019 default:
1020 return 1;
1021 case 1:
1022 return 2;
1023 case 2:
1024 return 4;
1025 case 3:
1026 return 8;
1027 }
1028}
1029
1030struct evergreen_wm_params {
1031 u32 dram_channels; /* number of dram channels */
1032 u32 yclk; /* bandwidth per dram data pin in kHz */
1033 u32 sclk; /* engine clock in kHz */
1034 u32 disp_clk; /* display clock in kHz */
1035 u32 src_width; /* viewport width */
1036 u32 active_time; /* active display time in ns */
1037 u32 blank_time; /* blank time in ns */
1038 bool interlaced; /* mode is interlaced */
1039 fixed20_12 vsc; /* vertical scale ratio */
1040 u32 num_heads; /* number of active crtcs */
1041 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
1042 u32 lb_size; /* line buffer allocated to pipe */
1043 u32 vtaps; /* vertical scaler taps */
1044};
1045
1046static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
1047{
1048 /* Calculate DRAM Bandwidth and the part allocated to display. */
1049 fixed20_12 dram_efficiency; /* 0.7 */
1050 fixed20_12 yclk, dram_channels, bandwidth;
1051 fixed20_12 a;
1052
1053 a.full = dfixed_const(1000);
1054 yclk.full = dfixed_const(wm->yclk);
1055 yclk.full = dfixed_div(yclk, a);
1056 dram_channels.full = dfixed_const(wm->dram_channels * 4);
1057 a.full = dfixed_const(10);
1058 dram_efficiency.full = dfixed_const(7);
1059 dram_efficiency.full = dfixed_div(dram_efficiency, a);
1060 bandwidth.full = dfixed_mul(dram_channels, yclk);
1061 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
1062
1063 return dfixed_trunc(bandwidth);
1064}
1065
1066static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
1067{
1068 /* Calculate DRAM Bandwidth and the part allocated to display. */
1069 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
1070 fixed20_12 yclk, dram_channels, bandwidth;
1071 fixed20_12 a;
1072
1073 a.full = dfixed_const(1000);
1074 yclk.full = dfixed_const(wm->yclk);
1075 yclk.full = dfixed_div(yclk, a);
1076 dram_channels.full = dfixed_const(wm->dram_channels * 4);
1077 a.full = dfixed_const(10);
 1078 disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
1079 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
1080 bandwidth.full = dfixed_mul(dram_channels, yclk);
1081 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
1082
1083 return dfixed_trunc(bandwidth);
1084}
1085
1086static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
1087{
1088 /* Calculate the display Data return Bandwidth */
1089 fixed20_12 return_efficiency; /* 0.8 */
1090 fixed20_12 sclk, bandwidth;
1091 fixed20_12 a;
1092
1093 a.full = dfixed_const(1000);
1094 sclk.full = dfixed_const(wm->sclk);
1095 sclk.full = dfixed_div(sclk, a);
1096 a.full = dfixed_const(10);
1097 return_efficiency.full = dfixed_const(8);
1098 return_efficiency.full = dfixed_div(return_efficiency, a);
1099 a.full = dfixed_const(32);
1100 bandwidth.full = dfixed_mul(a, sclk);
1101 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
1102
1103 return dfixed_trunc(bandwidth);
1104}
1105
1106static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
1107{
1108 /* Calculate the DMIF Request Bandwidth */
1109 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
1110 fixed20_12 disp_clk, bandwidth;
1111 fixed20_12 a;
1112
1113 a.full = dfixed_const(1000);
1114 disp_clk.full = dfixed_const(wm->disp_clk);
1115 disp_clk.full = dfixed_div(disp_clk, a);
1116 a.full = dfixed_const(10);
1117 disp_clk_request_efficiency.full = dfixed_const(8);
1118 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
1119 a.full = dfixed_const(32);
1120 bandwidth.full = dfixed_mul(a, disp_clk);
1121 bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
1122
1123 return dfixed_trunc(bandwidth);
1124}
1125
1126static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
1127{
 1128 /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
1129 u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
1130 u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
1131 u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
1132
1133 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
1134}
1135
1136static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
1137{
1138 /* Calculate the display mode Average Bandwidth
1139 * DisplayMode should contain the source and destination dimensions,
1140 * timing, etc.
1141 */
1142 fixed20_12 bpp;
1143 fixed20_12 line_time;
1144 fixed20_12 src_width;
1145 fixed20_12 bandwidth;
1146 fixed20_12 a;
1147
1148 a.full = dfixed_const(1000);
1149 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
1150 line_time.full = dfixed_div(line_time, a);
1151 bpp.full = dfixed_const(wm->bytes_per_pixel);
1152 src_width.full = dfixed_const(wm->src_width);
1153 bandwidth.full = dfixed_mul(src_width, bpp);
1154 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
1155 bandwidth.full = dfixed_div(bandwidth, line_time);
1156
1157 return dfixed_trunc(bandwidth);
1158}
1159
1160static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
1161{
 1162 /* First calculate the latency in ns */
1163 u32 mc_latency = 2000; /* 2000 ns. */
1164 u32 available_bandwidth = evergreen_available_bandwidth(wm);
1165 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
1166 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
1167 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
1168 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
1169 (wm->num_heads * cursor_line_pair_return_time);
1170 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
1171 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
1172 fixed20_12 a, b, c;
1173
1174 if (wm->num_heads == 0)
1175 return 0;
1176
1177 a.full = dfixed_const(2);
1178 b.full = dfixed_const(1);
1179 if ((wm->vsc.full > a.full) ||
1180 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
1181 (wm->vtaps >= 5) ||
1182 ((wm->vsc.full >= a.full) && wm->interlaced))
1183 max_src_lines_per_dst_line = 4;
1184 else
1185 max_src_lines_per_dst_line = 2;
1186
1187 a.full = dfixed_const(available_bandwidth);
1188 b.full = dfixed_const(wm->num_heads);
1189 a.full = dfixed_div(a, b);
1190
1191 b.full = dfixed_const(1000);
1192 c.full = dfixed_const(wm->disp_clk);
1193 b.full = dfixed_div(c, b);
1194 c.full = dfixed_const(wm->bytes_per_pixel);
1195 b.full = dfixed_mul(b, c);
1196
1197 lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
1198
1199 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
1200 b.full = dfixed_const(1000);
1201 c.full = dfixed_const(lb_fill_bw);
1202 b.full = dfixed_div(c, b);
1203 a.full = dfixed_div(a, b);
1204 line_fill_time = dfixed_trunc(a);
1205
1206 if (line_fill_time < wm->active_time)
1207 return latency;
1208 else
1209 return latency + (line_fill_time - wm->active_time);
1210
1211}
1212
1213static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
1214{
1215 if (evergreen_average_bandwidth(wm) <=
1216 (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
1217 return true;
1218 else
1219 return false;
 1220}
1221
1222static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
1223{
1224 if (evergreen_average_bandwidth(wm) <=
1225 (evergreen_available_bandwidth(wm) / wm->num_heads))
1226 return true;
1227 else
1228 return false;
 1229}
1230
1231static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
1232{
1233 u32 lb_partitions = wm->lb_size / wm->src_width;
1234 u32 line_time = wm->active_time + wm->blank_time;
1235 u32 latency_tolerant_lines;
1236 u32 latency_hiding;
1237 fixed20_12 a;
1238
1239 a.full = dfixed_const(1);
1240 if (wm->vsc.full > a.full)
1241 latency_tolerant_lines = 1;
1242 else {
1243 if (lb_partitions <= (wm->vtaps + 1))
1244 latency_tolerant_lines = 1;
1245 else
1246 latency_tolerant_lines = 2;
1247 }
1248
1249 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1250
1251 if (evergreen_latency_watermark(wm) <= latency_hiding)
1252 return true;
1253 else
1254 return false;
1255}
1256
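/**
 * evergreen_program_watermarks - program display watermarks for a crtc
 *
 * @rdev: radeon_device pointer
 * @radeon_crtc: the crtc to program
 * @lb_size: line buffer size allocated to this crtc
 * @num_heads: number of active display heads
 *
 * Calculate the latency watermarks for the current mode and clocks,
 * program them into the pipe arbitration/latency registers, and set
 * the display priority marks (forcing high priority when bandwidth or
 * latency hiding requirements cannot otherwise be met).
 */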
1257static void evergreen_program_watermarks(struct radeon_device *rdev,
1258 struct radeon_crtc *radeon_crtc,
1259 u32 lb_size, u32 num_heads)
1260{
1261 struct drm_display_mode *mode = &radeon_crtc->base.mode;
1262 struct evergreen_wm_params wm;
1263 u32 pixel_period;
1264 u32 line_time = 0;
1265 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1266 u32 priority_a_mark = 0, priority_b_mark = 0;
1267 u32 priority_a_cnt = PRIORITY_OFF;
1268 u32 priority_b_cnt = PRIORITY_OFF;
1269 u32 pipe_offset = radeon_crtc->crtc_id * 16;
1270 u32 tmp, arb_control3;
1271 fixed20_12 a, b, c;
1272
1273 if (radeon_crtc->base.enabled && num_heads && mode) {
1274 pixel_period = 1000000 / (u32)mode->clock;
1275 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1276 priority_a_cnt = 0;
1277 priority_b_cnt = 0;
1278
1279 wm.yclk = rdev->pm.current_mclk * 10;
1280 wm.sclk = rdev->pm.current_sclk * 10;
1281 wm.disp_clk = mode->clock;
1282 wm.src_width = mode->crtc_hdisplay;
1283 wm.active_time = mode->crtc_hdisplay * pixel_period;
1284 wm.blank_time = line_time - wm.active_time;
1285 wm.interlaced = false;
1286 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1287 wm.interlaced = true;
1288 wm.vsc = radeon_crtc->vsc;
1289 wm.vtaps = 1;
1290 if (radeon_crtc->rmx_type != RMX_OFF)
1291 wm.vtaps = 2;
1292 wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
1293 wm.lb_size = lb_size;
1294 wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
1295 wm.num_heads = num_heads;
1296
1297 /* set for high clocks */
1298 latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
1299 /* set for low clocks */
1300 /* wm.yclk = low clk; wm.sclk = low clk */
1301 latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
1302
1303 /* possibly force display priority to high */
1304 /* should really do this at mode validation time... */
1305 if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
1306 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
1307 !evergreen_check_latency_hiding(&wm) ||
1308 (rdev->disp_priority == 2)) {
Alex Deucher92bdfd42011-08-04 17:28:40 +00001309 DRM_DEBUG_KMS("force priority to high\n");
Alex Deucherf9d9c362010-10-22 02:51:05 -04001310 priority_a_cnt |= PRIORITY_ALWAYS_ON;
1311 priority_b_cnt |= PRIORITY_ALWAYS_ON;
1312 }
1313
1314 a.full = dfixed_const(1000);
1315 b.full = dfixed_const(mode->clock);
1316 b.full = dfixed_div(b, a);
1317 c.full = dfixed_const(latency_watermark_a);
1318 c.full = dfixed_mul(c, b);
1319 c.full = dfixed_mul(c, radeon_crtc->hsc);
1320 c.full = dfixed_div(c, a);
1321 a.full = dfixed_const(16);
1322 c.full = dfixed_div(c, a);
1323 priority_a_mark = dfixed_trunc(c);
1324 priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
1325
1326 a.full = dfixed_const(1000);
1327 b.full = dfixed_const(mode->clock);
1328 b.full = dfixed_div(b, a);
1329 c.full = dfixed_const(latency_watermark_b);
1330 c.full = dfixed_mul(c, b);
1331 c.full = dfixed_mul(c, radeon_crtc->hsc);
1332 c.full = dfixed_div(c, a);
1333 a.full = dfixed_const(16);
1334 c.full = dfixed_div(c, a);
1335 priority_b_mark = dfixed_trunc(c);
1336 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
1337 }
1338
1339 /* select wm A */
1340 arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
1341 tmp = arb_control3;
1342 tmp &= ~LATENCY_WATERMARK_MASK(3);
1343 tmp |= LATENCY_WATERMARK_MASK(1);
1344 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
1345 WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
1346 (LATENCY_LOW_WATERMARK(latency_watermark_a) |
1347 LATENCY_HIGH_WATERMARK(line_time)));
1348 /* select wm B */
1349 tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
1350 tmp &= ~LATENCY_WATERMARK_MASK(3);
1351 tmp |= LATENCY_WATERMARK_MASK(2);
1352 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
1353 WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
1354 (LATENCY_LOW_WATERMARK(latency_watermark_b) |
1355 LATENCY_HIGH_WATERMARK(line_time)));
1356 /* restore original selection */
1357 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
1358
1359 /* write the priority marks */
1360 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
1361 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
1362
1363}
1364
Alex Deucher377edc82012-07-17 14:02:42 -04001365/**
1366 * evergreen_bandwidth_update - update display watermarks callback.
1367 *
1368 * @rdev: radeon_device pointer
1369 *
1370 * Update the display watermarks based on the requested mode(s)
1371 * (evergreen+).
1372 */
Alex Deucher0ca2ab52010-02-26 13:57:45 -05001373void evergreen_bandwidth_update(struct radeon_device *rdev)
1374{
Alex Deucherf9d9c362010-10-22 02:51:05 -04001375 struct drm_display_mode *mode0 = NULL;
1376 struct drm_display_mode *mode1 = NULL;
1377 u32 num_heads = 0, lb_size;
1378 int i;
1379
1380 radeon_update_display_priority(rdev);
1381
1382 for (i = 0; i < rdev->num_crtc; i++) {
1383 if (rdev->mode_info.crtcs[i]->base.enabled)
1384 num_heads++;
1385 }
1386 for (i = 0; i < rdev->num_crtc; i += 2) {
1387 mode0 = &rdev->mode_info.crtcs[i]->base.mode;
1388 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
1389 lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
1390 evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
1391 lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
1392 evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
1393 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001394}
1395
Alex Deucher377edc82012-07-17 14:02:42 -04001396/**
1397 * evergreen_mc_wait_for_idle - wait for MC idle callback.
1398 *
1399 * @rdev: radeon_device pointer
1400 *
1401 * Wait for the MC (memory controller) to be idle.
1402 * (evergreen+).
1403 * Returns 0 if the MC is idle, -1 if not.
1404 */
Alex Deucherb9952a82011-03-02 20:07:33 -05001405int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001406{
1407 unsigned i;
1408 u32 tmp;
1409
1410 for (i = 0; i < rdev->usec_timeout; i++) {
1411 /* read MC_STATUS */
1412 tmp = RREG32(SRBM_STATUS) & 0x1F00;
1413 if (!tmp)
1414 return 0;
1415 udelay(1);
1416 }
1417 return -1;
1418}
1419
1420/*
1421 * GART
1422 */
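/* flush the GPU TLB: flush the HDP cache, then request a VM context0
 * page table flush and poll VM_CONTEXT0_REQUEST_RESPONSE for completion
 */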
Alex Deucher0fcdb612010-03-24 13:20:41 -04001423void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
1424{
1425 unsigned i;
1426 u32 tmp;
1427
Alex Deucher6f2f48a2010-12-15 11:01:56 -05001428 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1429
Alex Deucher0fcdb612010-03-24 13:20:41 -04001430 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
1431 for (i = 0; i < rdev->usec_timeout; i++) {
 1432 /* read VM_CONTEXT0_REQUEST_RESPONSE */
1433 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
1434 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
1435 if (tmp == 2) {
1436 printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
1437 return;
1438 }
1439 if (tmp) {
1440 return;
1441 }
1442 udelay(1);
1443 }
1444}
1445
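/**
 * evergreen_pcie_gart_enable - set up and enable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in VRAM, program the L2 cache and TLB
 * controls, point VM context 0 at the GART range and page table,
 * and flush the TLB.
 * Returns 0 on success, error on failure.
 */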
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001446static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001447{
1448 u32 tmp;
Alex Deucher0fcdb612010-03-24 13:20:41 -04001449 int r;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001450
Jerome Glissec9a1be92011-11-03 11:16:49 -04001451 if (rdev->gart.robj == NULL) {
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001452 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
1453 return -EINVAL;
1454 }
1455 r = radeon_gart_table_vram_pin(rdev);
1456 if (r)
1457 return r;
Dave Airlie82568562010-02-05 16:00:07 +10001458 radeon_gart_restore(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001459 /* Setup L2 cache */
1460 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1461 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1462 EFFECTIVE_L2_QUEUE_SIZE(7));
1463 WREG32(VM_L2_CNTL2, 0);
1464 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
1465 /* Setup TLB control */
1466 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1467 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1468 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
1469 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
Alex Deucher8aeb96f82011-05-03 19:28:02 -04001470 if (rdev->flags & RADEON_IS_IGP) {
1471 WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
1472 WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
1473 WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
1474 } else {
1475 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1476 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1477 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
Alex Deucher0b8c30b2012-05-31 18:54:43 -04001478 if ((rdev->family == CHIP_JUNIPER) ||
1479 (rdev->family == CHIP_CYPRESS) ||
1480 (rdev->family == CHIP_HEMLOCK) ||
1481 (rdev->family == CHIP_BARTS))
1482 WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
Alex Deucher8aeb96f82011-05-03 19:28:02 -04001483 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001484 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1485 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1486 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1487 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
1488 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1489 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1490 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1491 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1492 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
1493 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1494 (u32)(rdev->dummy_page.addr >> 12));
Alex Deucher0fcdb612010-03-24 13:20:41 -04001495 WREG32(VM_CONTEXT1_CNTL, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001496
Alex Deucher0fcdb612010-03-24 13:20:41 -04001497 evergreen_pcie_gart_tlb_flush(rdev);
Tormod Voldenfcf4de52011-08-31 21:54:07 +00001498 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1499 (unsigned)(rdev->mc.gtt_size >> 20),
1500 (unsigned long long)rdev->gart.table_addr);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001501 rdev->gart.ready = true;
1502 return 0;
1503}
1504
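/**
 * evergreen_pcie_gart_disable - disable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Disables both VM contexts and the L1 TLBs/L2 cache used for GART
 * translation, then unpins the page table.
 */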
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001505static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001506{
1507 u32 tmp;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001508
1509 /* Disable all tables */
Alex Deucher0fcdb612010-03-24 13:20:41 -04001510 WREG32(VM_CONTEXT0_CNTL, 0);
1511 WREG32(VM_CONTEXT1_CNTL, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001512
1513 /* Setup L2 cache */
1514 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
1515 EFFECTIVE_L2_QUEUE_SIZE(7));
1516 WREG32(VM_L2_CNTL2, 0);
1517 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
1518 /* Setup TLB control */
1519 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
1520 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1521 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1522 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
1523 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1524 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1525 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1526 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
Jerome Glissec9a1be92011-11-03 11:16:49 -04001527 radeon_gart_table_vram_unpin(rdev);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001528}
1529
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001530static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001531{
1532 evergreen_pcie_gart_disable(rdev);
1533 radeon_gart_table_vram_free(rdev);
1534 radeon_gart_fini(rdev);
1535}
1536
1537
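/**
 * evergreen_agp_enable - set up the MC for AGP operation
 *
 * @rdev: radeon_device pointer
 *
 * Programs the VM L2 cache and L1 TLBs but leaves both VM contexts
 * disabled, so GPU accesses are handled through the AGP aperture
 * rather than a page table.
 */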
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001538static void evergreen_agp_enable(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001539{
1540 u32 tmp;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001541
1542 /* Setup L2 cache */
1543 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1544 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1545 EFFECTIVE_L2_QUEUE_SIZE(7));
1546 WREG32(VM_L2_CNTL2, 0);
1547 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
1548 /* Setup TLB control */
1549 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1550 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1551 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
1552 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
1553 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1554 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1555 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
1556 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1557 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1558 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1559 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
Alex Deucher0fcdb612010-03-24 13:20:41 -04001560 WREG32(VM_CONTEXT0_CNTL, 0);
1561 WREG32(VM_CONTEXT1_CNTL, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001562}
1563
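/**
 * evergreen_mc_stop - stop MC access and blank the displays
 *
 * @rdev: radeon_device pointer
 * @save: evergreen_mc_save struct
 *
 * Saves the VGA state, disables VGA rendering, blanks each enabled CRTC
 * and waits for the next frame, then blacks out the MC and blocks CPU
 * framebuffer access so the MC can be reprogrammed safely.  Double
 * buffered display registers are locked so no update slips through.
 */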
Alex Deucherb9952a82011-03-02 20:07:33 -05001564void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001565{
Alex Deucher62444b72012-08-15 17:18:42 -04001566 u32 crtc_enabled, tmp, frame_count, blackout;
1567 int i, j;
1568
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001569 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
1570 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001571
Alex Deucher62444b72012-08-15 17:18:42 -04001572 /* disable VGA render */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001573 WREG32(VGA_RENDER_CONTROL, 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001574 /* blank the display controllers */
1575 for (i = 0; i < rdev->num_crtc; i++) {
1576 crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
1577 if (crtc_enabled) {
1578 save->crtc_enabled[i] = true;
1579 if (ASIC_IS_DCE6(rdev)) {
1580 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1581 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
1582 radeon_wait_for_vblank(rdev, i);
Alex Deucherabf14572013-04-10 19:08:14 -04001583 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001584 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1585 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1586 }
1587 } else {
1588 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1589 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
1590 radeon_wait_for_vblank(rdev, i);
Alex Deucherabf14572013-04-10 19:08:14 -04001591 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001592 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1593 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
Alex Deucherabf14572013-04-10 19:08:14 -04001594 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001595 }
1596 }
1597 /* wait for the next frame */
1598 frame_count = radeon_get_vblank_counter(rdev, i);
1599 for (j = 0; j < rdev->usec_timeout; j++) {
1600 if (radeon_get_vblank_counter(rdev, i) != frame_count)
1601 break;
1602 udelay(1);
1603 }
Alex Deucherabf14572013-04-10 19:08:14 -04001604
1605 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
1606 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1607 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1608 tmp &= ~EVERGREEN_CRTC_MASTER_EN;
1609 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1610 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1611			save->crtc_enabled[i] = false;
1612			/* the crtc is now fully disabled; resume will leave it off */
Alex Deucher804cc4a02012-11-19 09:11:27 -05001613 } else {
1614 save->crtc_enabled[i] = false;
Alex Deucher62444b72012-08-15 17:18:42 -04001615 }
Alex Deucher18007402010-11-22 17:56:28 -05001616 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001617
Alex Deucher62444b72012-08-15 17:18:42 -04001618 radeon_mc_wait_for_idle(rdev);
1619
1620 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
1621 if ((blackout & BLACKOUT_MODE_MASK) != 1) {
1622 /* Block CPU access */
1623 WREG32(BIF_FB_EN, 0);
1624 /* blackout the MC */
1625 blackout &= ~BLACKOUT_MODE_MASK;
1626 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
Alex Deucherb7eff392011-07-08 11:44:56 -04001627 }
Alex Deuchered39fad2013-01-31 09:00:52 -05001628 /* wait for the MC to settle */
1629 udelay(100);
Alex Deucher968c0162013-04-10 09:58:42 -04001630
1631 /* lock double buffered regs */
1632 for (i = 0; i < rdev->num_crtc; i++) {
1633 if (save->crtc_enabled[i]) {
1634 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
1635 if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
1636 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
1637 WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
1638 }
1639 tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
1640 if (!(tmp & 1)) {
1641 tmp |= 1;
1642 WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
1643 }
1644 }
1645 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001646}
1647
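/**
 * evergreen_mc_resume - restore MC access and re-enable the displays
 *
 * @rdev: radeon_device pointer
 * @save: evergreen_mc_save struct
 *
 * Re-points the CRTC surfaces and VGA at the new VRAM base, unlocks the
 * double buffered display registers and waits for the pending updates to
 * land, lifts the MC blackout, restores CPU framebuffer access, and
 * unblanks the CRTCs that were active when evergreen_mc_stop() was called.
 */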
Alex Deucherb9952a82011-03-02 20:07:33 -05001648void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001649{
Alex Deucher62444b72012-08-15 17:18:42 -04001650 u32 tmp, frame_count;
1651 int i, j;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001652
Alex Deucher62444b72012-08-15 17:18:42 -04001653 /* update crtc base addresses */
1654 for (i = 0; i < rdev->num_crtc; i++) {
1655 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001656 upper_32_bits(rdev->mc.vram_start));
Alex Deucher62444b72012-08-15 17:18:42 -04001657 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001658 upper_32_bits(rdev->mc.vram_start));
Alex Deucher62444b72012-08-15 17:18:42 -04001659 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001660 (u32)rdev->mc.vram_start);
Alex Deucher62444b72012-08-15 17:18:42 -04001661 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001662 (u32)rdev->mc.vram_start);
Alex Deucherb7eff392011-07-08 11:44:56 -04001663 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001664 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1665 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
Alex Deucher62444b72012-08-15 17:18:42 -04001666
Alex Deucher968c0162013-04-10 09:58:42 -04001667 /* unlock regs and wait for update */
1668 for (i = 0; i < rdev->num_crtc; i++) {
1669 if (save->crtc_enabled[i]) {
1670 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
1671 if ((tmp & 0x3) != 0) {
1672 tmp &= ~0x3;
1673 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
1674 }
1675 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
1676 if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
1677 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
1678 WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
1679 }
1680 tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
1681 if (tmp & 1) {
1682 tmp &= ~1;
1683 WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
1684 }
1685 for (j = 0; j < rdev->usec_timeout; j++) {
1686 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
1687 if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
1688 break;
1689 udelay(1);
1690 }
1691 }
1692 }
1693
Alex Deucher62444b72012-08-15 17:18:42 -04001694 /* unblackout the MC */
1695 tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
1696 tmp &= ~BLACKOUT_MODE_MASK;
1697 WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
1698 /* allow CPU access */
1699 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
1700
1701 for (i = 0; i < rdev->num_crtc; i++) {
Alex Deucher695ddeb2012-11-05 16:34:58 +00001702 if (save->crtc_enabled[i]) {
Alex Deucher62444b72012-08-15 17:18:42 -04001703 if (ASIC_IS_DCE6(rdev)) {
1704 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1705 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
Christopher Staitebb5888202013-01-26 11:10:58 -05001706 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001707 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
Christopher Staitebb5888202013-01-26 11:10:58 -05001708 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001709 } else {
1710 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1711 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
Christopher Staitebb5888202013-01-26 11:10:58 -05001712 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001713 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
Christopher Staitebb5888202013-01-26 11:10:58 -05001714 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001715 }
1716 /* wait for the next frame */
1717 frame_count = radeon_get_vblank_counter(rdev, i);
1718 for (j = 0; j < rdev->usec_timeout; j++) {
1719 if (radeon_get_vblank_counter(rdev, i) != frame_count)
1720 break;
1721 udelay(1);
1722 }
1723 }
1724 }
1725 /* Unlock vga access */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001726 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
1727 mdelay(1);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001728 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
1729}
1730
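/**
 * evergreen_mc_program - program the MC address space layout
 *
 * @rdev: radeon_device pointer
 *
 * Stops the MC and displays, programs the system aperture, the VRAM and
 * AGP locations and the HDP non-surface range from the GPU memory map,
 * then resumes the MC and disables the VGA renderer.
 */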
Alex Deucher755d8192011-03-02 20:07:34 -05001731void evergreen_mc_program(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001732{
1733 struct evergreen_mc_save save;
1734 u32 tmp;
1735 int i, j;
1736
1737 /* Initialize HDP */
1738 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1739 WREG32((0x2c14 + j), 0x00000000);
1740 WREG32((0x2c18 + j), 0x00000000);
1741 WREG32((0x2c1c + j), 0x00000000);
1742 WREG32((0x2c20 + j), 0x00000000);
1743 WREG32((0x2c24 + j), 0x00000000);
1744 }
1745 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1746
1747 evergreen_mc_stop(rdev, &save);
1748 if (evergreen_mc_wait_for_idle(rdev)) {
1749		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1750 }
1751	/* Lockout access through VGA aperture */
1752 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1753 /* Update configuration */
1754 if (rdev->flags & RADEON_IS_AGP) {
1755 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1756 /* VRAM before AGP */
1757 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1758 rdev->mc.vram_start >> 12);
1759 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1760 rdev->mc.gtt_end >> 12);
1761 } else {
1762 /* VRAM after AGP */
1763 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1764 rdev->mc.gtt_start >> 12);
1765 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1766 rdev->mc.vram_end >> 12);
1767 }
1768 } else {
1769 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1770 rdev->mc.vram_start >> 12);
1771 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1772 rdev->mc.vram_end >> 12);
1773 }
Alex Deucher3b9832f2011-11-10 08:59:39 -05001774 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
Alex Deucher05b3ef62012-03-20 17:18:37 -04001775 /* llano/ontario only */
1776 if ((rdev->family == CHIP_PALM) ||
1777 (rdev->family == CHIP_SUMO) ||
1778 (rdev->family == CHIP_SUMO2)) {
Alex Deucherb4183e32010-12-15 11:04:10 -05001779 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1780 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
1781 tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
1782 WREG32(MC_FUS_VM_FB_OFFSET, tmp);
1783 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001784 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1785 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1786 WREG32(MC_VM_FB_LOCATION, tmp);
1787 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
Alex Deucherc46cb4d2011-01-06 19:12:37 -05001788 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
Jerome Glisse46fcd2b2010-06-03 19:34:48 +02001789 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001790 if (rdev->flags & RADEON_IS_AGP) {
1791 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
1792 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
1793 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1794 } else {
1795 WREG32(MC_VM_AGP_BASE, 0);
1796 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1797 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1798 }
1799 if (evergreen_mc_wait_for_idle(rdev)) {
1800		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1801 }
1802 evergreen_mc_resume(rdev, &save);
1803 /* we need to own VRAM, so turn off the VGA renderer here
1804 * to stop it overwriting our objects */
1805 rv515_vga_render_disable(rdev);
1806}
1807
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001808/*
1809 * CP.
1810 */
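/**
 * evergreen_ring_ib_execute - schedule an IB on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to execute
 *
 * Switches the CP to DX10/11 packet mode, writes the expected read
 * pointer to the rptr save register or the writeback page if available,
 * then emits an INDIRECT_BUFFER packet pointing at the IB.
 */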
Alex Deucher12920592011-02-02 12:37:40 -05001811void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1812{
Christian König876dc9f2012-05-08 14:24:01 +02001813 struct radeon_ring *ring = &rdev->ring[ib->ring];
Alex Deucher89d35802012-07-17 14:02:31 -04001814 u32 next_rptr;
Christian König7b1f2482011-09-23 15:11:23 +02001815
Alex Deucher12920592011-02-02 12:37:40 -05001816 /* set to DX10/11 mode */
Christian Könige32eb502011-10-23 12:56:27 +02001817 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1818 radeon_ring_write(ring, 1);
Christian König45df6802012-07-06 16:22:55 +02001819
1820 if (ring->rptr_save_reg) {
Alex Deucher89d35802012-07-17 14:02:31 -04001821 next_rptr = ring->wptr + 3 + 4;
Christian König45df6802012-07-06 16:22:55 +02001822 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1823 radeon_ring_write(ring, ((ring->rptr_save_reg -
1824 PACKET3_SET_CONFIG_REG_START) >> 2));
1825 radeon_ring_write(ring, next_rptr);
Alex Deucher89d35802012-07-17 14:02:31 -04001826 } else if (rdev->wb.enabled) {
1827 next_rptr = ring->wptr + 5 + 4;
1828 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
1829 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1830 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
1831 radeon_ring_write(ring, next_rptr);
1832 radeon_ring_write(ring, 0);
Christian König45df6802012-07-06 16:22:55 +02001833 }
1834
Christian Könige32eb502011-10-23 12:56:27 +02001835 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1836 radeon_ring_write(ring,
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001837#ifdef __BIG_ENDIAN
1838 (2 << 0) |
1839#endif
1840 (ib->gpu_addr & 0xFFFFFFFC));
Christian Könige32eb502011-10-23 12:56:27 +02001841 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
1842 radeon_ring_write(ring, ib->length_dw);
Alex Deucher12920592011-02-02 12:37:40 -05001843}
1844
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001845
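/**
 * evergreen_cp_load_microcode - load the CP PFP and ME microcode
 *
 * @rdev: radeon_device pointer
 *
 * Halts the CP, writes the PFP and ME firmware images into the CP
 * ucode RAMs word by word, and resets the ucode read/write addresses.
 * Returns 0 on success, -EINVAL if the firmware has not been loaded.
 */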
1846static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1847{
Alex Deucherfe251e22010-03-24 13:36:43 -04001848 const __be32 *fw_data;
1849 int i;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001850
Alex Deucherfe251e22010-03-24 13:36:43 -04001851 if (!rdev->me_fw || !rdev->pfp_fw)
1852 return -EINVAL;
1853
1854 r700_cp_stop(rdev);
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001855 WREG32(CP_RB_CNTL,
1856#ifdef __BIG_ENDIAN
1857 BUF_SWAP_32BIT |
1858#endif
1859 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
Alex Deucherfe251e22010-03-24 13:36:43 -04001860
1861 fw_data = (const __be32 *)rdev->pfp_fw->data;
1862 WREG32(CP_PFP_UCODE_ADDR, 0);
1863 for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
1864 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1865 WREG32(CP_PFP_UCODE_ADDR, 0);
1866
1867 fw_data = (const __be32 *)rdev->me_fw->data;
1868 WREG32(CP_ME_RAM_WADDR, 0);
1869 for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
1870 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1871
1872 WREG32(CP_PFP_UCODE_ADDR, 0);
1873 WREG32(CP_ME_RAM_WADDR, 0);
1874 WREG32(CP_ME_RAM_RADDR, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001875 return 0;
1876}
1877
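/**
 * evergreen_cp_start - initialize the CP and emit the default state
 *
 * @rdev: radeon_device pointer
 *
 * Emits the ME_INITIALIZE packet, un-halts the CP micro engine, then
 * emits the clear-state preamble, the default register state and the
 * vertex cache/const setup on the gfx ring.
 * Returns 0 on success, error if the ring cannot be locked.
 */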
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001878static int evergreen_cp_start(struct radeon_device *rdev)
1879{
Christian Könige32eb502011-10-23 12:56:27 +02001880 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher2281a372010-10-21 13:31:38 -04001881 int r, i;
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001882 uint32_t cp_me;
1883
Christian Könige32eb502011-10-23 12:56:27 +02001884 r = radeon_ring_lock(rdev, ring, 7);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001885 if (r) {
1886 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1887 return r;
1888 }
Christian Könige32eb502011-10-23 12:56:27 +02001889 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1890 radeon_ring_write(ring, 0x1);
1891 radeon_ring_write(ring, 0x0);
1892 radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
1893 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1894 radeon_ring_write(ring, 0);
1895 radeon_ring_write(ring, 0);
1896 radeon_ring_unlock_commit(rdev, ring);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001897
1898 cp_me = 0xff;
1899 WREG32(CP_ME_CNTL, cp_me);
1900
Christian Könige32eb502011-10-23 12:56:27 +02001901 r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001902 if (r) {
1903 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1904 return r;
1905 }
Alex Deucher2281a372010-10-21 13:31:38 -04001906
1907 /* setup clear context state */
Christian Könige32eb502011-10-23 12:56:27 +02001908 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1909 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
Alex Deucher2281a372010-10-21 13:31:38 -04001910
1911 for (i = 0; i < evergreen_default_size; i++)
Christian Könige32eb502011-10-23 12:56:27 +02001912 radeon_ring_write(ring, evergreen_default_state[i]);
Alex Deucher2281a372010-10-21 13:31:38 -04001913
Christian Könige32eb502011-10-23 12:56:27 +02001914 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1915 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
Alex Deucher2281a372010-10-21 13:31:38 -04001916
1917 /* set clear context state */
Christian Könige32eb502011-10-23 12:56:27 +02001918 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1919 radeon_ring_write(ring, 0);
Alex Deucher2281a372010-10-21 13:31:38 -04001920
1921 /* SQ_VTX_BASE_VTX_LOC */
Christian Könige32eb502011-10-23 12:56:27 +02001922 radeon_ring_write(ring, 0xc0026f00);
1923 radeon_ring_write(ring, 0x00000000);
1924 radeon_ring_write(ring, 0x00000000);
1925 radeon_ring_write(ring, 0x00000000);
Alex Deucher2281a372010-10-21 13:31:38 -04001926
1927 /* Clear consts */
Christian Könige32eb502011-10-23 12:56:27 +02001928 radeon_ring_write(ring, 0xc0036f00);
1929 radeon_ring_write(ring, 0x00000bc4);
1930 radeon_ring_write(ring, 0xffffffff);
1931 radeon_ring_write(ring, 0xffffffff);
1932 radeon_ring_write(ring, 0xffffffff);
Alex Deucher2281a372010-10-21 13:31:38 -04001933
Christian Könige32eb502011-10-23 12:56:27 +02001934 radeon_ring_write(ring, 0xc0026900);
1935 radeon_ring_write(ring, 0x00000316);
1936 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1937	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
Alex Deucher18ff84d2011-02-02 12:37:41 -05001938
Christian Könige32eb502011-10-23 12:56:27 +02001939 radeon_ring_unlock_commit(rdev, ring);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001940
1941 return 0;
1942}
1943
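/**
 * evergreen_cp_resume - start up the CP ring buffer
 *
 * @rdev: radeon_device pointer
 *
 * Soft resets the CP blocks, programs the ring buffer size, read/write
 * pointers, writeback addresses and ring base, then calls
 * evergreen_cp_start() and runs a ring test.
 * Returns 0 on success, error on failure.
 */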
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001944static int evergreen_cp_resume(struct radeon_device *rdev)
Alex Deucherfe251e22010-03-24 13:36:43 -04001945{
Christian Könige32eb502011-10-23 12:56:27 +02001946 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucherfe251e22010-03-24 13:36:43 -04001947 u32 tmp;
1948 u32 rb_bufsz;
1949 int r;
1950
1951 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1952 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1953 SOFT_RESET_PA |
1954 SOFT_RESET_SH |
1955 SOFT_RESET_VGT |
Jerome Glissea49a50d2011-08-24 20:00:17 +00001956 SOFT_RESET_SPI |
Alex Deucherfe251e22010-03-24 13:36:43 -04001957 SOFT_RESET_SX));
1958 RREG32(GRBM_SOFT_RESET);
1959 mdelay(15);
1960 WREG32(GRBM_SOFT_RESET, 0);
1961 RREG32(GRBM_SOFT_RESET);
1962
1963 /* Set ring buffer size */
Christian Könige32eb502011-10-23 12:56:27 +02001964 rb_bufsz = drm_order(ring->ring_size / 8);
Alex Deucher724c80e2010-08-27 18:25:25 -04001965 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
Alex Deucherfe251e22010-03-24 13:36:43 -04001966#ifdef __BIG_ENDIAN
1967 tmp |= BUF_SWAP_32BIT;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001968#endif
Alex Deucherfe251e22010-03-24 13:36:43 -04001969 WREG32(CP_RB_CNTL, tmp);
Christian König15d33322011-09-15 19:02:22 +02001970 WREG32(CP_SEM_WAIT_TIMER, 0x0);
Alex Deucher11ef3f1f2012-01-20 14:47:43 -05001971 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
Alex Deucherfe251e22010-03-24 13:36:43 -04001972
1973 /* Set the write pointer delay */
1974 WREG32(CP_RB_WPTR_DELAY, 0);
1975
1976 /* Initialize the ring buffer's read and write pointers */
1977 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1978 WREG32(CP_RB_RPTR_WR, 0);
Christian Könige32eb502011-10-23 12:56:27 +02001979 ring->wptr = 0;
1980 WREG32(CP_RB_WPTR, ring->wptr);
Alex Deucher724c80e2010-08-27 18:25:25 -04001981
Adam Buchbinder48fc7f72012-09-19 21:48:00 -04001982 /* set the wb address whether it's enabled or not */
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001983 WREG32(CP_RB_RPTR_ADDR,
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001984 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
Alex Deucher724c80e2010-08-27 18:25:25 -04001985 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1986 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1987
1988 if (rdev->wb.enabled)
1989 WREG32(SCRATCH_UMSK, 0xff);
1990 else {
1991 tmp |= RB_NO_UPDATE;
1992 WREG32(SCRATCH_UMSK, 0);
1993 }
1994
Alex Deucherfe251e22010-03-24 13:36:43 -04001995 mdelay(1);
1996 WREG32(CP_RB_CNTL, tmp);
1997
Christian Könige32eb502011-10-23 12:56:27 +02001998 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
Alex Deucherfe251e22010-03-24 13:36:43 -04001999 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2000
Christian Könige32eb502011-10-23 12:56:27 +02002001 ring->rptr = RREG32(CP_RB_RPTR);
Alex Deucherfe251e22010-03-24 13:36:43 -04002002
Alex Deucher7e7b41d2010-09-02 21:32:32 -04002003 evergreen_cp_start(rdev);
Christian Könige32eb502011-10-23 12:56:27 +02002004 ring->ready = true;
Alex Deucherf7128122012-02-23 17:53:45 -05002005 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
Alex Deucherfe251e22010-03-24 13:36:43 -04002006 if (r) {
Christian Könige32eb502011-10-23 12:56:27 +02002007 ring->ready = false;
Alex Deucherfe251e22010-03-24 13:36:43 -04002008 return r;
2009 }
2010 return 0;
2011}
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002012
2013/*
2014 * Core functions
2015 */
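/**
 * evergreen_gpu_init - set up the 3D engine
 *
 * @rdev: radeon_device pointer
 *
 * Fills in the per-asic configuration limits, derives the tiling
 * configuration and render backend map from GB_ADDR_CONFIG and the
 * disabled RB fuses, and programs the SQ/SX/PA/VGT defaults used by
 * the 3D engine.
 */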
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002016static void evergreen_gpu_init(struct radeon_device *rdev)
2017{
Alex Deucher416a2bd2012-05-31 19:00:25 -04002018 u32 gb_addr_config;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002019 u32 mc_shared_chmap, mc_arb_ramcfg;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002020 u32 sx_debug_1;
2021 u32 smx_dc_ctl0;
2022 u32 sq_config;
2023 u32 sq_lds_resource_mgmt;
2024 u32 sq_gpr_resource_mgmt_1;
2025 u32 sq_gpr_resource_mgmt_2;
2026 u32 sq_gpr_resource_mgmt_3;
2027 u32 sq_thread_resource_mgmt;
2028 u32 sq_thread_resource_mgmt_2;
2029 u32 sq_stack_resource_mgmt_1;
2030 u32 sq_stack_resource_mgmt_2;
2031 u32 sq_stack_resource_mgmt_3;
2032 u32 vgt_cache_invalidation;
Alex Deucherf25a5c62011-05-19 11:07:57 -04002033 u32 hdp_host_path_cntl, tmp;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002034 u32 disabled_rb_mask;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002035 int i, j, num_shader_engines, ps_thread_count;
2036
2037 switch (rdev->family) {
2038 case CHIP_CYPRESS:
2039 case CHIP_HEMLOCK:
2040 rdev->config.evergreen.num_ses = 2;
2041 rdev->config.evergreen.max_pipes = 4;
2042 rdev->config.evergreen.max_tile_pipes = 8;
2043 rdev->config.evergreen.max_simds = 10;
2044 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
2045 rdev->config.evergreen.max_gprs = 256;
2046 rdev->config.evergreen.max_threads = 248;
2047 rdev->config.evergreen.max_gs_threads = 32;
2048 rdev->config.evergreen.max_stack_entries = 512;
2049 rdev->config.evergreen.sx_num_of_sets = 4;
2050 rdev->config.evergreen.sx_max_export_size = 256;
2051 rdev->config.evergreen.sx_max_export_pos_size = 64;
2052 rdev->config.evergreen.sx_max_export_smx_size = 192;
2053 rdev->config.evergreen.max_hw_contexts = 8;
2054 rdev->config.evergreen.sq_num_cf_insts = 2;
2055
2056 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2057 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2058 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002059 gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002060 break;
2061 case CHIP_JUNIPER:
2062 rdev->config.evergreen.num_ses = 1;
2063 rdev->config.evergreen.max_pipes = 4;
2064 rdev->config.evergreen.max_tile_pipes = 4;
2065 rdev->config.evergreen.max_simds = 10;
2066 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
2067 rdev->config.evergreen.max_gprs = 256;
2068 rdev->config.evergreen.max_threads = 248;
2069 rdev->config.evergreen.max_gs_threads = 32;
2070 rdev->config.evergreen.max_stack_entries = 512;
2071 rdev->config.evergreen.sx_num_of_sets = 4;
2072 rdev->config.evergreen.sx_max_export_size = 256;
2073 rdev->config.evergreen.sx_max_export_pos_size = 64;
2074 rdev->config.evergreen.sx_max_export_smx_size = 192;
2075 rdev->config.evergreen.max_hw_contexts = 8;
2076 rdev->config.evergreen.sq_num_cf_insts = 2;
2077
2078 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2079 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2080 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002081 gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002082 break;
2083 case CHIP_REDWOOD:
2084 rdev->config.evergreen.num_ses = 1;
2085 rdev->config.evergreen.max_pipes = 4;
2086 rdev->config.evergreen.max_tile_pipes = 4;
2087 rdev->config.evergreen.max_simds = 5;
2088 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
2089 rdev->config.evergreen.max_gprs = 256;
2090 rdev->config.evergreen.max_threads = 248;
2091 rdev->config.evergreen.max_gs_threads = 32;
2092 rdev->config.evergreen.max_stack_entries = 256;
2093 rdev->config.evergreen.sx_num_of_sets = 4;
2094 rdev->config.evergreen.sx_max_export_size = 256;
2095 rdev->config.evergreen.sx_max_export_pos_size = 64;
2096 rdev->config.evergreen.sx_max_export_smx_size = 192;
2097 rdev->config.evergreen.max_hw_contexts = 8;
2098 rdev->config.evergreen.sq_num_cf_insts = 2;
2099
2100 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2101 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2102 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002103 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002104 break;
2105 case CHIP_CEDAR:
2106 default:
2107 rdev->config.evergreen.num_ses = 1;
2108 rdev->config.evergreen.max_pipes = 2;
2109 rdev->config.evergreen.max_tile_pipes = 2;
2110 rdev->config.evergreen.max_simds = 2;
2111 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
2112 rdev->config.evergreen.max_gprs = 256;
2113 rdev->config.evergreen.max_threads = 192;
2114 rdev->config.evergreen.max_gs_threads = 16;
2115 rdev->config.evergreen.max_stack_entries = 256;
2116 rdev->config.evergreen.sx_num_of_sets = 4;
2117 rdev->config.evergreen.sx_max_export_size = 128;
2118 rdev->config.evergreen.sx_max_export_pos_size = 32;
2119 rdev->config.evergreen.sx_max_export_smx_size = 96;
2120 rdev->config.evergreen.max_hw_contexts = 4;
2121 rdev->config.evergreen.sq_num_cf_insts = 1;
2122
2123 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2124 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2125 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002126 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002127 break;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002128 case CHIP_PALM:
2129 rdev->config.evergreen.num_ses = 1;
2130 rdev->config.evergreen.max_pipes = 2;
2131 rdev->config.evergreen.max_tile_pipes = 2;
2132 rdev->config.evergreen.max_simds = 2;
2133 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
2134 rdev->config.evergreen.max_gprs = 256;
2135 rdev->config.evergreen.max_threads = 192;
2136 rdev->config.evergreen.max_gs_threads = 16;
2137 rdev->config.evergreen.max_stack_entries = 256;
2138 rdev->config.evergreen.sx_num_of_sets = 4;
2139 rdev->config.evergreen.sx_max_export_size = 128;
2140 rdev->config.evergreen.sx_max_export_pos_size = 32;
2141 rdev->config.evergreen.sx_max_export_smx_size = 96;
2142 rdev->config.evergreen.max_hw_contexts = 4;
2143 rdev->config.evergreen.sq_num_cf_insts = 1;
2144
2145 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2146 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2147 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002148 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002149 break;
Alex Deucherd5c5a722011-05-31 15:42:48 -04002150 case CHIP_SUMO:
2151 rdev->config.evergreen.num_ses = 1;
2152 rdev->config.evergreen.max_pipes = 4;
Jerome Glissebd25f072012-12-11 11:56:52 -05002153 rdev->config.evergreen.max_tile_pipes = 4;
Alex Deucherd5c5a722011-05-31 15:42:48 -04002154 if (rdev->pdev->device == 0x9648)
2155 rdev->config.evergreen.max_simds = 3;
2156 else if ((rdev->pdev->device == 0x9647) ||
2157 (rdev->pdev->device == 0x964a))
2158 rdev->config.evergreen.max_simds = 4;
2159 else
2160 rdev->config.evergreen.max_simds = 5;
2161 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
2162 rdev->config.evergreen.max_gprs = 256;
2163 rdev->config.evergreen.max_threads = 248;
2164 rdev->config.evergreen.max_gs_threads = 32;
2165 rdev->config.evergreen.max_stack_entries = 256;
2166 rdev->config.evergreen.sx_num_of_sets = 4;
2167 rdev->config.evergreen.sx_max_export_size = 256;
2168 rdev->config.evergreen.sx_max_export_pos_size = 64;
2169 rdev->config.evergreen.sx_max_export_smx_size = 192;
2170 rdev->config.evergreen.max_hw_contexts = 8;
2171 rdev->config.evergreen.sq_num_cf_insts = 2;
2172
2173 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2174 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2175 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Jerome Glissebd25f072012-12-11 11:56:52 -05002176 gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
Alex Deucherd5c5a722011-05-31 15:42:48 -04002177 break;
2178 case CHIP_SUMO2:
2179 rdev->config.evergreen.num_ses = 1;
2180 rdev->config.evergreen.max_pipes = 4;
2181 rdev->config.evergreen.max_tile_pipes = 4;
2182 rdev->config.evergreen.max_simds = 2;
2183 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
2184 rdev->config.evergreen.max_gprs = 256;
2185 rdev->config.evergreen.max_threads = 248;
2186 rdev->config.evergreen.max_gs_threads = 32;
2187 rdev->config.evergreen.max_stack_entries = 512;
2188 rdev->config.evergreen.sx_num_of_sets = 4;
2189 rdev->config.evergreen.sx_max_export_size = 256;
2190 rdev->config.evergreen.sx_max_export_pos_size = 64;
2191 rdev->config.evergreen.sx_max_export_smx_size = 192;
2192 rdev->config.evergreen.max_hw_contexts = 8;
2193 rdev->config.evergreen.sq_num_cf_insts = 2;
2194
2195 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2196 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2197 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Jerome Glissebd25f072012-12-11 11:56:52 -05002198 gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
Alex Deucherd5c5a722011-05-31 15:42:48 -04002199 break;
Alex Deucheradb68fa2011-01-06 21:19:24 -05002200 case CHIP_BARTS:
2201 rdev->config.evergreen.num_ses = 2;
2202 rdev->config.evergreen.max_pipes = 4;
2203 rdev->config.evergreen.max_tile_pipes = 8;
2204 rdev->config.evergreen.max_simds = 7;
2205 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
2206 rdev->config.evergreen.max_gprs = 256;
2207 rdev->config.evergreen.max_threads = 248;
2208 rdev->config.evergreen.max_gs_threads = 32;
2209 rdev->config.evergreen.max_stack_entries = 512;
2210 rdev->config.evergreen.sx_num_of_sets = 4;
2211 rdev->config.evergreen.sx_max_export_size = 256;
2212 rdev->config.evergreen.sx_max_export_pos_size = 64;
2213 rdev->config.evergreen.sx_max_export_smx_size = 192;
2214 rdev->config.evergreen.max_hw_contexts = 8;
2215 rdev->config.evergreen.sq_num_cf_insts = 2;
2216
2217 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2218 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2219 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002220 gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucheradb68fa2011-01-06 21:19:24 -05002221 break;
2222 case CHIP_TURKS:
2223 rdev->config.evergreen.num_ses = 1;
2224 rdev->config.evergreen.max_pipes = 4;
2225 rdev->config.evergreen.max_tile_pipes = 4;
2226 rdev->config.evergreen.max_simds = 6;
2227 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
2228 rdev->config.evergreen.max_gprs = 256;
2229 rdev->config.evergreen.max_threads = 248;
2230 rdev->config.evergreen.max_gs_threads = 32;
2231 rdev->config.evergreen.max_stack_entries = 256;
2232 rdev->config.evergreen.sx_num_of_sets = 4;
2233 rdev->config.evergreen.sx_max_export_size = 256;
2234 rdev->config.evergreen.sx_max_export_pos_size = 64;
2235 rdev->config.evergreen.sx_max_export_smx_size = 192;
2236 rdev->config.evergreen.max_hw_contexts = 8;
2237 rdev->config.evergreen.sq_num_cf_insts = 2;
2238
2239 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
2240 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2241 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002242 gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucheradb68fa2011-01-06 21:19:24 -05002243 break;
2244 case CHIP_CAICOS:
2245 rdev->config.evergreen.num_ses = 1;
Jerome Glissebd25f072012-12-11 11:56:52 -05002246 rdev->config.evergreen.max_pipes = 2;
Alex Deucheradb68fa2011-01-06 21:19:24 -05002247 rdev->config.evergreen.max_tile_pipes = 2;
2248 rdev->config.evergreen.max_simds = 2;
2249 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
2250 rdev->config.evergreen.max_gprs = 256;
2251 rdev->config.evergreen.max_threads = 192;
2252 rdev->config.evergreen.max_gs_threads = 16;
2253 rdev->config.evergreen.max_stack_entries = 256;
2254 rdev->config.evergreen.sx_num_of_sets = 4;
2255 rdev->config.evergreen.sx_max_export_size = 128;
2256 rdev->config.evergreen.sx_max_export_pos_size = 32;
2257 rdev->config.evergreen.sx_max_export_smx_size = 96;
2258 rdev->config.evergreen.max_hw_contexts = 4;
2259 rdev->config.evergreen.sq_num_cf_insts = 1;
2260
2261 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
2262 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
2263 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04002264 gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucheradb68fa2011-01-06 21:19:24 -05002265 break;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002266 }
2267
2268 /* Initialize HDP */
2269 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
2270 WREG32((0x2c14 + j), 0x00000000);
2271 WREG32((0x2c18 + j), 0x00000000);
2272 WREG32((0x2c1c + j), 0x00000000);
2273 WREG32((0x2c20 + j), 0x00000000);
2274 WREG32((0x2c24 + j), 0x00000000);
2275 }
2276
2277 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
2278
Alex Deucherd054ac12011-09-01 17:46:15 +00002279 evergreen_fix_pci_max_read_req_size(rdev);
2280
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002281 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
Alex Deucher05b3ef62012-03-20 17:18:37 -04002282 if ((rdev->family == CHIP_PALM) ||
2283 (rdev->family == CHIP_SUMO) ||
2284 (rdev->family == CHIP_SUMO2))
Alex Deucherd9282fc2011-05-11 03:15:24 -04002285 mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
2286 else
2287 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002288
Alex Deucher1aa52bd2010-11-17 12:11:03 -05002289 /* setup tiling info dword. gb_addr_config is not adequate since it does
2290 * not have bank info, so create a custom tiling dword.
2291 * bits 3:0 num_pipes
2292 * bits 7:4 num_banks
2293 * bits 11:8 group_size
2294 * bits 15:12 row_size
2295 */
2296 rdev->config.evergreen.tile_config = 0;
2297 switch (rdev->config.evergreen.max_tile_pipes) {
2298 case 1:
2299 default:
2300 rdev->config.evergreen.tile_config |= (0 << 0);
2301 break;
2302 case 2:
2303 rdev->config.evergreen.tile_config |= (1 << 0);
2304 break;
2305 case 4:
2306 rdev->config.evergreen.tile_config |= (2 << 0);
2307 break;
2308 case 8:
2309 rdev->config.evergreen.tile_config |= (3 << 0);
2310 break;
2311 }
Alex Deucherd698a342011-06-23 00:49:29 -04002312 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
Alex Deucher5bfa4872011-05-20 12:35:22 -04002313 if (rdev->flags & RADEON_IS_IGP)
Alex Deucherd698a342011-06-23 00:49:29 -04002314 rdev->config.evergreen.tile_config |= 1 << 4;
Alex Deucher29d65402012-05-31 18:53:36 -04002315 else {
Alex Deucherc8d15ed2012-07-31 11:01:10 -04002316 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
2317 case 0: /* four banks */
Alex Deucher29d65402012-05-31 18:53:36 -04002318 rdev->config.evergreen.tile_config |= 0 << 4;
Alex Deucherc8d15ed2012-07-31 11:01:10 -04002319 break;
2320 case 1: /* eight banks */
2321 rdev->config.evergreen.tile_config |= 1 << 4;
2322 break;
2323 case 2: /* sixteen banks */
2324 default:
2325 rdev->config.evergreen.tile_config |= 2 << 4;
2326 break;
2327 }
Alex Deucher29d65402012-05-31 18:53:36 -04002328 }
Alex Deucher416a2bd2012-05-31 19:00:25 -04002329 rdev->config.evergreen.tile_config |= 0 << 8;
Alex Deucher1aa52bd2010-11-17 12:11:03 -05002330 rdev->config.evergreen.tile_config |=
2331 ((gb_addr_config & 0x30000000) >> 28) << 12;
2332
Alex Deucher416a2bd2012-05-31 19:00:25 -04002333	num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
2334
2335 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
2336 u32 efuse_straps_4;
2337 u32 efuse_straps_3;
2338
2339 WREG32(RCU_IND_INDEX, 0x204);
2340 efuse_straps_4 = RREG32(RCU_IND_DATA);
2341 WREG32(RCU_IND_INDEX, 0x203);
2342 efuse_straps_3 = RREG32(RCU_IND_DATA);
2343 tmp = (((efuse_straps_4 & 0xf) << 4) |
2344 ((efuse_straps_3 & 0xf0000000) >> 28));
2345 } else {
2346 tmp = 0;
2347 for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
2348 u32 rb_disable_bitmap;
2349
2350 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
2351 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
2352 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
2353 tmp <<= 4;
2354 tmp |= rb_disable_bitmap;
2355 }
2356 }
2357	/* the enabled RBs are simply the ones that are not disabled */
2358 disabled_rb_mask = tmp;
Alex Deuchercedb6552013-04-09 10:13:22 -04002359 tmp = 0;
2360 for (i = 0; i < rdev->config.evergreen.max_backends; i++)
2361 tmp |= (1 << i);
2362 /* if all the backends are disabled, fix it up here */
2363 if ((disabled_rb_mask & tmp) == tmp) {
2364 for (i = 0; i < rdev->config.evergreen.max_backends; i++)
2365 disabled_rb_mask &= ~(1 << i);
2366 }
Alex Deucher416a2bd2012-05-31 19:00:25 -04002367
2368 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2369 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2370
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002371 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2372 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2373 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
Alex Deucher233d1ad2012-12-04 15:25:59 -05002374 WREG32(DMA_TILING_CONFIG, gb_addr_config);
Christian König9a210592013-04-08 12:41:37 +02002375 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
2376 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
2377 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002378
Alex Deucherf7eb9732013-01-30 13:57:40 -05002379 if ((rdev->config.evergreen.max_backends == 1) &&
2380 (rdev->flags & RADEON_IS_IGP)) {
2381 if ((disabled_rb_mask & 3) == 1) {
2382 /* RB0 disabled, RB1 enabled */
2383 tmp = 0x11111111;
2384 } else {
2385 /* RB1 disabled, RB0 enabled */
2386 tmp = 0x00000000;
2387 }
2388 } else {
2389 tmp = gb_addr_config & NUM_PIPES_MASK;
2390 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
2391 EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
2392 }
Alex Deucher416a2bd2012-05-31 19:00:25 -04002393 WREG32(GB_BACKEND_MAP, tmp);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002394
2395 WREG32(CGTS_SYS_TCC_DISABLE, 0);
2396 WREG32(CGTS_TCC_DISABLE, 0);
2397 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
2398 WREG32(CGTS_USER_TCC_DISABLE, 0);
2399
2400 /* set HW defaults for 3D engine */
2401 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
2402 ROQ_IB2_START(0x2b)));
2403
2404 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
2405
2406 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
2407 SYNC_GRADIENT |
2408 SYNC_WALKER |
2409 SYNC_ALIGNER));
2410
2411 sx_debug_1 = RREG32(SX_DEBUG_1);
2412 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
2413 WREG32(SX_DEBUG_1, sx_debug_1);
2414
2415
2416 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
2417 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
2418 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
2419 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
2420
Alex Deucherb866d132012-06-14 22:06:36 +02002421 if (rdev->family <= CHIP_SUMO2)
2422 WREG32(SMX_SAR_CTL0, 0x00010000);
2423
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002424 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
2425 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
2426 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
2427
2428 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
2429 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
2430 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
2431
2432 WREG32(VGT_NUM_INSTANCES, 1);
2433 WREG32(SPI_CONFIG_CNTL, 0);
2434 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
2435 WREG32(CP_PERFMON_CNTL, 0);
2436
2437 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
2438 FETCH_FIFO_HIWATER(0x4) |
2439 DONE_FIFO_HIWATER(0xe0) |
2440 ALU_UPDATE_FIFO_HIWATER(0x8)));
2441
2442 sq_config = RREG32(SQ_CONFIG);
2443 sq_config &= ~(PS_PRIO(3) |
2444 VS_PRIO(3) |
2445 GS_PRIO(3) |
2446 ES_PRIO(3));
2447 sq_config |= (VC_ENABLE |
2448 EXPORT_SRC_C |
2449 PS_PRIO(0) |
2450 VS_PRIO(1) |
2451 GS_PRIO(2) |
2452 ES_PRIO(3));
2453
Alex Deucherd5e455e2010-11-22 17:56:29 -05002454 switch (rdev->family) {
2455 case CHIP_CEDAR:
2456 case CHIP_PALM:
Alex Deucherd5c5a722011-05-31 15:42:48 -04002457 case CHIP_SUMO:
2458 case CHIP_SUMO2:
Alex Deucheradb68fa2011-01-06 21:19:24 -05002459 case CHIP_CAICOS:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002460 /* no vertex cache */
2461 sq_config &= ~VC_ENABLE;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002462 break;
2463 default:
2464 break;
2465 }
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002466
2467 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
2468
2469 sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
2470 sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
2471 sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
2472 sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2473 sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2474 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2475 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2476
Alex Deucherd5e455e2010-11-22 17:56:29 -05002477 switch (rdev->family) {
2478 case CHIP_CEDAR:
2479 case CHIP_PALM:
Alex Deucherd5c5a722011-05-31 15:42:48 -04002480 case CHIP_SUMO:
2481 case CHIP_SUMO2:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002482 ps_thread_count = 96;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002483 break;
2484 default:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002485 ps_thread_count = 128;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002486 break;
2487 }
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002488
2489 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
Alex Deucherf96b35c2010-06-16 12:24:07 -04002490 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2491 sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2492 sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2493 sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2494 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002495
2496 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2497 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2498 sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2499 sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2500 sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2501 sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2502
2503 WREG32(SQ_CONFIG, sq_config);
2504 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2505 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2506 WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
2507 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2508 WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
2509 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2510 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2511 WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
2512 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
2513 WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
2514
2515 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
2516 FORCE_EOV_MAX_REZ_CNT(255)));
2517
Alex Deucherd5e455e2010-11-22 17:56:29 -05002518 switch (rdev->family) {
2519 case CHIP_CEDAR:
2520 case CHIP_PALM:
Alex Deucherd5c5a722011-05-31 15:42:48 -04002521 case CHIP_SUMO:
2522 case CHIP_SUMO2:
Alex Deucheradb68fa2011-01-06 21:19:24 -05002523 case CHIP_CAICOS:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002524 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
Alex Deucherd5e455e2010-11-22 17:56:29 -05002525 break;
2526 default:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002527 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
Alex Deucherd5e455e2010-11-22 17:56:29 -05002528 break;
2529 }
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002530 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
2531 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2532
2533 WREG32(VGT_GS_VERTEX_REUSE, 16);
Alex Deucher12920592011-02-02 12:37:40 -05002534 WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002535 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2536
Alex Deucher60a4a3e2010-06-29 17:03:35 -04002537 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
2538 WREG32(VGT_OUT_DEALLOC_CNTL, 16);
2539
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002540 WREG32(CB_PERF_CTR0_SEL_0, 0);
2541 WREG32(CB_PERF_CTR0_SEL_1, 0);
2542 WREG32(CB_PERF_CTR1_SEL_0, 0);
2543 WREG32(CB_PERF_CTR1_SEL_1, 0);
2544 WREG32(CB_PERF_CTR2_SEL_0, 0);
2545 WREG32(CB_PERF_CTR2_SEL_1, 0);
2546 WREG32(CB_PERF_CTR3_SEL_0, 0);
2547 WREG32(CB_PERF_CTR3_SEL_1, 0);
2548
Alex Deucher60a4a3e2010-06-29 17:03:35 -04002549 /* clear render buffer base addresses */
2550 WREG32(CB_COLOR0_BASE, 0);
2551 WREG32(CB_COLOR1_BASE, 0);
2552 WREG32(CB_COLOR2_BASE, 0);
2553 WREG32(CB_COLOR3_BASE, 0);
2554 WREG32(CB_COLOR4_BASE, 0);
2555 WREG32(CB_COLOR5_BASE, 0);
2556 WREG32(CB_COLOR6_BASE, 0);
2557 WREG32(CB_COLOR7_BASE, 0);
2558 WREG32(CB_COLOR8_BASE, 0);
2559 WREG32(CB_COLOR9_BASE, 0);
2560 WREG32(CB_COLOR10_BASE, 0);
2561 WREG32(CB_COLOR11_BASE, 0);
2562
2563 /* set the shader const cache sizes to 0 */
2564 for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
2565 WREG32(i, 0);
2566 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
2567 WREG32(i, 0);
2568
Alex Deucherf25a5c62011-05-19 11:07:57 -04002569 tmp = RREG32(HDP_MISC_CNTL);
2570 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
2571 WREG32(HDP_MISC_CNTL, tmp);
2572
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002573 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
2574 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
2575
2576 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
2577
2578 udelay(50);
2579
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002580}
2581
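/**
 * evergreen_mc_init - initialize the memory controller driver state
 *
 * @rdev: radeon_device pointer
 *
 * Works out the VRAM type, bus width and size (CONFIG_MEMSIZE is in
 * bytes on fusion parts and in MB on discrete asics), sets up the
 * aperture, VRAM and GTT locations, and updates the bandwidth info.
 * Returns 0 on success.
 */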
2582int evergreen_mc_init(struct radeon_device *rdev)
2583{
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002584 u32 tmp;
2585 int chansize, numchan;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002586
2587	/* Get VRAM information */
2588 rdev->mc.vram_is_ddr = true;
Alex Deucher05b3ef62012-03-20 17:18:37 -04002589 if ((rdev->family == CHIP_PALM) ||
2590 (rdev->family == CHIP_SUMO) ||
2591 (rdev->family == CHIP_SUMO2))
Alex Deucher82084412011-07-01 13:18:28 -04002592 tmp = RREG32(FUS_MC_ARB_RAMCFG);
2593 else
2594 tmp = RREG32(MC_ARB_RAMCFG);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002595 if (tmp & CHANSIZE_OVERRIDE) {
2596 chansize = 16;
2597 } else if (tmp & CHANSIZE_MASK) {
2598 chansize = 64;
2599 } else {
2600 chansize = 32;
2601 }
2602 tmp = RREG32(MC_SHARED_CHMAP);
2603 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2604 case 0:
2605 default:
2606 numchan = 1;
2607 break;
2608 case 1:
2609 numchan = 2;
2610 break;
2611 case 2:
2612 numchan = 4;
2613 break;
2614 case 3:
2615 numchan = 8;
2616 break;
2617 }
2618 rdev->mc.vram_width = numchan * chansize;
2619	/* could the aperture size report 0? */
Jordan Crouse01d73a62010-05-27 13:40:24 -06002620 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2621 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002622 /* Setup GPU memory space */
Alex Deucher05b3ef62012-03-20 17:18:37 -04002623 if ((rdev->family == CHIP_PALM) ||
2624 (rdev->family == CHIP_SUMO) ||
2625 (rdev->family == CHIP_SUMO2)) {
Alex Deucher6eb18f82010-11-22 17:56:27 -05002626 /* size in bytes on fusion */
2627 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
2628 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
2629 } else {
Alex Deucher05b3ef62012-03-20 17:18:37 -04002630 /* size in MB on evergreen/cayman/tn */
Alex Deucher6eb18f82010-11-22 17:56:27 -05002631 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2632 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2633 }
Jerome Glisse51e5fcd2010-02-19 14:33:54 +00002634 rdev->mc.visible_vram_size = rdev->mc.aper_size;
Alex Deucher0ef0c1f2010-11-22 17:56:26 -05002635 r700_vram_gtt_location(rdev, &rdev->mc);
Alex Deucherf47299c2010-03-16 20:54:38 -04002636 radeon_update_bandwidth_info(rdev);
2637
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002638 return 0;
2639}
Jerome Glissed594e462010-02-17 21:54:29 +00002640
Alex Deucher187e3592013-01-18 14:51:38 -05002641void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
Alex Deucher747943e2010-03-24 13:26:36 -04002642{
Jerome Glisse64c56e82013-01-02 17:30:35 -05002643 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002644 RREG32(GRBM_STATUS));
Jerome Glisse64c56e82013-01-02 17:30:35 -05002645 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002646 RREG32(GRBM_STATUS_SE0));
Jerome Glisse64c56e82013-01-02 17:30:35 -05002647 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002648 RREG32(GRBM_STATUS_SE1));
Jerome Glisse64c56e82013-01-02 17:30:35 -05002649 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002650 RREG32(SRBM_STATUS));
Alex Deuchera65a4362013-01-18 18:55:54 -05002651 dev_info(rdev->dev, " SRBM_STATUS2 = 0x%08X\n",
2652 RREG32(SRBM_STATUS2));
Jerome Glisse440a7cd2012-06-27 12:25:01 -04002653 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2654 RREG32(CP_STALLED_STAT1));
2655 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
2656 RREG32(CP_STALLED_STAT2));
2657 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
2658 RREG32(CP_BUSY_STAT));
2659 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
2660 RREG32(CP_STAT));
Alex Deucher0ecebb92013-01-03 12:40:13 -05002661 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
2662 RREG32(DMA_STATUS_REG));
Alex Deucher168757e2013-01-18 19:17:22 -05002663 if (rdev->family >= CHIP_CAYMAN) {
2664 dev_info(rdev->dev, " R_00D834_DMA_STATUS_REG = 0x%08X\n",
2665 RREG32(DMA_STATUS_REG + 0x800));
2666 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002667}
2668
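/**
 * evergreen_is_display_hung - check whether the display engine is hung
 *
 * @rdev: radeon_device pointer
 *
 * Samples the HV counter of every enabled CRTC and declares the display
 * hung if any of them fails to advance within roughly a millisecond.
 */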
Alex Deucher168757e2013-01-18 19:17:22 -05002669bool evergreen_is_display_hung(struct radeon_device *rdev)
Alex Deuchera65a4362013-01-18 18:55:54 -05002670{
2671 u32 crtc_hung = 0;
2672 u32 crtc_status[6];
2673 u32 i, j, tmp;
2674
2675 for (i = 0; i < rdev->num_crtc; i++) {
2676 if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
2677 crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
2678 crtc_hung |= (1 << i);
2679 }
2680 }
2681
2682 for (j = 0; j < 10; j++) {
2683 for (i = 0; i < rdev->num_crtc; i++) {
2684 if (crtc_hung & (1 << i)) {
2685 tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
2686 if (tmp != crtc_status[i])
2687 crtc_hung &= ~(1 << i);
2688 }
2689 }
2690 if (crtc_hung == 0)
2691 return false;
2692 udelay(100);
2693 }
2694
2695 return true;
2696}
2697
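/**
 * evergreen_gpu_check_soft_reset - determine which blocks need a reset
 *
 * @rdev: radeon_device pointer
 *
 * Inspects GRBM_STATUS, DMA_STATUS_REG, SRBM_STATUS/SRBM_STATUS2,
 * VM_L2_STATUS and the display state, and builds a mask of
 * RADEON_RESET_* flags for the blocks that appear busy or hung.
 * An MC-only hit is dropped since the MC is usually just busy.
 */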
2698static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
2699{
2700 u32 reset_mask = 0;
2701 u32 tmp;
2702
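	/* poll the status registers and build a mask of the blocks that still
	 * look busy; the caller uses it to decide which soft resets to assert */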
2703 /* GRBM_STATUS */
2704 tmp = RREG32(GRBM_STATUS);
2705 if (tmp & (PA_BUSY | SC_BUSY |
2706 SH_BUSY | SX_BUSY |
2707 TA_BUSY | VGT_BUSY |
2708 DB_BUSY | CB_BUSY |
2709 SPI_BUSY | VGT_BUSY_NO_DMA))
2710 reset_mask |= RADEON_RESET_GFX;
2711
2712 if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
2713 CP_BUSY | CP_COHERENCY_BUSY))
2714 reset_mask |= RADEON_RESET_CP;
2715
2716 if (tmp & GRBM_EE_BUSY)
2717 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
2718
2719 /* DMA_STATUS_REG */
2720 tmp = RREG32(DMA_STATUS_REG);
2721 if (!(tmp & DMA_IDLE))
2722 reset_mask |= RADEON_RESET_DMA;
2723
2724 /* SRBM_STATUS2 */
2725 tmp = RREG32(SRBM_STATUS2);
2726 if (tmp & DMA_BUSY)
2727 reset_mask |= RADEON_RESET_DMA;
2728
2729 /* SRBM_STATUS */
2730 tmp = RREG32(SRBM_STATUS);
2731 if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
2732 reset_mask |= RADEON_RESET_RLC;
2733
2734 if (tmp & IH_BUSY)
2735 reset_mask |= RADEON_RESET_IH;
2736
2737 if (tmp & SEM_BUSY)
2738 reset_mask |= RADEON_RESET_SEM;
2739
2740 if (tmp & GRBM_RQ_PENDING)
2741 reset_mask |= RADEON_RESET_GRBM;
2742
2743 if (tmp & VMC_BUSY)
2744 reset_mask |= RADEON_RESET_VMC;
2745
2746 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
2747 MCC_BUSY | MCD_BUSY))
2748 reset_mask |= RADEON_RESET_MC;
2749
2750 if (evergreen_is_display_hung(rdev))
2751 reset_mask |= RADEON_RESET_DISPLAY;
2752
2753 /* VM_L2_STATUS */
2754 tmp = RREG32(VM_L2_STATUS);
2755 if (tmp & L2_BUSY)
2756 reset_mask |= RADEON_RESET_VMC;
2757
Alex Deucherd808fc82013-02-28 10:03:08 -05002758	/* Skip MC reset as it's most likely not hung, just busy */
2759 if (reset_mask & RADEON_RESET_MC) {
2760 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
2761 reset_mask &= ~RADEON_RESET_MC;
2762 }
2763
Alex Deuchera65a4362013-01-18 18:55:54 -05002764 return reset_mask;
2765}
2766
2767static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
Alex Deucher0ecebb92013-01-03 12:40:13 -05002768{
2769 struct evergreen_mc_save save;
Alex Deucherb7630472013-01-18 14:28:41 -05002770 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
2771 u32 tmp;
Alex Deucher19fc42e2013-01-14 11:04:39 -05002772
Alex Deucher0ecebb92013-01-03 12:40:13 -05002773 if (reset_mask == 0)
Alex Deuchera65a4362013-01-18 18:55:54 -05002774 return;
Alex Deucher0ecebb92013-01-03 12:40:13 -05002775
2776 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
2777
Alex Deucherb7630472013-01-18 14:28:41 -05002778 evergreen_print_gpu_status_regs(rdev);
2779
Alex Deucherb7630472013-01-18 14:28:41 -05002780 /* Disable CP parsing/prefetching */
2781 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2782
2783 if (reset_mask & RADEON_RESET_DMA) {
2784 /* Disable DMA */
2785 tmp = RREG32(DMA_RB_CNTL);
2786 tmp &= ~DMA_RB_ENABLE;
2787 WREG32(DMA_RB_CNTL, tmp);
2788 }
2789
Alex Deucherb21b6e72013-01-23 18:57:56 -05002790 udelay(50);
2791
2792 evergreen_mc_stop(rdev, &save);
2793 if (evergreen_mc_wait_for_idle(rdev)) {
2794 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2795 }
2796
Alex Deucherb7630472013-01-18 14:28:41 -05002797 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
2798 grbm_soft_reset |= SOFT_RESET_DB |
2799 SOFT_RESET_CB |
2800 SOFT_RESET_PA |
2801 SOFT_RESET_SC |
2802 SOFT_RESET_SPI |
2803 SOFT_RESET_SX |
2804 SOFT_RESET_SH |
2805 SOFT_RESET_TC |
2806 SOFT_RESET_TA |
2807 SOFT_RESET_VC |
2808 SOFT_RESET_VGT;
2809 }
2810
2811 if (reset_mask & RADEON_RESET_CP) {
2812 grbm_soft_reset |= SOFT_RESET_CP |
2813 SOFT_RESET_VGT;
2814
2815 srbm_soft_reset |= SOFT_RESET_GRBM;
2816 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002817
2818 if (reset_mask & RADEON_RESET_DMA)
Alex Deucherb7630472013-01-18 14:28:41 -05002819 srbm_soft_reset |= SOFT_RESET_DMA;
2820
Alex Deuchera65a4362013-01-18 18:55:54 -05002821 if (reset_mask & RADEON_RESET_DISPLAY)
2822 srbm_soft_reset |= SOFT_RESET_DC;
2823
2824 if (reset_mask & RADEON_RESET_RLC)
2825 srbm_soft_reset |= SOFT_RESET_RLC;
2826
2827 if (reset_mask & RADEON_RESET_SEM)
2828 srbm_soft_reset |= SOFT_RESET_SEM;
2829
2830 if (reset_mask & RADEON_RESET_IH)
2831 srbm_soft_reset |= SOFT_RESET_IH;
2832
2833 if (reset_mask & RADEON_RESET_GRBM)
2834 srbm_soft_reset |= SOFT_RESET_GRBM;
2835
2836 if (reset_mask & RADEON_RESET_VMC)
2837 srbm_soft_reset |= SOFT_RESET_VMC;
2838
Alex Deucher24178ec2013-01-24 15:00:17 -05002839 if (!(rdev->flags & RADEON_IS_IGP)) {
2840 if (reset_mask & RADEON_RESET_MC)
2841 srbm_soft_reset |= SOFT_RESET_MC;
2842 }
Alex Deuchera65a4362013-01-18 18:55:54 -05002843
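	/* toggle the GRBM (gfx block) resets first, then the SRBM (system block)
	 * resets; each write is followed by a readback and a short delay so the
	 * reset has time to propagate */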
Alex Deucherb7630472013-01-18 14:28:41 -05002844 if (grbm_soft_reset) {
2845 tmp = RREG32(GRBM_SOFT_RESET);
2846 tmp |= grbm_soft_reset;
2847 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2848 WREG32(GRBM_SOFT_RESET, tmp);
2849 tmp = RREG32(GRBM_SOFT_RESET);
2850
2851 udelay(50);
2852
2853 tmp &= ~grbm_soft_reset;
2854 WREG32(GRBM_SOFT_RESET, tmp);
2855 tmp = RREG32(GRBM_SOFT_RESET);
2856 }
2857
2858 if (srbm_soft_reset) {
2859 tmp = RREG32(SRBM_SOFT_RESET);
2860 tmp |= srbm_soft_reset;
2861 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2862 WREG32(SRBM_SOFT_RESET, tmp);
2863 tmp = RREG32(SRBM_SOFT_RESET);
2864
2865 udelay(50);
2866
2867 tmp &= ~srbm_soft_reset;
2868 WREG32(SRBM_SOFT_RESET, tmp);
2869 tmp = RREG32(SRBM_SOFT_RESET);
2870 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002871
2872 /* Wait a little for things to settle down */
2873 udelay(50);
2874
Alex Deucher747943e2010-03-24 13:26:36 -04002875 evergreen_mc_resume(rdev, &save);
Alex Deucherb7630472013-01-18 14:28:41 -05002876 udelay(50);
Alex Deucher410a3412013-01-18 13:05:39 -05002877
Alex Deucherb7630472013-01-18 14:28:41 -05002878 evergreen_print_gpu_status_regs(rdev);
Alex Deucher747943e2010-03-24 13:26:36 -04002879}
2880
Jerome Glissea2d07b72010-03-09 14:45:11 +00002881int evergreen_asic_reset(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002882{
Alex Deuchera65a4362013-01-18 18:55:54 -05002883 u32 reset_mask;
2884
2885 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2886
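	/* mark the engine as hung in the BIOS scratch regs while the reset is
	 * attempted; the flag is cleared below once the status reads clean */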
2887 if (reset_mask)
2888 r600_set_bios_scratch_engine_hung(rdev, true);
2889
2890 evergreen_gpu_soft_reset(rdev, reset_mask);
2891
2892 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2893
2894 if (!reset_mask)
2895 r600_set_bios_scratch_engine_hung(rdev, false);
2896
2897 return 0;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002898}
2899
Alex Deucher123bc182013-01-24 11:37:19 -05002900/**
2901 * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
2902 *
2903 * @rdev: radeon_device pointer
2904 * @ring: radeon_ring structure holding ring information
2905 *
2906 * Check if the GFX engine is locked up.
2907 * Returns true if the engine appears to be locked up, false if not.
2908 */
2909bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2910{
2911 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2912
2913 if (!(reset_mask & (RADEON_RESET_GFX |
2914 RADEON_RESET_COMPUTE |
2915 RADEON_RESET_CP))) {
2916 radeon_ring_lockup_update(ring);
2917 return false;
2918 }
2919 /* force CP activities */
2920 radeon_ring_force_activity(rdev, ring);
2921 return radeon_ring_test_lockup(rdev, ring);
2922}
2923
2924/**
2925 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
2926 *
2927 * @rdev: radeon_device pointer
2928 * @ring: radeon_ring structure holding ring information
2929 *
2930 * Check if the async DMA engine is locked up.
2931 * Returns true if the engine appears to be locked up, false if not.
2932 */
2933bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2934{
2935 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2936
2937 if (!(reset_mask & RADEON_RESET_DMA)) {
2938 radeon_ring_lockup_update(ring);
2939 return false;
2940 }
2941 /* force ring activities */
2942 radeon_ring_force_activity(rdev, ring);
2943 return radeon_ring_test_lockup(rdev, ring);
2944}
2945
Alex Deucher45f9a392010-03-24 13:55:51 -04002946/* Interrupts */
2947
2948u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
2949{
Alex Deucher46437052012-08-15 17:10:32 -04002950 if (crtc >= rdev->num_crtc)
Alex Deucher45f9a392010-03-24 13:55:51 -04002951 return 0;
Alex Deucher46437052012-08-15 17:10:32 -04002952 else
2953 return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
Alex Deucher45f9a392010-03-24 13:55:51 -04002954}
2955
2956void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2957{
2958 u32 tmp;
2959
Alex Deucher1b370782011-11-17 20:13:28 -05002960 if (rdev->family >= CHIP_CAYMAN) {
2961 cayman_cp_int_cntl_setup(rdev, 0,
2962 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2963 cayman_cp_int_cntl_setup(rdev, 1, 0);
2964 cayman_cp_int_cntl_setup(rdev, 2, 0);
Alex Deucherf60cbd12012-12-04 15:27:33 -05002965 tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2966 WREG32(CAYMAN_DMA1_CNTL, tmp);
Alex Deucher1b370782011-11-17 20:13:28 -05002967 } else
2968 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
Alex Deucher233d1ad2012-12-04 15:25:59 -05002969 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2970 WREG32(DMA_CNTL, tmp);
Alex Deucher45f9a392010-03-24 13:55:51 -04002971 WREG32(GRBM_INT_CNTL, 0);
2972 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2973 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002974 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05002975 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2976 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002977 }
2978 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05002979 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2980 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2981 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002982
2983 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2984 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002985 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05002986 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2987 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002988 }
2989 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05002990 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2991 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2992 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002993
Alex Deucher05b3ef62012-03-20 17:18:37 -04002994 /* only one DAC on DCE6 */
2995 if (!ASIC_IS_DCE6(rdev))
2996 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
Alex Deucher45f9a392010-03-24 13:55:51 -04002997 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2998
2999 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3000 WREG32(DC_HPD1_INT_CONTROL, tmp);
3001 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3002 WREG32(DC_HPD2_INT_CONTROL, tmp);
3003 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3004 WREG32(DC_HPD3_INT_CONTROL, tmp);
3005 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3006 WREG32(DC_HPD4_INT_CONTROL, tmp);
3007 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3008 WREG32(DC_HPD5_INT_CONTROL, tmp);
3009 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3010 WREG32(DC_HPD6_INT_CONTROL, tmp);
3011
3012}
3013
3014int evergreen_irq_set(struct radeon_device *rdev)
3015{
3016 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
Alex Deucher1b370782011-11-17 20:13:28 -05003017 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
Alex Deucher45f9a392010-03-24 13:55:51 -04003018 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
3019 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
Alex Deucher2031f772010-04-22 12:52:11 -04003020 u32 grbm_int_cntl = 0;
Alex Deucher6f34be52010-11-21 10:59:01 -05003021 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
Alex Deucherf122c612012-03-30 08:59:57 -04003022 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
Alex Deucherf60cbd12012-12-04 15:27:33 -05003023 u32 dma_cntl, dma_cntl1 = 0;
Alex Deucher45f9a392010-03-24 13:55:51 -04003024
3025 if (!rdev->irq.installed) {
Joe Perchesfce7d612010-10-30 21:08:30 +00003026 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
Alex Deucher45f9a392010-03-24 13:55:51 -04003027 return -EINVAL;
3028 }
3029 /* don't enable anything if the ih is disabled */
3030 if (!rdev->ih.enabled) {
3031 r600_disable_interrupts(rdev);
3032 /* force the active interrupt state to all disabled */
3033 evergreen_disable_interrupt_state(rdev);
3034 return 0;
3035 }
3036
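	/* latch the current control registers with their enable bits cleared,
	 * OR in the enables for each requested source below, then write
	 * everything back in one pass at the end */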
3037 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3038 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3039 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3040 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3041 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3042 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3043
Alex Deucherf122c612012-03-30 08:59:57 -04003044 afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3045 afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3046 afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3047 afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3048 afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3049 afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3050
Alex Deucher233d1ad2012-12-04 15:25:59 -05003051 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3052
Alex Deucher1b370782011-11-17 20:13:28 -05003053 if (rdev->family >= CHIP_CAYMAN) {
3054 /* enable CP interrupts on all rings */
Christian Koenig736fc372012-05-17 19:52:00 +02003055 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05003056 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
3057 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3058 }
Christian Koenig736fc372012-05-17 19:52:00 +02003059 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05003060 DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
3061 cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
3062 }
Christian Koenig736fc372012-05-17 19:52:00 +02003063 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05003064 DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
3065 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
3066 }
3067 } else {
Christian Koenig736fc372012-05-17 19:52:00 +02003068 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
Alex Deucher1b370782011-11-17 20:13:28 -05003069 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
3070 cp_int_cntl |= RB_INT_ENABLE;
3071 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3072 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003073 }
Alex Deucher1b370782011-11-17 20:13:28 -05003074
Alex Deucher233d1ad2012-12-04 15:25:59 -05003075 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3076 		DRM_DEBUG("evergreen_irq_set: sw int dma\n");
3077 dma_cntl |= TRAP_ENABLE;
3078 }
3079
Alex Deucherf60cbd12012-12-04 15:27:33 -05003080 if (rdev->family >= CHIP_CAYMAN) {
3081 dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
3082 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
3083 			DRM_DEBUG("evergreen_irq_set: sw int dma1\n");
3084 dma_cntl1 |= TRAP_ENABLE;
3085 }
3086 }
3087
Alex Deucher6f34be52010-11-21 10:59:01 -05003088 if (rdev->irq.crtc_vblank_int[0] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003089 atomic_read(&rdev->irq.pflip[0])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003090 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
3091 crtc1 |= VBLANK_INT_MASK;
3092 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003093 if (rdev->irq.crtc_vblank_int[1] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003094 atomic_read(&rdev->irq.pflip[1])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003095 DRM_DEBUG("evergreen_irq_set: vblank 1\n");
3096 crtc2 |= VBLANK_INT_MASK;
3097 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003098 if (rdev->irq.crtc_vblank_int[2] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003099 atomic_read(&rdev->irq.pflip[2])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003100 DRM_DEBUG("evergreen_irq_set: vblank 2\n");
3101 crtc3 |= VBLANK_INT_MASK;
3102 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003103 if (rdev->irq.crtc_vblank_int[3] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003104 atomic_read(&rdev->irq.pflip[3])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003105 DRM_DEBUG("evergreen_irq_set: vblank 3\n");
3106 crtc4 |= VBLANK_INT_MASK;
3107 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003108 if (rdev->irq.crtc_vblank_int[4] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003109 atomic_read(&rdev->irq.pflip[4])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003110 DRM_DEBUG("evergreen_irq_set: vblank 4\n");
3111 crtc5 |= VBLANK_INT_MASK;
3112 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003113 if (rdev->irq.crtc_vblank_int[5] ||
Christian Koenig736fc372012-05-17 19:52:00 +02003114 atomic_read(&rdev->irq.pflip[5])) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003115 DRM_DEBUG("evergreen_irq_set: vblank 5\n");
3116 crtc6 |= VBLANK_INT_MASK;
3117 }
3118 if (rdev->irq.hpd[0]) {
3119 DRM_DEBUG("evergreen_irq_set: hpd 1\n");
3120 hpd1 |= DC_HPDx_INT_EN;
3121 }
3122 if (rdev->irq.hpd[1]) {
3123 DRM_DEBUG("evergreen_irq_set: hpd 2\n");
3124 hpd2 |= DC_HPDx_INT_EN;
3125 }
3126 if (rdev->irq.hpd[2]) {
3127 DRM_DEBUG("evergreen_irq_set: hpd 3\n");
3128 hpd3 |= DC_HPDx_INT_EN;
3129 }
3130 if (rdev->irq.hpd[3]) {
3131 DRM_DEBUG("evergreen_irq_set: hpd 4\n");
3132 hpd4 |= DC_HPDx_INT_EN;
3133 }
3134 if (rdev->irq.hpd[4]) {
3135 DRM_DEBUG("evergreen_irq_set: hpd 5\n");
3136 hpd5 |= DC_HPDx_INT_EN;
3137 }
3138 if (rdev->irq.hpd[5]) {
3139 DRM_DEBUG("evergreen_irq_set: hpd 6\n");
3140 hpd6 |= DC_HPDx_INT_EN;
3141 }
Alex Deucherf122c612012-03-30 08:59:57 -04003142 if (rdev->irq.afmt[0]) {
3143 DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
3144 afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3145 }
3146 if (rdev->irq.afmt[1]) {
3147 DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
3148 afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3149 }
3150 if (rdev->irq.afmt[2]) {
3151 DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
3152 afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3153 }
3154 if (rdev->irq.afmt[3]) {
3155 DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
3156 afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3157 }
3158 if (rdev->irq.afmt[4]) {
3159 DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
3160 afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3161 }
3162 if (rdev->irq.afmt[5]) {
3163 DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
3164 afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
3165 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003166
Alex Deucher1b370782011-11-17 20:13:28 -05003167 if (rdev->family >= CHIP_CAYMAN) {
3168 cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
3169 cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
3170 cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
3171 } else
3172 WREG32(CP_INT_CNTL, cp_int_cntl);
Alex Deucher233d1ad2012-12-04 15:25:59 -05003173
3174 WREG32(DMA_CNTL, dma_cntl);
3175
Alex Deucherf60cbd12012-12-04 15:27:33 -05003176 if (rdev->family >= CHIP_CAYMAN)
3177 WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
3178
Alex Deucher2031f772010-04-22 12:52:11 -04003179 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
Alex Deucher45f9a392010-03-24 13:55:51 -04003180
3181 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
3182 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
Alex Deucherb7eff392011-07-08 11:44:56 -04003183 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05003184 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
3185 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
Alex Deucherb7eff392011-07-08 11:44:56 -04003186 }
3187 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05003188 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
3189 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
3190 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003191
Alex Deucher6f34be52010-11-21 10:59:01 -05003192 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
3193 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
Alex Deucherb7eff392011-07-08 11:44:56 -04003194 if (rdev->num_crtc >= 4) {
3195 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
3196 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
3197 }
3198 if (rdev->num_crtc >= 6) {
3199 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
3200 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
3201 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003202
Alex Deucher45f9a392010-03-24 13:55:51 -04003203 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3204 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3205 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3206 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3207 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3208 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3209
Alex Deucherf122c612012-03-30 08:59:57 -04003210 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
3211 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
3212 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
3213 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
3214 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
3215 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
3216
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05003217 return 0;
3218}
3219
Andi Kleencbdd4502011-10-13 16:08:46 -07003220static void evergreen_irq_ack(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003221{
3222 u32 tmp;
3223
Alex Deucher6f34be52010-11-21 10:59:01 -05003224 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3225 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3226 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
3227 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
3228 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
3229 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
3230 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
3231 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
Alex Deucherb7eff392011-07-08 11:44:56 -04003232 if (rdev->num_crtc >= 4) {
3233 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
3234 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
3235 }
3236 if (rdev->num_crtc >= 6) {
3237 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
3238 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
3239 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003240
Alex Deucherf122c612012-03-30 08:59:57 -04003241 rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
3242 rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
3243 rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
3244 rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
3245 rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
3246 rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
3247
Alex Deucher6f34be52010-11-21 10:59:01 -05003248 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
3249 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3250 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
3251 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
Alex Deucher6f34be52010-11-21 10:59:01 -05003252 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003253 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003254 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003255 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003256 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003257 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003258 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
Alex Deucher45f9a392010-03-24 13:55:51 -04003259 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
3260
Alex Deucherb7eff392011-07-08 11:44:56 -04003261 if (rdev->num_crtc >= 4) {
3262 if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
3263 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3264 if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
3265 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3266 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
3267 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
3268 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
3269 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
3270 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
3271 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
3272 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
3273 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
3274 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003275
Alex Deucherb7eff392011-07-08 11:44:56 -04003276 if (rdev->num_crtc >= 6) {
3277 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
3278 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3279 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
3280 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
3281 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
3282 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
3283 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
3284 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
3285 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
3286 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
3287 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
3288 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
3289 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003290
Alex Deucher6f34be52010-11-21 10:59:01 -05003291 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003292 tmp = RREG32(DC_HPD1_INT_CONTROL);
3293 tmp |= DC_HPDx_INT_ACK;
3294 WREG32(DC_HPD1_INT_CONTROL, tmp);
3295 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003296 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003297 tmp = RREG32(DC_HPD2_INT_CONTROL);
3298 tmp |= DC_HPDx_INT_ACK;
3299 WREG32(DC_HPD2_INT_CONTROL, tmp);
3300 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003301 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003302 tmp = RREG32(DC_HPD3_INT_CONTROL);
3303 tmp |= DC_HPDx_INT_ACK;
3304 WREG32(DC_HPD3_INT_CONTROL, tmp);
3305 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003306 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003307 tmp = RREG32(DC_HPD4_INT_CONTROL);
3308 tmp |= DC_HPDx_INT_ACK;
3309 WREG32(DC_HPD4_INT_CONTROL, tmp);
3310 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003311 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003312 tmp = RREG32(DC_HPD5_INT_CONTROL);
3313 tmp |= DC_HPDx_INT_ACK;
3314 WREG32(DC_HPD5_INT_CONTROL, tmp);
3315 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003316 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
Alex Deucher45f9a392010-03-24 13:55:51 -04003317 		tmp = RREG32(DC_HPD6_INT_CONTROL);
3318 tmp |= DC_HPDx_INT_ACK;
3319 WREG32(DC_HPD6_INT_CONTROL, tmp);
3320 }
Alex Deucherf122c612012-03-30 08:59:57 -04003321 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
3322 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
3323 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3324 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
3325 }
3326 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
3327 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
3328 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3329 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
3330 }
3331 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
3332 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
3333 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3334 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
3335 }
3336 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
3337 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
3338 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3339 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
3340 }
3341 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
3342 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
3343 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3344 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
3345 }
3346 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
3347 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
3348 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3349 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
3350 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003351}
3352
Lauri Kasanen1109ca02012-08-31 13:43:50 -04003353static void evergreen_irq_disable(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003354{
Alex Deucher45f9a392010-03-24 13:55:51 -04003355 r600_disable_interrupts(rdev);
3356 /* Wait and acknowledge irq */
3357 mdelay(1);
Alex Deucher6f34be52010-11-21 10:59:01 -05003358 evergreen_irq_ack(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04003359 evergreen_disable_interrupt_state(rdev);
3360}
3361
Alex Deucher755d8192011-03-02 20:07:34 -05003362void evergreen_irq_suspend(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003363{
3364 evergreen_irq_disable(rdev);
3365 r600_rlc_stop(rdev);
3366}
3367
Andi Kleencbdd4502011-10-13 16:08:46 -07003368static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
Alex Deucher45f9a392010-03-24 13:55:51 -04003369{
3370 u32 wptr, tmp;
3371
Alex Deucher724c80e2010-08-27 18:25:25 -04003372 if (rdev->wb.enabled)
Cédric Cano204ae242011-04-19 11:07:13 -04003373 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
Alex Deucher724c80e2010-08-27 18:25:25 -04003374 else
3375 wptr = RREG32(IH_RB_WPTR);
Alex Deucher45f9a392010-03-24 13:55:51 -04003376
3377 if (wptr & RB_OVERFLOW) {
3378 		/* When a ring buffer overflow happens, start parsing interrupts
3379 		 * from the last not-overwritten vector (wptr + 16). Hopefully
3380 		 * this should allow us to catch up.
3381 */
3382 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3383 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
3384 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3385 tmp = RREG32(IH_RB_CNTL);
3386 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3387 WREG32(IH_RB_CNTL, tmp);
3388 }
3389 return (wptr & rdev->ih.ptr_mask);
3390}
3391
3392int evergreen_irq_process(struct radeon_device *rdev)
3393{
Dave Airlie682f1a52011-06-18 03:59:51 +00003394 u32 wptr;
3395 u32 rptr;
Alex Deucher45f9a392010-03-24 13:55:51 -04003396 u32 src_id, src_data;
3397 u32 ring_index;
Alex Deucher45f9a392010-03-24 13:55:51 -04003398 bool queue_hotplug = false;
Alex Deucherf122c612012-03-30 08:59:57 -04003399 bool queue_hdmi = false;
Alex Deucher45f9a392010-03-24 13:55:51 -04003400
Dave Airlie682f1a52011-06-18 03:59:51 +00003401 if (!rdev->ih.enabled || rdev->shutdown)
Alex Deucher45f9a392010-03-24 13:55:51 -04003402 return IRQ_NONE;
3403
Dave Airlie682f1a52011-06-18 03:59:51 +00003404 wptr = evergreen_get_ih_wptr(rdev);
Christian Koenigc20dc362012-05-16 21:45:24 +02003405
3406restart_ih:
3407 /* is somebody else already processing irqs? */
3408 if (atomic_xchg(&rdev->ih.lock, 1))
3409 return IRQ_NONE;
3410
Dave Airlie682f1a52011-06-18 03:59:51 +00003411 rptr = rdev->ih.rptr;
3412 	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
Alex Deucher45f9a392010-03-24 13:55:51 -04003413
Benjamin Herrenschmidt964f6642011-07-13 16:28:19 +10003414 /* Order reading of wptr vs. reading of IH ring data */
3415 rmb();
3416
Alex Deucher45f9a392010-03-24 13:55:51 -04003417 /* display interrupts */
Alex Deucher6f34be52010-11-21 10:59:01 -05003418 evergreen_irq_ack(rdev);
Alex Deucher45f9a392010-03-24 13:55:51 -04003419
Alex Deucher45f9a392010-03-24 13:55:51 -04003420 while (rptr != wptr) {
3421 /* wptr/rptr are in bytes! */
3422 ring_index = rptr / 4;
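		/* each IH ring entry is 128 bits (4 dwords): dword 0 carries the
		 * source id and dword 1 the source data decoded below */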
Alex Deucher0f234f5f2011-02-13 19:06:33 -05003423 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3424 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
Alex Deucher45f9a392010-03-24 13:55:51 -04003425
3426 switch (src_id) {
3427 case 1: /* D1 vblank/vline */
3428 switch (src_data) {
3429 case 0: /* D1 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003430 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003431 if (rdev->irq.crtc_vblank_int[0]) {
3432 drm_handle_vblank(rdev->ddev, 0);
3433 rdev->pm.vblank_sync = true;
3434 wake_up(&rdev->irq.vblank_queue);
3435 }
Christian Koenig736fc372012-05-17 19:52:00 +02003436 if (atomic_read(&rdev->irq.pflip[0]))
Mario Kleiner3e4ea742010-11-21 10:59:02 -05003437 radeon_crtc_handle_flip(rdev, 0);
Alex Deucher6f34be52010-11-21 10:59:01 -05003438 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003439 DRM_DEBUG("IH: D1 vblank\n");
3440 }
3441 break;
3442 case 1: /* D1 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003443 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
3444 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003445 DRM_DEBUG("IH: D1 vline\n");
3446 }
3447 break;
3448 default:
3449 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3450 break;
3451 }
3452 break;
3453 case 2: /* D2 vblank/vline */
3454 switch (src_data) {
3455 case 0: /* D2 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003456 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003457 if (rdev->irq.crtc_vblank_int[1]) {
3458 drm_handle_vblank(rdev->ddev, 1);
3459 rdev->pm.vblank_sync = true;
3460 wake_up(&rdev->irq.vblank_queue);
3461 }
Christian Koenig736fc372012-05-17 19:52:00 +02003462 if (atomic_read(&rdev->irq.pflip[1]))
Mario Kleiner3e4ea742010-11-21 10:59:02 -05003463 radeon_crtc_handle_flip(rdev, 1);
Alex Deucher6f34be52010-11-21 10:59:01 -05003464 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003465 DRM_DEBUG("IH: D2 vblank\n");
3466 }
3467 break;
3468 case 1: /* D2 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003469 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
3470 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003471 DRM_DEBUG("IH: D2 vline\n");
3472 }
3473 break;
3474 default:
3475 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3476 break;
3477 }
3478 break;
3479 case 3: /* D3 vblank/vline */
3480 switch (src_data) {
3481 case 0: /* D3 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003482 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
3483 if (rdev->irq.crtc_vblank_int[2]) {
3484 drm_handle_vblank(rdev->ddev, 2);
3485 rdev->pm.vblank_sync = true;
3486 wake_up(&rdev->irq.vblank_queue);
3487 }
Christian Koenig736fc372012-05-17 19:52:00 +02003488 if (atomic_read(&rdev->irq.pflip[2]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003489 radeon_crtc_handle_flip(rdev, 2);
3490 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003491 DRM_DEBUG("IH: D3 vblank\n");
3492 }
3493 break;
3494 case 1: /* D3 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003495 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
3496 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003497 DRM_DEBUG("IH: D3 vline\n");
3498 }
3499 break;
3500 default:
3501 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3502 break;
3503 }
3504 break;
3505 case 4: /* D4 vblank/vline */
3506 switch (src_data) {
3507 case 0: /* D4 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003508 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
3509 if (rdev->irq.crtc_vblank_int[3]) {
3510 drm_handle_vblank(rdev->ddev, 3);
3511 rdev->pm.vblank_sync = true;
3512 wake_up(&rdev->irq.vblank_queue);
3513 }
Christian Koenig736fc372012-05-17 19:52:00 +02003514 if (atomic_read(&rdev->irq.pflip[3]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003515 radeon_crtc_handle_flip(rdev, 3);
3516 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003517 DRM_DEBUG("IH: D4 vblank\n");
3518 }
3519 break;
3520 case 1: /* D4 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003521 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
3522 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003523 DRM_DEBUG("IH: D4 vline\n");
3524 }
3525 break;
3526 default:
3527 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3528 break;
3529 }
3530 break;
3531 case 5: /* D5 vblank/vline */
3532 switch (src_data) {
3533 case 0: /* D5 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003534 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
3535 if (rdev->irq.crtc_vblank_int[4]) {
3536 drm_handle_vblank(rdev->ddev, 4);
3537 rdev->pm.vblank_sync = true;
3538 wake_up(&rdev->irq.vblank_queue);
3539 }
Christian Koenig736fc372012-05-17 19:52:00 +02003540 if (atomic_read(&rdev->irq.pflip[4]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003541 radeon_crtc_handle_flip(rdev, 4);
3542 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003543 DRM_DEBUG("IH: D5 vblank\n");
3544 }
3545 break;
3546 case 1: /* D5 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003547 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
3548 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003549 DRM_DEBUG("IH: D5 vline\n");
3550 }
3551 break;
3552 default:
3553 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3554 break;
3555 }
3556 break;
3557 case 6: /* D6 vblank/vline */
3558 switch (src_data) {
3559 case 0: /* D6 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003560 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
3561 if (rdev->irq.crtc_vblank_int[5]) {
3562 drm_handle_vblank(rdev->ddev, 5);
3563 rdev->pm.vblank_sync = true;
3564 wake_up(&rdev->irq.vblank_queue);
3565 }
Christian Koenig736fc372012-05-17 19:52:00 +02003566 if (atomic_read(&rdev->irq.pflip[5]))
Alex Deucher6f34be52010-11-21 10:59:01 -05003567 radeon_crtc_handle_flip(rdev, 5);
3568 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003569 DRM_DEBUG("IH: D6 vblank\n");
3570 }
3571 break;
3572 case 1: /* D6 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003573 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
3574 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003575 DRM_DEBUG("IH: D6 vline\n");
3576 }
3577 break;
3578 default:
3579 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3580 break;
3581 }
3582 break;
3583 case 42: /* HPD hotplug */
3584 switch (src_data) {
3585 case 0:
Alex Deucher6f34be52010-11-21 10:59:01 -05003586 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
3587 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003588 queue_hotplug = true;
3589 DRM_DEBUG("IH: HPD1\n");
3590 }
3591 break;
3592 case 1:
Alex Deucher6f34be52010-11-21 10:59:01 -05003593 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
3594 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003595 queue_hotplug = true;
3596 DRM_DEBUG("IH: HPD2\n");
3597 }
3598 break;
3599 case 2:
Alex Deucher6f34be52010-11-21 10:59:01 -05003600 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3601 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003602 queue_hotplug = true;
3603 DRM_DEBUG("IH: HPD3\n");
3604 }
3605 break;
3606 case 3:
Alex Deucher6f34be52010-11-21 10:59:01 -05003607 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3608 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003609 queue_hotplug = true;
3610 DRM_DEBUG("IH: HPD4\n");
3611 }
3612 break;
3613 case 4:
Alex Deucher6f34be52010-11-21 10:59:01 -05003614 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3615 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003616 queue_hotplug = true;
3617 DRM_DEBUG("IH: HPD5\n");
3618 }
3619 break;
3620 case 5:
Alex Deucher6f34be52010-11-21 10:59:01 -05003621 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3622 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
Alex Deucher45f9a392010-03-24 13:55:51 -04003623 queue_hotplug = true;
3624 DRM_DEBUG("IH: HPD6\n");
3625 }
3626 break;
3627 default:
3628 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3629 break;
3630 }
3631 break;
Alex Deucherf122c612012-03-30 08:59:57 -04003632 case 44: /* hdmi */
3633 switch (src_data) {
3634 case 0:
3635 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
3636 rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
3637 queue_hdmi = true;
3638 DRM_DEBUG("IH: HDMI0\n");
3639 }
3640 break;
3641 case 1:
3642 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
3643 rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
3644 queue_hdmi = true;
3645 DRM_DEBUG("IH: HDMI1\n");
3646 }
3647 break;
3648 case 2:
3649 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
3650 rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
3651 queue_hdmi = true;
3652 DRM_DEBUG("IH: HDMI2\n");
3653 }
3654 break;
3655 case 3:
3656 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
3657 rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
3658 queue_hdmi = true;
3659 DRM_DEBUG("IH: HDMI3\n");
3660 }
3661 break;
3662 case 4:
3663 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
3664 rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
3665 queue_hdmi = true;
3666 DRM_DEBUG("IH: HDMI4\n");
3667 }
3668 break;
3669 case 5:
3670 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
3671 rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
3672 queue_hdmi = true;
3673 DRM_DEBUG("IH: HDMI5\n");
3674 }
3675 break;
3676 default:
3677 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3678 break;
3679 			}
			break;
Christian Königf2ba57b2013-04-08 12:41:29 +02003680 		case 124: /* UVD */
3681 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
3682 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
Alex Deucherf122c612012-03-30 08:59:57 -04003683 break;
Christian Königae133a12012-09-18 15:30:44 -04003684 case 146:
3685 case 147:
3686 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
3687 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
3688 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3689 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3690 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3691 /* reset addr and status */
3692 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
3693 break;
Alex Deucher45f9a392010-03-24 13:55:51 -04003694 case 176: /* CP_INT in ring buffer */
3695 case 177: /* CP_INT in IB1 */
3696 case 178: /* CP_INT in IB2 */
3697 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
Alex Deucher74652802011-08-25 13:39:48 -04003698 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
Alex Deucher45f9a392010-03-24 13:55:51 -04003699 break;
3700 case 181: /* CP EOP event */
3701 DRM_DEBUG("IH: CP EOP\n");
Alex Deucher1b370782011-11-17 20:13:28 -05003702 if (rdev->family >= CHIP_CAYMAN) {
3703 switch (src_data) {
3704 case 0:
3705 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3706 break;
3707 case 1:
3708 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
3709 break;
3710 case 2:
3711 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
3712 break;
3713 }
3714 } else
3715 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
Alex Deucher45f9a392010-03-24 13:55:51 -04003716 break;
Alex Deucher233d1ad2012-12-04 15:25:59 -05003717 case 224: /* DMA trap event */
3718 DRM_DEBUG("IH: DMA trap\n");
3719 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3720 break;
Alex Deucher2031f772010-04-22 12:52:11 -04003721 case 233: /* GUI IDLE */
Ilija Hadzic303c8052011-06-07 14:54:48 -04003722 DRM_DEBUG("IH: GUI idle\n");
Alex Deucher2031f772010-04-22 12:52:11 -04003723 break;
Alex Deucherf60cbd12012-12-04 15:27:33 -05003724 	case 244: /* DMA1 trap event */
3725 if (rdev->family >= CHIP_CAYMAN) {
3726 DRM_DEBUG("IH: DMA1 trap\n");
3727 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
3728 }
3729 break;
Alex Deucher45f9a392010-03-24 13:55:51 -04003730 default:
3731 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3732 break;
3733 }
3734
3735 /* wptr/rptr are in bytes! */
3736 rptr += 16;
3737 rptr &= rdev->ih.ptr_mask;
3738 }
Alex Deucher45f9a392010-03-24 13:55:51 -04003739 if (queue_hotplug)
Tejun Heo32c87fc2011-01-03 14:49:32 +01003740 schedule_work(&rdev->hotplug_work);
Alex Deucherf122c612012-03-30 08:59:57 -04003741 if (queue_hdmi)
3742 schedule_work(&rdev->audio_work);
Alex Deucher45f9a392010-03-24 13:55:51 -04003743 rdev->ih.rptr = rptr;
3744 WREG32(IH_RB_RPTR, rdev->ih.rptr);
Christian Koenigc20dc362012-05-16 21:45:24 +02003745 atomic_set(&rdev->ih.lock, 0);
3746
3747 /* make sure wptr hasn't changed while processing */
3748 wptr = evergreen_get_ih_wptr(rdev);
3749 if (wptr != rptr)
3750 goto restart_ih;
3751
Alex Deucher45f9a392010-03-24 13:55:51 -04003752 return IRQ_HANDLED;
3753}
3754
Alex Deucher233d1ad2012-12-04 15:25:59 -05003755/**
3756 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
3757 *
3758 * @rdev: radeon_device pointer
3759 * @fence: radeon fence object
3760 *
3761 * Add a DMA fence packet to the ring to write
3762  * the fence seq number and a DMA trap packet to generate
3763 * an interrupt if needed (evergreen-SI).
3764 */
3765void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
3766 struct radeon_fence *fence)
3767{
3768 struct radeon_ring *ring = &rdev->ring[fence->ring];
3769 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3770 /* write the fence */
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003771 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003772 radeon_ring_write(ring, addr & 0xfffffffc);
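	/* the fence packet carries a 40-bit GPU address: the low dword above
	 * plus only the low 8 bits of the upper dword below */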
3773 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
3774 radeon_ring_write(ring, fence->seq);
3775 /* generate an interrupt */
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003776 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003777 /* flush HDP */
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003778 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
Alex Deucher4b681c22013-01-03 19:54:34 -05003779 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003780 radeon_ring_write(ring, 1);
3781}
3782
3783/**
3784 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
3785 *
3786 * @rdev: radeon_device pointer
3787 * @ib: IB object to schedule
3788 *
3789 * Schedule an IB in the DMA ring (evergreen).
3790 */
3791void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
3792 struct radeon_ib *ib)
3793{
3794 struct radeon_ring *ring = &rdev->ring[ib->ring];
3795
3796 if (rdev->wb.enabled) {
3797 u32 next_rptr = ring->wptr + 4;
3798 while ((next_rptr & 7) != 5)
3799 next_rptr++;
3800 next_rptr += 3;
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003801 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003802 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3803 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3804 radeon_ring_write(ring, next_rptr);
3805 }
3806
3807 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3808 * Pad as necessary with NOPs.
3809 */
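	/* pad to (wptr & 7) == 5 so the 3 dword INDIRECT_BUFFER packet below
	 * ends exactly on the next 8 DW boundary */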
3810 while ((ring->wptr & 7) != 5)
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003811 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
3812 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003813 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3814 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
3815
3816}
3817
3818/**
3819 * evergreen_copy_dma - copy pages using the DMA engine
3820 *
3821 * @rdev: radeon_device pointer
3822 * @src_offset: src GPU address
3823 * @dst_offset: dst GPU address
3824 * @num_gpu_pages: number of GPU pages to xfer
3825 * @fence: radeon fence object
3826 *
3827  * Copy GPU pages using the DMA engine (evergreen-cayman).
3828 * Used by the radeon ttm implementation to move pages if
3829 * registered as the asic copy callback.
3830 */
3831int evergreen_copy_dma(struct radeon_device *rdev,
3832 uint64_t src_offset, uint64_t dst_offset,
3833 unsigned num_gpu_pages,
3834 struct radeon_fence **fence)
3835{
3836 struct radeon_semaphore *sem = NULL;
3837 int ring_index = rdev->asic->copy.dma_ring_index;
3838 struct radeon_ring *ring = &rdev->ring[ring_index];
3839 u32 size_in_dw, cur_size_in_dw;
3840 int i, num_loops;
3841 int r = 0;
3842
3843 r = radeon_semaphore_create(rdev, &sem);
3844 if (r) {
3845 DRM_ERROR("radeon: moving bo (%d).\n", r);
3846 return r;
3847 }
3848
3849 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
3850 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
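	/* each DMA_PACKET_COPY can move at most 0xFFFFF dwords, so the request
	 * is split across num_loops packets of 5 dwords each */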
3851 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
3852 if (r) {
3853 DRM_ERROR("radeon: moving bo (%d).\n", r);
3854 radeon_semaphore_free(rdev, &sem, NULL);
3855 return r;
3856 }
3857
3858 if (radeon_fence_need_sync(*fence, ring->idx)) {
3859 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3860 ring->idx);
3861 radeon_fence_note_sync(*fence, ring->idx);
3862 } else {
3863 radeon_semaphore_free(rdev, &sem, NULL);
3864 }
3865
3866 for (i = 0; i < num_loops; i++) {
3867 cur_size_in_dw = size_in_dw;
3868 if (cur_size_in_dw > 0xFFFFF)
3869 cur_size_in_dw = 0xFFFFF;
3870 size_in_dw -= cur_size_in_dw;
Jerome Glisse0fcb6152013-01-14 11:32:27 -05003871 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
Alex Deucher233d1ad2012-12-04 15:25:59 -05003872 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3873 radeon_ring_write(ring, src_offset & 0xfffffffc);
3874 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3875 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
3876 src_offset += cur_size_in_dw * 4;
3877 dst_offset += cur_size_in_dw * 4;
3878 }
3879
3880 r = radeon_fence_emit(rdev, fence, ring->idx);
3881 if (r) {
3882 radeon_ring_unlock_undo(rdev, ring);
3883 return r;
3884 }
3885
3886 radeon_ring_unlock_commit(rdev, ring);
3887 radeon_semaphore_free(rdev, &sem, *fence);
3888
3889 return r;
3890}
3891
static int evergreen_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = r600_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	evergreen_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

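	/* UVD is optional: if resume or its fence ring fails, just disable
	 * the UVD ring below instead of failing startup.
	 */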
	r = rv770_uvd_resume(rdev);
	if (!r) {
		r = radeon_fence_driver_start_ring(rdev,
						   R600_RING_TYPE_UVD_INDEX);
		if (r)
			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
	}

	if (r)
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

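	/* set up the gfx (CP) ring buffer */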
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

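	/* set up the async DMA ring buffer */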
	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR, DMA_RB_WPTR,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	if (r)
		return r;

	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;
	r = r600_dma_resume(rdev);
	if (r)
		return r;

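	/* ring_size is only non-zero here if UVD came up successfully above */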
	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	if (ring->ring_size) {
		r = radeon_ring_init(rdev, ring, ring->ring_size,
				     R600_WB_UVD_RPTR_OFFSET,
				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
				     0, 0xfffff, RADEON_CP_PACKET2);
		if (!r)
			r = r600_uvd_init(rdev);

		if (r)
			DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

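/* Resume from suspend: reset the asic, re-post the card via the ATOM
 * BIOS tables and re-run the full startup sequence.
 */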
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed !\n");
	/* Do not reset the GPU before posting: on rv770 hw, unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

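/* Quiesce the hw for suspend: tear down audio, stop the UVD, CP and DMA
 * engines, then disable IRQs, writeback and the GART.
 */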
int evergreen_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	radeon_uvd_suspend(rdev);
	r700_cp_stop(rdev);
	r600_dma_stop(rdev);
	r600_uvd_rbc_stop(rdev);
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);

	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more
 * than call asic-specific functions. This should also allow us
 * to remove a bunch of callbacks like vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed !\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

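	/* allocate the ring buffers: 1MB gfx, 64KB DMA, 4KB UVD, 64KB IH */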
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);

	r = radeon_uvd_init(rdev);
	if (!r) {
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
		r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
			       4096);
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_dma_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing on BTC parts.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
			DRM_ERROR("radeon: MC ucode required for NI+.\n");
			return -EINVAL;
		}
	}

	return 0;
}

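/* Tear down everything set up by evergreen_init()/evergreen_startup(). */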
void evergreen_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_dma_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	radeon_uvd_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

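/* Switch the PCIE link to gen2 speeds when both ends support it; this can
 * be disabled with the radeon.pcie_gen2=0 module option.
 */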
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl, mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}