/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

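/*
 * DIDT (di/dt current throttling) registers sit behind their own
 * index/data pair in the GC block, mmDIDT_IND_INDEX/mmDIDT_IND_DATA,
 * serialized by didt_idx_lock.
 */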
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

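/*
 * Route subsequent GFX register accesses to the given ME/pipe/queue/VMID
 * by programming GRBM_GFX_CNTL; callers typically hold adev->srbm_mutex
 * around the select/access/deselect sequence.
 */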
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

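/*
 * Read the VBIOS image through the SMUIO ROM_INDEX/ROM_DATA pair: the
 * index is written once and, per the loop below, ROM_DATA reads appear
 * to auto-increment through the image one dword at a time. dGPU only;
 * on APUs the vbios is embedded in the system BIOS image.
 */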
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

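/*
 * Whitelist of registers user space is allowed to sample (via the
 * AMDGPU_INFO ioctl's read-mmr path); nv_read_register() rejects
 * anything not listed here.
 */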
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

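/*
 * Mode1 reset: a whole-ASIC reset carried out by the PSP. Bus mastering
 * is disabled and PCI config space saved across the reset; afterwards we
 * poll the memory-size register until it reads something other than
 * 0xffffffff, which signals that the ASIC has come back.
 */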
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	return smu_baco_is_support(smu);
}

static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

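/*
 * BACO (Bus Active, Chip Off) is used as the lighter-weight reset when
 * the SMU reports support for it (and we are not an SR-IOV VF): entering
 * and immediately leaving BACO resets the chip without a full mode1
 * reset.
 */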
static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

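/*
 * Prefer initializing IP register bases from the IP discovery table
 * (carried in VRAM); fall back to the hardcoded per-ASIC tables when
 * discovery is disabled on the command line or fails.
 */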
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
		sienna_cichlid_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

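/*
 * Build the per-ASIC IP block list. Order matters: it is the order in
 * which the blocks' init hooks run, so common/GMC/IH come first, PSP/SMU
 * next (they load firmware for the rest), then display, GFX, SDMA and
 * the multimedia blocks.
 */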
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	if (amdgpu_sriov_vf(adev)) {
		adev->virt.ops = &xgpu_nv_virt_ops;
		/* try send GPU_INIT_DATA request to host */
		amdgpu_virt_request_init_data(adev);
	}

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the system
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs
	 * interface
	 */
	return 0;
}

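/*
 * Assign doorbell slots to each engine (KIQ, MEC/GFX rings, MES, SDMA,
 * IH, VCN). max_assignment is shifted left by one presumably because
 * these are 64-bit doorbells, each taking two 32-bit slots.
 */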
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};

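/*
 * Early init runs before any other IP block: wire up the indirect
 * register accessors and the ASIC callback table, then derive the
 * per-ASIC clock-/power-gating masks and external revision id.
 * MMIO_REG_HOLE_OFFSET is the page in the register BAR that
 * nv_common_hw_init() later points at the remapped HDP registers, so
 * that user space can flush HDP directly.
 */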
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		/* A guest VM reads 0xffffffff from RCC_DEV0_EPF0_STRAP0;
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * Work around this by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

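/*
 * HDP SRAM power management: the IPH and RC memories support three
 * mutually exclusive modes - light sleep (LS), deep sleep (DS) and
 * shut down (SD). The sequence below forces the memory clocks on and
 * clears every mode before enabling the requested one, because HDP 5.0
 * cannot switch power modes on the fly.
 */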
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode, force the IPH & RC
	 * clocks on */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support a dynamic power mode switch;
	 * disable clock and power gating before making any change */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut-down mode; fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	/* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have
	 * to be set for SRAM LS/DS/SD */
	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
			      AMD_CG_SUPPORT_HDP_SD)) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_CTRL_EN, 1);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_CTRL_EN, 1);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

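/*
 * HDP medium-grain clock gating: clearing the SOFT_OVERRIDE bits allows
 * the HDP clocks to gate automatically when idle; setting them forces
 * the clocks on, i.e. disables MGCG.
 */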
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};