/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

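/*
 * DIDT indirect register accessors: reads and writes go through the GC
 * DIDT_IND_INDEX/DIDT_IND_DATA register pair, serialized by didt_idx_lock.
 */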
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

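/*
 * nv_grbm_select - select which ME/pipe/queue/VMID subsequent banked GRBM
 * register accesses are routed to, by programming GRBM_GFX_CNTL.
 */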
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

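/*
 * Read the VBIOS image one dword at a time through the SMUIO
 * ROM_INDEX/ROM_DATA register pair.  APUs carry their VBIOS as part of
 * the system BIOS image, so this path is not used for them.
 */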
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

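/*
 * Registers that the driver allows userspace to read back through
 * nv_read_register(); any offset not in this list is rejected with -EINVAL.
 */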
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

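/*
 * Mode1 reset: disable PCI bus mastering, save PCI config state, ask the
 * PSP firmware to perform the reset, restore config state, and then poll
 * the nbio memsize register until it reads back a valid value, which
 * indicates the ASIC has come out of reset.
 */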
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (smu_baco_is_support(smu))
		return true;
	else
		return false;
}

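/*
 * Prefer a BACO (Bus Active, Chip Off) reset when the SMU reports support
 * for it and we are not running as an SR-IOV guest; otherwise fall back
 * to a PSP mode1 reset.
 */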
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

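/*
 * Initialize the per-IP register base offsets.  When amdgpu_discovery is
 * enabled the offsets are taken from the ASIC's IP discovery table; if
 * that fails, or discovery is disabled, fall back to the hard-coded
 * per-ASIC init functions.
 */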
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
		sienna_cichlid_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

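/*
 * Register the IP blocks that make up this ASIC, in initialization order.
 * The NBIO callbacks and, under SR-IOV, the virtualization ops are set up
 * first, and the IP register bases are initialized before any hardware
 * register access.
 */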
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	if (amdgpu_sriov_vf(adev)) {
		adev->virt.ops = &xgpu_nv_virt_ops;
		/* try send GPU_INIT_DATA request to host */
		amdgpu_virt_request_init_data(adev);
	}

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys driver
	 * and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{

	/* TODO
	 * dummy implementation for the pcie_replay_count sysfs interface
	 */

	return 0;
}

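/*
 * Assign the doorbell index layout for Navi-class ASICs: KIQ, compute MEC
 * rings, user queues, gfx rings, SDMA engines, IH and VCN all get fixed
 * slots in the doorbell space.
 */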
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};

static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * work around it by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

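/*
 * nv_update_hdp_mem_power_gating - enable or disable HDP memory power
 * gating.  The IPH and RC memory clocks are forced on while the mode is
 * changed, all gating modes are disabled, and then exactly one of the
 * light-sleep, deep-sleep or shut-down modes is re-enabled according to
 * the cg_flags.
 */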
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing the clock/power mode switch,
	 * force on the IPH & RC clocks */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before making any changes */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fallback to ds */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

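/*
 * nv_update_hdp_clock_gating - enable or disable HDP medium-grain clock
 * gating by clearing or setting the SOFT_OVERRIDE bits in HDP_CLK_CNTL.
 */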
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;

	return;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};