/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect register accessors
 *
 * Registers behind an index/data pair are accessed by programming the
 * register offset into the index port and then reading or writing the data
 * port; a spinlock serializes the two-step sequence.
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = adev->nbio_funcs->get_pcie_index_offset(adev);
        data = adev->nbio_funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
        (void)RREG32(address); /* read back to post the index write */
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
        return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = adev->nbio_funcs->get_pcie_index_offset(adev);
        data = adev->nbio_funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
        (void)RREG32(address); /* read back to post the index write */
        WREG32(data, v);
        (void)RREG32(data); /* read back to post the data write */
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
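
/*
 * Usage sketch for the accessors above (illustrative only, not code from
 * this file): once installed on amdgpu_device in nv_common_early_init(),
 * callers go through the function pointers, e.g.
 *
 *	u32 v = adev->pcie_rreg(adev, reg);
 *	adev->pcie_wreg(adev, reg, v);
 */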

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, reg);
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
        return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, reg);
        WREG32(data, v);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
        return adev->nbio_funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
        return adev->clock.spll.reference_freq;
}

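/*
 * nv_grbm_select - select which ME/pipe/queue/VMID the GRBM aperture targets.
 *
 * Subsequent GRBM register accesses are routed to the selected instance.
 * No locking is done here; callers are assumed to serialize selection
 * themselves (in this driver, conventionally via adev->srbm_mutex).
 */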
void nv_grbm_select(struct amdgpu_device *adev,
                    u32 me, u32 pipe, u32 queue, u32 vmid)
{
        u32 grbm_gfx_cntl = 0;

        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

        WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
        /* todo */
        return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
                                  u8 *bios, u32 length_bytes)
{
        /* TODO: will implement it when SMU header is available */
        return false;
}

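/*
 * Whitelist of registers that userspace may read through the read_register
 * ASIC callback (reached, for instance, from the AMDGPU INFO ioctl); any
 * offset not listed here is rejected with -EINVAL.
 */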
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0 /* TODO: will set it when SDMA header is available */
        { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
        { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
        { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

        val = RREG32(reg_offset);

        /* restore broadcast mode before releasing the GRBM index */
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
                                      bool indexed, u32 se_num,
                                      u32 sh_num, u32 reg_offset)
{
        if (indexed) {
                return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
        } else {
                /* return the cached value for GB_ADDR_CONFIG */
                if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
                        return adev->gfx.config.gb_addr_config;
                return RREG32(reg_offset);
        }
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
                            u32 sh_num, u32 reg_offset, u32 *value)
{
        uint32_t i;
        struct soc15_allowed_register_entry *en;

        *value = 0;
        for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
                en = &nv_allowed_read_registers[i];
                if (reg_offset !=
                    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
                        continue;

                *value = nv_get_register_value(adev,
                                               nv_allowed_read_registers[i].grbm_indexed,
                                               se_num, sh_num, reg_offset);
                return 0;
        }
        return -EINVAL;
}
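
/*
 * Example (a sketch, not driver code): reading GRBM_STATUS with broadcast
 * SE/SH selectors looks like
 *
 *	u32 status;
 *	int r = nv_read_register(adev, 0xffffffff, 0xffffffff,
 *				 SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS),
 *				 &status);
 */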

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
        u32 i;

        dev_info(adev->dev, "GPU pci config reset\n");

        /* disable BM */
        pci_clear_master(adev->pdev);
        /* reset */
        amdgpu_pci_config_reset(adev);

        udelay(100);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                u32 memsize = nbio_v2_3_get_memsize(adev);

                if (memsize != 0xffffffff)
                        break;
                udelay(1);
        }
}
#endif

static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
        u32 i;
        int ret = 0;

        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        dev_info(adev->dev, "GPU mode1 reset\n");

        /* disable BM */
        pci_clear_master(adev->pdev);

        pci_save_state(adev->pdev);

        ret = psp_gpu_reset(adev);
        if (ret)
                dev_err(adev->dev, "GPU mode1 reset failed\n");

        pci_restore_state(adev->pdev);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                u32 memsize = adev->nbio_funcs->get_memsize(adev);

                if (memsize != 0xffffffff)
                        break;
                udelay(1);
        }

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);

        return ret;
}
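
/*
 * Prefer a BACO (bus-active, chip-off) reset when the SMU advertises
 * support for it, and fall back to a PSP-driven mode1 reset otherwise.
 */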
static int nv_asic_reset(struct amdgpu_device *adev)
{
        int ret = 0;
        struct smu_context *smu = &adev->smu;

        /* FIXME: pci config reset hasn't worked since vega10 */
#if 0
        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        nv_gpu_pci_config_reset(adev);

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

        if (smu_baco_is_support(smu))
                ret = smu_baco_reset(smu);
        else
                ret = nv_asic_mode1_reset(adev);

        return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        /* todo */
        return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */
        return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
        if (pci_is_root_bus(adev->pdev->bus))
                return;

        if (amdgpu_pcie_gen2 == 0)
                return;

        if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                        CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
                return;

        /* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
        if (amdgpu_aspm == 0)
                return;

        /* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
{
        adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
        adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_COMMON,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &nv_common_ip_funcs,
};

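/*
 * Register this ASIC's IP blocks with the driver core.  Blocks are
 * initialized in the order they are added here; note that the SMU block
 * is added before display/GFX when firmware is loaded through the PSP,
 * and after SDMA for direct firmware loading.
 */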
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
        /* Set IP register base before any HW register access */
        switch (adev->asic_type) {
        case CHIP_NAVI10:
                navi10_reg_base_init(adev);
                break;
        default:
                return -EINVAL;
        }

        adev->nbio_funcs = &nbio_v2_3_funcs;

        adev->nbio_funcs->detect_hw_virt(adev);

        switch (adev->asic_type) {
        case CHIP_NAVI10:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
                    is_support_sw_smu(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on navi."
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
                    is_support_sw_smu(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                if (adev->enable_mes)
                        amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
        return adev->nbio_funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
        adev->nbio_funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
                              struct amdgpu_ring *ring)
{
        /* invalidate over the ring when possible, otherwise via MMIO */
        if (!ring || !ring->funcs->emit_wreg) {
                WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
        } else {
                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
                                        HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
        }
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
        return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
                              uint64_t *count0,
                              uint64_t *count1)
{
        /* TODO */
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
        u32 sol_reg;

        if (adev->flags & AMD_IS_APU)
                return false;

        /* Check sOS sign of life register to confirm sys driver and sOS
         * have already been loaded.
         */
        sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
        if (sol_reg)
                return true;
#endif
        /* TODO: re-enable it when mode1 reset is functional */
        return false;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
        adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
        adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
        adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
        adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
        adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
        adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
        adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
        adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
        adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
        adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
        adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
        adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
        adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
        adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
        adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
        adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
        adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
        adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
        adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
        adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
        adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
        adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

        adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
        adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
        .read_disabled_bios = &nv_read_disabled_bios,
        .read_bios_from_rom = &nv_read_bios_from_rom,
        .read_register = &nv_read_register,
        .reset = &nv_asic_reset,
        .set_vga_state = &nv_vga_set_state,
        .get_xclk = &nv_get_xclk,
        .set_uvd_clocks = &nv_set_uvd_clocks,
        .set_vce_clocks = &nv_set_vce_clocks,
        .get_config_memsize = &nv_get_config_memsize,
        .flush_hdp = &nv_flush_hdp,
        .invalidate_hdp = &nv_invalidate_hdp,
        .init_doorbell_index = &nv_init_doorbell_index,
        .need_full_reset = &nv_need_full_reset,
        .get_pcie_usage = &nv_get_pcie_usage,
        .need_reset_on_init = &nv_need_reset_on_init,
};

static int nv_common_early_init(void *handle)
{
        bool psp_enabled = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->smc_rreg = NULL;
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &nv_pcie_rreg;
        adev->pcie_wreg = &nv_pcie_wreg;

        /* TODO: will add them during VCN v2 implementation */
        adev->uvd_ctx_rreg = NULL;
        adev->uvd_ctx_wreg = NULL;

        adev->didt_rreg = &nv_didt_rreg;
        adev->didt_wreg = &nv_didt_wreg;

        adev->asic_funcs = &nv_asic_funcs;

        if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
            (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
                psp_enabled = true;

        adev->rev_id = nv_get_rev_id(adev);
        adev->external_rev_id = 0xff;
        switch (adev->asic_type) {
        case CHIP_NAVI10:
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_MMHUB |
                        AMD_PG_SUPPORT_ATHUB;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        return 0;
}

static int nv_common_late_init(void *handle)
{
        return 0;
}

static int nv_common_sw_init(void *handle)
{
        return 0;
}

static int nv_common_sw_fini(void *handle)
{
        return 0;
}

static int nv_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* enable pcie gen2/3 link */
        nv_pcie_gen3_enable(adev);
        /* enable aspm */
        nv_program_aspm(adev);
        /* setup nbio registers */
        adev->nbio_funcs->init_registers(adev);
        /* enable the doorbell aperture */
        nv_enable_doorbell_aperture(adev, true);

        return 0;
}

static int nv_common_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable the doorbell aperture */
        nv_enable_doorbell_aperture(adev, false);

        return 0;
}

static int nv_common_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
        return true;
}

static int nv_common_wait_for_idle(void *handle)
{
        return 0;
}

static int nv_common_soft_reset(void *handle)
{
        return 0;
}

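/*
 * HDP 5.0 supports three mutually exclusive memory power modes: light
 * sleep (LS), deep sleep (DS) and shutdown (SD).  The helper below forces
 * the IPH and RC memory clocks on, disables all modes, programs exactly
 * one of them, and then restores the original clock overrides.
 */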
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
                                           bool enable)
{
        uint32_t hdp_clk_cntl, hdp_clk_cntl1;
        uint32_t hdp_mem_pwr_cntl;

        if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_HDP_DS |
                                AMD_CG_SUPPORT_HDP_SD)))
                return;

        hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
        hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

        /* Before switching the clock/power mode,
         * force the IPH & RC clocks on */
        hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
        hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                     RC_MEM_CLK_SOFT_OVERRIDE, 1);
        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

        /* HDP 5.0 doesn't support dynamic power mode switch,
         * disable clock and power gating before making any change */
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_CTRL_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_LS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_DS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_SD_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_CTRL_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_LS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_DS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_SD_EN, 0);
        WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

        /* only one power mode (LS/DS/SD) can be enabled at a time */
        if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_LS_EN, enable);
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_LS_EN, enable);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_DS_EN, enable);
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_DS_EN, enable);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_SD_EN, enable);
                /* RC should not use shut down mode, fallback to ds */
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_DS_EN, enable);
        }

        WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

        /* restore IPH & RC clock override after clock/power mode changing */
        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
                                       bool enable)
{
        uint32_t hdp_clk_cntl;

        if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
                return;

        hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

        if (enable) {
                hdp_clk_cntl &=
                        ~(uint32_t)
                        (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
                         HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
                         HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
                         HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
                         HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
                         HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
        } else {
                hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
        }

        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                return 0;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
                adev->nbio_funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                adev->nbio_funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                nv_update_hdp_mem_power_gating(adev,
                                state == AMD_CG_STATE_GATE);
                nv_update_hdp_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                break;
        default:
                break;
        }
        return 0;
}

static int nv_common_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        /* TODO */
        return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        uint32_t tmp;

        if (amdgpu_sriov_vf(adev))
                *flags = 0;

        adev->nbio_funcs->get_clockgating_state(adev, flags);

        /* AMD_CG_SUPPORT_HDP_MGCG */
        tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
        if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
                *flags |= AMD_CG_SUPPORT_HDP_MGCG;

        /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
        tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
        if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_HDP_LS;
        else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_HDP_DS;
        else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
                *flags |= AMD_CG_SUPPORT_HDP_SD;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
        .name = "nv_common",
        .early_init = nv_common_early_init,
        .late_init = nv_common_late_init,
        .sw_init = nv_common_sw_init,
        .sw_fini = nv_common_sw_fini,
        .hw_init = nv_common_hw_init,
        .hw_fini = nv_common_hw_fini,
        .suspend = nv_common_suspend,
        .resume = nv_common_resume,
        .is_idle = nv_common_is_idle,
        .wait_for_idle = nv_common_wait_for_idle,
        .soft_reset = nv_common_soft_reset,
        .set_clockgating_state = nv_common_set_clockgating_state,
        .set_powergating_state = nv_common_set_powergating_state,
        .get_clockgating_state = nv_common_get_clockgating_state,
};