// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra194 SoC
 *
 * Copyright (C) 2019 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"

#define APPL_PINMUX				0x0
#define APPL_PINMUX_PEX_RST			BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)

#define APPL_CTRL				0x4
#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
#define APPL_CTRL_LTSSM_EN			BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1

#define APPL_INTR_EN_L0_0			0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)

#define APPL_INTR_STATUS_L0			0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)

#define APPL_INTR_EN_L1_0_0				0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)

#define APPL_INTR_STATUS_L1_0_0				0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)

#define APPL_INTR_STATUS_L1_1			0x2C
#define APPL_INTR_STATUS_L1_2			0x30
#define APPL_INTR_STATUS_L1_3			0x34
#define APPL_INTR_STATUS_L1_6			0x3C
#define APPL_INTR_STATUS_L1_7			0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)

#define APPL_INTR_EN_L1_8_0			0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)

#define APPL_INTR_STATUS_L1_8_0			0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)

#define APPL_INTR_STATUS_L1_9			0x54
#define APPL_INTR_STATUS_L1_10			0x58
#define APPL_INTR_STATUS_L1_11			0x64
#define APPL_INTR_STATUS_L1_13			0x74
#define APPL_INTR_STATUS_L1_14			0x78
#define APPL_INTR_STATUS_L1_15			0x7C
#define APPL_INTR_STATUS_L1_17			0x88

#define APPL_INTR_EN_L1_18				0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_INTR_STATUS_L1_18				0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_MSI_CTRL_1				0xAC

#define APPL_MSI_CTRL_2				0xB0

#define APPL_LEGACY_INTX			0xB8

#define APPL_LTR_MSG_1				0xC4
#define LTR_MSG_REQ				BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT			16

#define APPL_LTR_MSG_2				0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)

#define APPL_LINK_STATUS			0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)

#define APPL_DEBUG				0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
#define LTSSM_STATE_PRE_DETECT			5

#define APPL_RADM_STATUS			0xE4
#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)

#define APPL_DM_TYPE				0x100
#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
#define APPL_DM_TYPE_RP				0x4
#define APPL_DM_TYPE_EP				0x0

#define APPL_CFG_BASE_ADDR			0x104
#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)

#define APPL_CFG_MISC				0x110
#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT		10
#define APPL_CFG_MISC_ARCACHE_VAL		3

#define APPL_CFG_SLCG_OVERRIDE			0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)

#define APPL_CAR_RESET_OVRD				0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)

#define IO_BASE_IO_DECODE			BIT(0)
#define IO_BASE_IO_DECODE_BIT8			BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF		0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT		(19)

#define EVENT_COUNTER_ALL_CLEAR			0x3
#define EVENT_COUNTER_ENABLE_ALL		0x7
#define EVENT_COUNTER_ENABLE_SHIFT		2
#define EVENT_COUNTER_EVENT_SEL_MASK		GENMASK(7, 0)
#define EVENT_COUNTER_EVENT_SEL_SHIFT		16
#define EVENT_COUNTER_EVENT_Tx_L0S		0x2
#define EVENT_COUNTER_EVENT_Rx_L0S		0x3
#define EVENT_COUNTER_EVENT_L1			0x5
#define EVENT_COUNTER_EVENT_L1_1		0x7
#define EVENT_COUNTER_EVENT_L1_2		0x8
#define EVENT_COUNTER_GROUP_SEL_SHIFT		24
#define EVENT_COUNTER_GROUP_5			0x5

#define N_FTS_VAL				52
#define FTS_VAL					52

#define PORT_LOGIC_MSI_CTRL_INT_0_EN		0x828

#define GEN3_EQ_CONTROL_OFF			0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)

#define GEN3_RELATED_OFF			0x890
#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0)
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24)

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
#define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_CRS_OKAY		0
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2

#define MSIX_ADDR_MATCH_LOW_OFF			0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF		0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL		0x948

#define CAP_SPCIE_CAP_OFF			0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8

#define PME_ACK_TIMEOUT 10000

#define LTSSM_TIMEOUT 50000	/* 50ms */

#define GEN3_GEN4_EQ_PRESET_INIT	5

#define GEN1_CORE_CLK_FREQ	62500000
#define GEN2_CORE_CLK_FREQ	125000000
#define GEN3_CORE_CLK_FREQ	250000000
#define GEN4_CORE_CLK_FREQ	500000000

#define LTR_MSG_TIMEOUT		(100 * 1000)

#define PERST_DEBOUNCE_TIME	(5 * 1000)

#define EP_STATE_DISABLED	0
#define EP_STATE_ENABLED	1

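/* Core clock rate to use for each negotiated link speed (Gen1..Gen4) */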
static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};

struct tegra194_pcie {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;
	struct tegra_bpmp *bpmp;

	enum dw_pcie_device_mode mode;

	bool supports_clkreq;
	bool enable_cdm_check;
	bool link_state;
	bool update_fc_fixup;
	u8 init_link_width;
	u32 msi_ctrl_int;
	u32 num_lanes;
	u32 cid;
	u32 cfg_link_cap_l1sub;
	u32 pcie_cap_base;
	u32 aspm_cmrt;
	u32 aspm_pwr_on_t;
	u32 aspm_l0s_enter_lat;

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
};

struct tegra194_pcie_of_data {
	enum dw_pcie_device_mode mode;
};

static inline struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra194_pcie, pci);
}

static inline void appl_writel(struct tegra194_pcie *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}

static inline u32 appl_readl(struct tegra194_pcie *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}

struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};

static void apply_bad_link_workaround(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE: Since this scenario is uncommon and the link is not stable
	 * anyway, don't wait to confirm that the link is really
	 * transitioning to Gen-2 speed.
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
				     PCI_EXP_LNKSTA_NLW_SHIFT;
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}

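/*
 * Root port mode interrupt handler: services link state change, bandwidth
 * management/autonomous bandwidth and CDM register check events.
 */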
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra194_pcie *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val, tmp;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);

			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
			val |= PORT_LOGIC_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
		}
	}

	if (val & APPL_INTR_STATUS_L0_INT_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
	}

	return IRQ_HANDLED;
}

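/*
 * Hot reset completed (endpoint mode): clear all latched interrupt status
 * registers and re-enable the LTSSM so the link can train again.
 */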
static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)
{
	u32 val;

	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}

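/*
 * Threaded half of the endpoint mode interrupt: scales the core clock to the
 * negotiated link speed and, once the host enables bus mastering, sends an
 * LTR message upstream if the endpoint advertises L1 Substates.
 */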
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra194_pcie *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	u32 val, speed;

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	/* If EP doesn't advertise L1SS, just return */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		ktime_t timeout;

		/* 110us for both snoop and no-snoop */
		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
		appl_writel(pcie, val, APPL_LTR_MSG_1);

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}

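/*
 * Hard IRQ half for endpoint mode: acknowledges hot-reset and link-up events
 * and wakes the IRQ thread when the host toggles Bus Master Enable.
 */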
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra194_pcie *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	int spurious = 1;
	u32 status_l0, status_l1, link_status;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			link_status = appl_readl(pcie, APPL_LINK_STATUS);
			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				dw_pcie_ep_linkup(ep);
			}
		}

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
			 status_l0);
		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}

static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 *val)
{
	/*
	 * This is an endpoint mode specific register that happens to appear
	 * even when the controller is operating in root port mode, and the
	 * system hangs when it is accessed with the link in the ASPM-L1
	 * state. So skip accessing it altogether.
	 */
	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
		*val = 0x00000000;
		return PCIBIOS_SUCCESSFUL;
	}

	return pci_generic_config_read(bus, devfn, where, size, val);
}

static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 val)
{
	/*
	 * This is an endpoint mode specific register that happens to appear
	 * even when the controller is operating in root port mode, and the
	 * system hangs when it is accessed with the link in the ASPM-L1
	 * state. So skip accessing it altogether.
	 */
	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
		return PCIBIOS_SUCCESSFUL;

	return pci_generic_config_write(bus, devfn, where, size, val);
}

static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra194_pcie_rd_own_conf,
	.write = tegra194_pcie_wr_own_conf,
};

#if defined(CONFIG_PCIEASPM)
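/*
 * DBI offsets of the event counter control/data registers for each
 * controller instance, indexed by the controller ID (pcie->cid).
 */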
static const u32 event_cntr_ctrl_offset[] = {
	0x1d8,
	0x1a8,
	0x1a8,
	0x1a8,
	0x1c4,
	0x1d8
};

static const u32 event_cntr_data_offset[] = {
	0x1dc,
	0x1ac,
	0x1ac,
	0x1ac,
	0x1c8,
	0x1dc
};

static void disable_aspm_l11(struct tegra194_pcie *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static void disable_aspm_l12(struct tegra194_pcie *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

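/* Select @event in counter group 5, enable counting and return the count */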
static inline u32 event_counter_prog(struct tegra194_pcie *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);

	return val;
}

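/* debugfs: report the L0s/L1/L1.1/L1.2 entry counters, then clear them */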
static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra194_pcie *pcie = (struct tegra194_pcie *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);

	return 0;
}

static void init_host_aspm(struct tegra194_pcie *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);
	val |= (pcie->aspm_pwr_on_t << 19);
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
	val |= PORT_AFR_ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}

static void init_debugfs(struct tegra194_pcie *pcie)
{
	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
				    aspm_state_cnt);
}
#else
static inline void disable_aspm_l12(struct tegra194_pcie *pcie) { return; }
static inline void disable_aspm_l11(struct tegra194_pcie *pcie) { return; }
static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; }
static inline void init_debugfs(struct tegra194_pcie *pcie) { return; }
#endif

static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
				PCI_EXP_LNKSTA_NLW_SHIFT;

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}

static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable legacy interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
	val |= APPL_INTR_EN_L1_8_INTX_EN;
	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
	if (IS_ENABLED(CONFIG_PCIEAER))
		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}

static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable MSI interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}

static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);

	/* Clear interrupt statuses before enabling interrupts */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	tegra_pcie_enable_system_interrupts(pp);
	tegra_pcie_enable_legacy_interrupts(pp);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi_interrupts(pp);
}

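/*
 * Program the per-lane Gen3/Gen4 transmitter presets and the preset request
 * vectors used during link equalization.
 */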
static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);

		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		val = dw_pcie_readb_dbi(pci, offset + i);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_writeb_dbi(pci, offset + i, val);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}

static int tegra194_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
	u32 val;

	pp->bridge->ops = &tegra_pci_ops;

	if (!pcie->pcie_cap_base)
		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
							      PCI_CAP_ID_EXP);

	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Enable as 0xFFFF0001 response for CRS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_CRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Configure Max lane width from DT */
	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_MLW;
	val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	return 0;
}

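/*
 * Start the link. In endpoint mode only the PERST IRQ is enabled here; in
 * root port mode PEX_RST is toggled, the LTSSM is enabled and, if training
 * fails because the endpoint cannot cope with the Data Link Feature, link-up
 * is retried once with DLF disabled.
 */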
static int tegra194_pcie_start_link(struct dw_pcie *pci)
{
	u32 val, offset, speed, tmp;
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
	struct pcie_port *pp = &pci->pp;
	bool retry = true;

	if (pcie->mode == DW_PCIE_EP_TYPE) {
		enable_irq(pcie->pex_rst_irq);
		return 0;
	}

retry_link:
	/* Assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val &= ~APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	usleep_range(100, 200);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* De-assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	msleep(100);

	if (dw_pcie_wait_for_link(pci)) {
		if (!retry)
			return 0;
		/*
		 * There are some endpoints which can't get the link up if
		 * the root port has the Data Link Feature (DLF) enabled.
		 * Refer to spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more
		 * info on Scaled Flow Control and DLF.
		 * So, confirm that this is indeed the case here and attempt
		 * link up once again with DLF disabled.
		 */
		val = appl_readl(pcie, APPL_DEBUG);
		val &= APPL_DEBUG_LTSSM_STATE_MASK;
		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
		tmp = appl_readl(pcie, APPL_LINK_STATUS);
		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
		if (!(val == 0x11 && !tmp)) {
			/* Link is down for all good reasons */
			return 0;
		}

		dev_info(pci->dev, "Link is down in DLL");
		dev_info(pci->dev, "Trying again with DLFE disabled\n");
		/* Disable LTSSM */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, val, APPL_CTRL);

		reset_control_assert(pcie->core_rst);
		reset_control_deassert(pcie->core_rst);

		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
		val &= ~PCI_DLF_EXCHANGE_ENABLE;
		dw_pcie_writel_dbi(pci, offset, val);

		tegra194_pcie_host_init(pp);
		dw_pcie_setup_rc(pp);

		retry = false;
		goto retry_link;
	}

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	tegra_pcie_enable_interrupts(pp);

	return 0;
}

Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1001 | static int tegra194_pcie_link_up(struct dw_pcie *pci) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1002 | { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1003 | struct tegra194_pcie *pcie = to_tegra_pcie(pci); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1004 | u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); |
| 1005 | |
| 1006 | return !!(val & PCI_EXP_LNKSTA_DLLLA); |
| 1007 | } |
| 1008 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1009 | static void tegra194_pcie_stop_link(struct dw_pcie *pci) |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1010 | { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1011 | struct tegra194_pcie *pcie = to_tegra_pcie(pci); |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1012 | |
| 1013 | disable_irq(pcie->pex_rst_irq); |
| 1014 | } |
| 1015 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1016 | static const struct dw_pcie_ops tegra_dw_pcie_ops = { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1017 | .link_up = tegra194_pcie_link_up, |
| 1018 | .start_link = tegra194_pcie_start_link, |
| 1019 | .stop_link = tegra194_pcie_stop_link, |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1020 | }; |
| 1021 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1022 | static const struct dw_pcie_host_ops tegra194_pcie_host_ops = { |
| 1023 | .host_init = tegra194_pcie_host_init, |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1024 | }; |
| 1025 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1026 | static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1027 | { |
| 1028 | unsigned int phy_count = pcie->phy_count; |
| 1029 | |
| 1030 | while (phy_count--) { |
| 1031 | phy_power_off(pcie->phys[phy_count]); |
| 1032 | phy_exit(pcie->phys[phy_count]); |
| 1033 | } |
| 1034 | } |
| 1035 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1036 | static int tegra_pcie_enable_phy(struct tegra194_pcie *pcie) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1037 | { |
| 1038 | unsigned int i; |
| 1039 | int ret; |
| 1040 | |
| 1041 | for (i = 0; i < pcie->phy_count; i++) { |
| 1042 | ret = phy_init(pcie->phys[i]); |
| 1043 | if (ret < 0) |
| 1044 | goto phy_power_off; |
| 1045 | |
| 1046 | ret = phy_power_on(pcie->phys[i]); |
| 1047 | if (ret < 0) |
| 1048 | goto phy_exit; |
| 1049 | } |
| 1050 | |
| 1051 | return 0; |
| 1052 | |
| 1053 | phy_power_off: |
| 1054 | while (i--) { |
| 1055 | phy_power_off(pcie->phys[i]); |
| 1056 | phy_exit: |
| 1057 | phy_exit(pcie->phys[i]); |
| 1058 | } |
| 1059 | |
| 1060 | return ret; |
| 1061 | } |
| 1062 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1063 | static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1064 | { |
Vidya Sagar | d5353c0 | 2020-11-26 00:55:54 +0530 | [diff] [blame] | 1065 | struct platform_device *pdev = to_platform_device(pcie->dev); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1066 | struct device_node *np = pcie->dev->of_node; |
| 1067 | int ret; |
| 1068 | |
Vidya Sagar | d5353c0 | 2020-11-26 00:55:54 +0530 | [diff] [blame] | 1069 | pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); |
| 1070 | if (!pcie->dbi_res) { |
| 1071 | dev_err(pcie->dev, "Failed to find \"dbi\" region\n"); |
| 1072 | return -ENODEV; |
| 1073 | } |
| 1074 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1075 | ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt); |
| 1076 | if (ret < 0) { |
| 1077 | dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret); |
| 1078 | return ret; |
| 1079 | } |
| 1080 | |
| 1081 | ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us", |
| 1082 | &pcie->aspm_pwr_on_t); |
| 1083 | if (ret < 0) |
| 1084 | dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n", |
| 1085 | ret); |
| 1086 | |
| 1087 | ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us", |
| 1088 | &pcie->aspm_l0s_enter_lat); |
| 1089 | if (ret < 0) |
| 1090 | dev_info(pcie->dev, |
| 1091 | "Failed to read ASPM L0s Entrance latency: %d\n", ret); |
| 1092 | |
| 1093 | ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes); |
| 1094 | if (ret < 0) { |
| 1095 | dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret); |
| 1096 | return ret; |
| 1097 | } |
| 1098 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1099 | ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid); |
| 1100 | if (ret) { |
| 1101 | dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret); |
| 1102 | return ret; |
| 1103 | } |
| 1104 | |
| 1105 | ret = of_property_count_strings(np, "phy-names"); |
| 1106 | if (ret < 0) { |
| 1107 | dev_err(pcie->dev, "Failed to find PHY entries: %d\n", |
| 1108 | ret); |
| 1109 | return ret; |
| 1110 | } |
| 1111 | pcie->phy_count = ret; |
| 1112 | |
| 1113 | if (of_property_read_bool(np, "nvidia,update-fc-fixup")) |
| 1114 | pcie->update_fc_fixup = true; |
| 1115 | |
| 1116 | pcie->supports_clkreq = |
| 1117 | of_property_read_bool(pcie->dev->of_node, "supports-clkreq"); |
| 1118 | |
| 1119 | pcie->enable_cdm_check = |
| 1120 | of_property_read_bool(np, "snps,enable-cdm-check"); |
| 1121 | |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1122 | if (pcie->mode == DW_PCIE_RC_TYPE) |
| 1123 | return 0; |
| 1124 | |
| 1125 | /* Endpoint mode specific DT entries */ |
| 1126 | pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN); |
| 1127 | if (IS_ERR(pcie->pex_rst_gpiod)) { |
Thierry Reding | 5445523 | 2020-03-19 14:12:30 +0100 | [diff] [blame] | 1128 | int err = PTR_ERR(pcie->pex_rst_gpiod); |
| 1129 | const char *level = KERN_ERR; |
| 1130 | |
| 1131 | if (err == -EPROBE_DEFER) |
| 1132 | level = KERN_DEBUG; |
| 1133 | |
| 1134 | dev_printk(level, pcie->dev, |
| 1135 | dev_fmt("Failed to get PERST GPIO: %d\n"), |
| 1136 | err); |
| 1137 | return err; |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1138 | } |
| 1139 | |
| 1140 | pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev, |
| 1141 | "nvidia,refclk-select", |
| 1142 | GPIOD_OUT_HIGH); |
| 1143 | if (IS_ERR(pcie->pex_refclk_sel_gpiod)) { |
Thierry Reding | 5445523 | 2020-03-19 14:12:30 +0100 | [diff] [blame] | 1144 | int err = PTR_ERR(pcie->pex_refclk_sel_gpiod); |
| 1145 | const char *level = KERN_ERR; |
| 1146 | |
| 1147 | if (err == -EPROBE_DEFER) |
| 1148 | level = KERN_DEBUG; |
| 1149 | |
| 1150 | dev_printk(level, pcie->dev, |
| 1151 | dev_fmt("Failed to get REFCLK select GPIOs: %d\n"), |
| 1152 | err); |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1153 | pcie->pex_refclk_sel_gpiod = NULL; |
| 1154 | } |
| 1155 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1156 | return 0; |
| 1157 | } |
| 1158 | |
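| | /* |
| |  * Ask the BPMP firmware to enable or disable this PCIe controller. The |
| |  * request is an MRQ_UPHY message carrying CMD_UPHY_PCIE_CONTROLLER_STATE, |
| |  * the controller ID and the desired state; controller 5 does not need |
| |  * its state set by the BPMP firmware and is skipped. |
| |  */ |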
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1159 | static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie, |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1160 | bool enable) |
| 1161 | { |
| 1162 | struct mrq_uphy_response resp; |
| 1163 | struct tegra_bpmp_message msg; |
| 1164 | struct mrq_uphy_request req; |
| 1165 | |
| 1166 | /* Controller-5 doesn't need to have its state set by BPMP-FW */ |
| 1167 | if (pcie->cid == 5) |
| 1168 | return 0; |
| 1169 | |
| 1170 | memset(&req, 0, sizeof(req)); |
| 1171 | memset(&resp, 0, sizeof(resp)); |
| 1172 | |
| 1173 | req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE; |
| 1174 | req.controller_state.pcie_controller = pcie->cid; |
| 1175 | req.controller_state.enable = enable; |
| 1176 | |
| 1177 | memset(&msg, 0, sizeof(msg)); |
| 1178 | msg.mrq = MRQ_UPHY; |
| 1179 | msg.tx.data = &req; |
| 1180 | msg.tx.size = sizeof(req); |
| 1181 | msg.rx.data = &resp; |
| 1182 | msg.rx.size = sizeof(resp); |
| 1183 | |
| 1184 | return tegra_bpmp_transfer(pcie->bpmp, &msg); |
| 1185 | } |
| 1186 | |
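| | /* |
| |  * Endpoint-mode counterpart of tegra_pcie_bpmp_set_ctrl_state(): ask the |
| |  * BPMP firmware to initialize or turn off the UPHY PLL for this |
| |  * controller (CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT/_OFF over MRQ_UPHY). |
| |  */ |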
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1187 | static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie, |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1188 | bool enable) |
| 1189 | { |
| 1190 | struct mrq_uphy_response resp; |
| 1191 | struct tegra_bpmp_message msg; |
| 1192 | struct mrq_uphy_request req; |
| 1193 | |
| 1194 | memset(&req, 0, sizeof(req)); |
| 1195 | memset(&resp, 0, sizeof(resp)); |
| 1196 | |
| 1197 | if (enable) { |
| 1198 | req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT; |
| 1199 | req.ep_ctrlr_pll_init.ep_controller = pcie->cid; |
| 1200 | } else { |
| 1201 | req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF; |
| 1202 | req.ep_ctrlr_pll_off.ep_controller = pcie->cid; |
| 1203 | } |
| 1204 | |
| 1205 | memset(&msg, 0, sizeof(msg)); |
| 1206 | msg.mrq = MRQ_UPHY; |
| 1207 | msg.tx.data = &req; |
| 1208 | msg.tx.size = sizeof(req); |
| 1209 | msg.rx.data = &resp; |
| 1210 | msg.rx.size = sizeof(resp); |
| 1211 | |
| 1212 | return tegra_bpmp_transfer(pcie->bpmp, &msg); |
| 1213 | } |
| 1214 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1215 | static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1216 | { |
| 1217 | struct pcie_port *pp = &pcie->pci.pp; |
| 1218 | struct pci_bus *child, *root_bus = NULL; |
| 1219 | struct pci_dev *pdev; |
| 1220 | |
| 1221 | /* |
| 1222 |  * With some endpoints, the link does not enter the L2 state unless |
| 1223 |  * the immediate downstream devices are in D0. So make sure those |
| 1224 |  * devices are in D0 before sending PME_TurnOff to put the link into |
| 1225 |  * L2. |
| 1226 |  * This is per PCI Express Base r4.0 v1.0, September 27 2017, |
| 1227 |  * Section 5.2 "Link State Power Management" (page 428). |
| 1228 |  */ |
| 1229 | |
Rob Herring | 5808d43 | 2020-08-20 21:53:57 -0600 | [diff] [blame] | 1230 | list_for_each_entry(child, &pp->bridge->bus->children, node) { |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1231 | /* Bring downstream devices to D0 if they are not already in */ |
Rob Herring | 5808d43 | 2020-08-20 21:53:57 -0600 | [diff] [blame] | 1232 | if (child->parent == pp->bridge->bus) { |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1233 | root_bus = child; |
| 1234 | break; |
| 1235 | } |
| 1236 | } |
| 1237 | |
| 1238 | if (!root_bus) { |
| 1239 | dev_err(pcie->dev, "Failed to find downstream devices\n"); |
| 1240 | return; |
| 1241 | } |
| 1242 | |
| 1243 | list_for_each_entry(pdev, &root_bus->devices, bus_list) { |
| 1244 | if (PCI_SLOT(pdev->devfn) == 0) { |
| 1245 | if (pci_set_power_state(pdev, PCI_D0)) |
| 1246 | dev_err(pcie->dev, |
| 1247 | "Failed to transition %s to D0 state\n", |
| 1248 | dev_name(&pdev->dev)); |
| 1249 | } |
| 1250 | } |
| 1251 | } |
| 1252 | |
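| | /* |
| |  * Both slot supplies are optional: -ENODEV from |
| |  * devm_regulator_get_optional() just means the supply is not described |
| |  * in the device tree, so the pointer is left NULL and the enable/disable |
| |  * helpers below silently skip it. |
| |  */ |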
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1253 | static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie) |
Vidya Sagar | 0a901f2 | 2019-09-05 16:15:51 +0530 | [diff] [blame] | 1254 | { |
| 1255 | pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3"); |
| 1256 | if (IS_ERR(pcie->slot_ctl_3v3)) { |
| 1257 | if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV) |
| 1258 | return PTR_ERR(pcie->slot_ctl_3v3); |
| 1259 | |
| 1260 | pcie->slot_ctl_3v3 = NULL; |
| 1261 | } |
| 1262 | |
| 1263 | pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v"); |
| 1264 | if (IS_ERR(pcie->slot_ctl_12v)) { |
| 1265 | if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV) |
| 1266 | return PTR_ERR(pcie->slot_ctl_12v); |
| 1267 | |
| 1268 | pcie->slot_ctl_12v = NULL; |
| 1269 | } |
| 1270 | |
| 1271 | return 0; |
| 1272 | } |
| 1273 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1274 | static int tegra_pcie_enable_slot_regulators(struct tegra194_pcie *pcie) |
Vidya Sagar | 0a901f2 | 2019-09-05 16:15:51 +0530 | [diff] [blame] | 1275 | { |
| 1276 | int ret; |
| 1277 | |
| 1278 | if (pcie->slot_ctl_3v3) { |
| 1279 | ret = regulator_enable(pcie->slot_ctl_3v3); |
| 1280 | if (ret < 0) { |
| 1281 | dev_err(pcie->dev, |
| 1282 | "Failed to enable 3.3V slot supply: %d\n", ret); |
| 1283 | return ret; |
| 1284 | } |
| 1285 | } |
| 1286 | |
| 1287 | if (pcie->slot_ctl_12v) { |
| 1288 | ret = regulator_enable(pcie->slot_ctl_12v); |
| 1289 | if (ret < 0) { |
| 1290 | dev_err(pcie->dev, |
| 1291 | "Failed to enable 12V slot supply: %d\n", ret); |
| 1292 | goto fail_12v_enable; |
| 1293 | } |
| 1294 | } |
| 1295 | |
| 1296 | /* |
| 1297 | * According to PCI Express Card Electromechanical Specification |
| 1298 | * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive) |
| 1299 | * should be a minimum of 100ms. |
| 1300 | */ |
| 1301 | if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v) |
| 1302 | msleep(100); |
| 1303 | |
| 1304 | return 0; |
| 1305 | |
| 1306 | fail_12v_enable: |
| 1307 | if (pcie->slot_ctl_3v3) |
| 1308 | regulator_disable(pcie->slot_ctl_3v3); |
| 1309 | return ret; |
| 1310 | } |
| 1311 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1312 | static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie) |
Vidya Sagar | 0a901f2 | 2019-09-05 16:15:51 +0530 | [diff] [blame] | 1313 | { |
| 1314 | if (pcie->slot_ctl_12v) |
| 1315 | regulator_disable(pcie->slot_ctl_12v); |
| 1316 | if (pcie->slot_ctl_3v3) |
| 1317 | regulator_disable(pcie->slot_ctl_3v3); |
| 1318 | } |
| 1319 | |
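| | /* |
| |  * Bring the controller up for root port operation: enable it in BPMP, |
| |  * switch on the slot and vddio-pex-ctl supplies, enable the core clock, |
| |  * release the APB reset, optionally arm HW_HOT_RST, power up the PHYs, |
| |  * program the APPL CFG and iATU-DMA base addresses and finally release |
| |  * the core reset. Errors unwind in reverse order. |
| |  */ |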
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1320 | static int tegra_pcie_config_controller(struct tegra194_pcie *pcie, |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1321 | bool en_hw_hot_rst) |
| 1322 | { |
| 1323 | int ret; |
| 1324 | u32 val; |
| 1325 | |
| 1326 | ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); |
| 1327 | if (ret) { |
| 1328 | dev_err(pcie->dev, |
| 1329 | "Failed to enable controller %u: %d\n", pcie->cid, ret); |
| 1330 | return ret; |
| 1331 | } |
| 1332 | |
Vidya Sagar | 0a901f2 | 2019-09-05 16:15:51 +0530 | [diff] [blame] | 1333 | ret = tegra_pcie_enable_slot_regulators(pcie); |
| 1334 | if (ret < 0) |
| 1335 | goto fail_slot_reg_en; |
| 1336 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1337 | ret = regulator_enable(pcie->pex_ctl_supply); |
| 1338 | if (ret < 0) { |
| 1339 | dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret); |
| 1340 | goto fail_reg_en; |
| 1341 | } |
| 1342 | |
| 1343 | ret = clk_prepare_enable(pcie->core_clk); |
| 1344 | if (ret) { |
| 1345 | dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret); |
| 1346 | goto fail_core_clk; |
| 1347 | } |
| 1348 | |
| 1349 | ret = reset_control_deassert(pcie->core_apb_rst); |
| 1350 | if (ret) { |
| 1351 | dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n", |
| 1352 | ret); |
| 1353 | goto fail_core_apb_rst; |
| 1354 | } |
| 1355 | |
| 1356 | if (en_hw_hot_rst) { |
| 1357 | /* Enable HW_HOT_RST mode */ |
| 1358 | val = appl_readl(pcie, APPL_CTRL); |
| 1359 | val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << |
| 1360 | APPL_CTRL_HW_HOT_RST_MODE_SHIFT); |
| 1361 | val |= APPL_CTRL_HW_HOT_RST_EN; |
| 1362 | appl_writel(pcie, val, APPL_CTRL); |
| 1363 | } |
| 1364 | |
| 1365 | ret = tegra_pcie_enable_phy(pcie); |
| 1366 | if (ret) { |
| 1367 | dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret); |
| 1368 | goto fail_phy; |
| 1369 | } |
| 1370 | |
| 1371 | /* Update CFG base address */ |
| 1372 | appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, |
| 1373 | APPL_CFG_BASE_ADDR); |
| 1374 | |
| 1375 | /* Configure this core for RP mode operation */ |
| 1376 | appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE); |
| 1377 | |
| 1378 | appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); |
| 1379 | |
| 1380 | val = appl_readl(pcie, APPL_CTRL); |
| 1381 | appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL); |
| 1382 | |
| 1383 | val = appl_readl(pcie, APPL_CFG_MISC); |
| 1384 | val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); |
| 1385 | appl_writel(pcie, val, APPL_CFG_MISC); |
| 1386 | |
| 1387 | if (!pcie->supports_clkreq) { |
| 1388 | val = appl_readl(pcie, APPL_PINMUX); |
Vidya Sagar | ff5c2bb9 | 2019-10-05 22:12:11 +0530 | [diff] [blame] | 1389 | val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN; |
| 1390 | val &= ~APPL_PINMUX_CLKREQ_OVERRIDE; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1391 | appl_writel(pcie, val, APPL_PINMUX); |
| 1392 | } |
| 1393 | |
| 1394 | /* Update iATU_DMA base address */ |
| 1395 | appl_writel(pcie, |
| 1396 | pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK, |
| 1397 | APPL_CFG_IATU_DMA_BASE_ADDR); |
| 1398 | |
| 1399 | reset_control_deassert(pcie->core_rst); |
| 1400 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1401 | return ret; |
| 1402 | |
| 1403 | fail_phy: |
| 1404 | reset_control_assert(pcie->core_apb_rst); |
| 1405 | fail_core_apb_rst: |
| 1406 | clk_disable_unprepare(pcie->core_clk); |
| 1407 | fail_core_clk: |
| 1408 | regulator_disable(pcie->pex_ctl_supply); |
| 1409 | fail_reg_en: |
Vidya Sagar | 0a901f2 | 2019-09-05 16:15:51 +0530 | [diff] [blame] | 1410 | tegra_pcie_disable_slot_regulators(pcie); |
| 1411 | fail_slot_reg_en: |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1412 | tegra_pcie_bpmp_set_ctrl_state(pcie, false); |
| 1413 | |
| 1414 | return ret; |
| 1415 | } |
| 1416 | |
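| | /* |
| |  * Reverse of tegra_pcie_config_controller(): assert the core reset, |
| |  * power down the PHYs, assert the APB reset, gate the core clock, drop |
| |  * the supplies and ask BPMP to disable the controller. Failures on this |
| |  * power-down path are only logged. |
| |  */ |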
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1417 | static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1418 | { |
| 1419 | int ret; |
| 1420 | |
| 1421 | ret = reset_control_assert(pcie->core_rst); |
Vidya Sagar | b8f0d67 | 2020-12-03 19:04:49 +0530 | [diff] [blame] | 1422 | if (ret) |
| 1423 | dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1424 | |
| 1425 | tegra_pcie_disable_phy(pcie); |
| 1426 | |
| 1427 | ret = reset_control_assert(pcie->core_apb_rst); |
Vidya Sagar | b8f0d67 | 2020-12-03 19:04:49 +0530 | [diff] [blame] | 1428 | if (ret) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1429 | dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1430 | |
| 1431 | clk_disable_unprepare(pcie->core_clk); |
| 1432 | |
| 1433 | ret = regulator_disable(pcie->pex_ctl_supply); |
Vidya Sagar | b8f0d67 | 2020-12-03 19:04:49 +0530 | [diff] [blame] | 1434 | if (ret) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1435 | dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1436 | |
Vidya Sagar | 0a901f2 | 2019-09-05 16:15:51 +0530 | [diff] [blame] | 1437 | tegra_pcie_disable_slot_regulators(pcie); |
| 1438 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1439 | ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false); |
Vidya Sagar | b8f0d67 | 2020-12-03 19:04:49 +0530 | [diff] [blame] | 1440 | if (ret) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1441 | dev_err(pcie->dev, "Failed to disable controller %u: %d\n", |
| 1442 | pcie->cid, ret); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1443 | } |
| 1444 | |
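| | /* |
| |  * Configure the hardware and hand the port over to the DesignWare core |
| |  * via dw_pcie_host_init(); the controller is unconfigured again if host |
| |  * initialization fails. |
| |  */ |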
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1445 | static int tegra_pcie_init_controller(struct tegra194_pcie *pcie) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1446 | { |
| 1447 | struct dw_pcie *pci = &pcie->pci; |
| 1448 | struct pcie_port *pp = &pci->pp; |
| 1449 | int ret; |
| 1450 | |
| 1451 | ret = tegra_pcie_config_controller(pcie, false); |
| 1452 | if (ret < 0) |
| 1453 | return ret; |
| 1454 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1455 | pp->ops = &tegra194_pcie_host_ops; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1456 | |
| 1457 | ret = dw_pcie_host_init(pp); |
| 1458 | if (ret < 0) { |
| 1459 | dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret); |
| 1460 | goto fail_host_init; |
| 1461 | } |
| 1462 | |
| 1463 | return 0; |
| 1464 | |
| 1465 | fail_host_init: |
Vidya Sagar | b8f0d67 | 2020-12-03 19:04:49 +0530 | [diff] [blame] | 1466 | tegra_pcie_unconfig_controller(pcie); |
| 1467 | return ret; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1468 | } |
| 1469 | |
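| | /* |
| |  * Broadcast PME_Turn_Off by setting APPL_PM_XMT_TURNOFF_STATE and poll |
| |  * APPL_DEBUG for the link to settle in L2. Returns 0 on success or |
| |  * -ETIMEDOUT if L2 is not reached within PME_ACK_TIMEOUT. |
| |  */ |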
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1470 | static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1471 | { |
| 1472 | u32 val; |
| 1473 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1474 | if (!tegra194_pcie_link_up(&pcie->pci)) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1475 | return 0; |
| 1476 | |
| 1477 | val = appl_readl(pcie, APPL_RADM_STATUS); |
| 1478 | val |= APPL_PM_XMT_TURNOFF_STATE; |
| 1479 | appl_writel(pcie, val, APPL_RADM_STATUS); |
| 1480 | |
| 1481 | return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val, |
| 1482 | val & APPL_DEBUG_PM_LINKST_IN_L2_LAT, |
| 1483 | 1, PME_ACK_TIMEOUT); |
| 1484 | } |
| 1485 | |
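| | /* |
| |  * Put the link into L2 before the controller is powered down. If the L2 |
| |  * handshake fails, assert PERST# to the endpoint and clear LTSSM_EN to |
| |  * force the root port back to the Detect state; in all cases cut CLKREQ |
| |  * and the reference clock to the slot via the APPL_PINMUX overrides. |
| |  */ |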
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1486 | static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1487 | { |
| 1488 | u32 data; |
| 1489 | int err; |
| 1490 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1491 | if (!tegra194_pcie_link_up(&pcie->pci)) { |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1492 | dev_dbg(pcie->dev, "PCIe link is not up...!\n"); |
| 1493 | return; |
| 1494 | } |
| 1495 | |
Om Prakash Singh | 834c5cf | 2021-06-23 15:35:23 +0530 | [diff] [blame] | 1496 | /* |
| 1497 |  * The controller exits L2 only when a reset is applied, so it does |
| 1498 |  * not need to service interrupts after this point. But if L2 entry |
| 1499 |  * fails, PERST# is asserted, which can trigger a surprise link-down |
| 1500 |  * AER. Since this function is called from suspend_noirq(), that AER |
| 1501 |  * interrupt would never be processed. |
| 1502 |  * Disable all interrupts to avoid such a scenario. |
| 1503 |  */ |
| 1504 | appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0); |
| 1505 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1506 | if (tegra_pcie_try_link_l2(pcie)) { |
| 1507 | dev_info(pcie->dev, "Link didn't transition to L2 state\n"); |
| 1508 | /* |
| 1509 |  * The TX lane clock frequency resets to Gen1 only if the link is |
| 1510 |  * in the L2 or Detect state. |
| 1511 |  * So assert PERST# to the endpoint to force the root port into |
| 1512 |  * the Detect state. |
| 1513 |  */ |
| 1514 | data = appl_readl(pcie, APPL_PINMUX); |
| 1515 | data &= ~APPL_PINMUX_PEX_RST; |
| 1516 | appl_writel(pcie, data, APPL_PINMUX); |
| 1517 | |
Vidya Sagar | cf68e3b | 2020-12-03 19:04:51 +0530 | [diff] [blame] | 1518 | /* |
| 1519 | * Some cards do not go to detect state even after de-asserting |
| 1520 | * PERST#. So, de-assert LTSSM to bring link to detect state. |
| 1521 | */ |
| 1522 | data = readl(pcie->appl_base + APPL_CTRL); |
| 1523 | data &= ~APPL_CTRL_LTSSM_EN; |
| 1524 | writel(data, pcie->appl_base + APPL_CTRL); |
| 1525 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1526 | err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, |
| 1527 | data, |
| 1528 | ((data & |
| 1529 | APPL_DEBUG_LTSSM_STATE_MASK) >> |
| 1530 | APPL_DEBUG_LTSSM_STATE_SHIFT) == |
| 1531 | LTSSM_STATE_PRE_DETECT, |
| 1532 | 1, LTSSM_TIMEOUT); |
Vidya Sagar | cf68e3b | 2020-12-03 19:04:51 +0530 | [diff] [blame] | 1533 | if (err) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1534 | dev_info(pcie->dev, "Link didn't go to detect state\n"); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1535 | } |
| 1536 | /* |
| 1537 |  * DBI registers may not be accessible after this point because PLL-E |
| 1538 |  * may be powered down, depending on how the endpoint drives CLKREQ. |
| 1539 |  */ |
| 1540 | data = appl_readl(pcie, APPL_PINMUX); |
| 1541 | data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE); |
| 1542 | /* Cut REFCLK to slot */ |
| 1543 | data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; |
| 1544 | data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; |
| 1545 | appl_writel(pcie, data, APPL_PINMUX); |
| 1546 | } |
| 1547 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1548 | static void tegra_pcie_deinit_controller(struct tegra194_pcie *pcie) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1549 | { |
| 1550 | tegra_pcie_downstream_dev_to_D0(pcie); |
| 1551 | dw_pcie_host_deinit(&pcie->pci.pp); |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1552 | tegra194_pcie_pme_turnoff(pcie); |
Vidya Sagar | b8f0d67 | 2020-12-03 19:04:49 +0530 | [diff] [blame] | 1553 | tegra_pcie_unconfig_controller(pcie); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1554 | } |
| 1555 | |
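| | /* |
| |  * Root port bring-up at probe time: enable runtime PM, select the |
| |  * default pinctrl state for the sideband signals, initialize the |
| |  * controller and, if the link came up, create the per-controller |
| |  * debugfs directory. |
| |  */ |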
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1556 | static int tegra_pcie_config_rp(struct tegra194_pcie *pcie) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1557 | { |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1558 | struct device *dev = pcie->dev; |
| 1559 | char *name; |
| 1560 | int ret; |
| 1561 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1562 | pm_runtime_enable(dev); |
| 1563 | |
| 1564 | ret = pm_runtime_get_sync(dev); |
| 1565 | if (ret < 0) { |
| 1566 | dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", |
| 1567 | ret); |
Vidya Sagar | f4ff4fa | 2019-09-05 16:15:50 +0530 | [diff] [blame] | 1568 | goto fail_pm_get_sync; |
| 1569 | } |
| 1570 | |
| 1571 | ret = pinctrl_pm_select_default_state(dev); |
| 1572 | if (ret < 0) { |
| 1573 | dev_err(dev, "Failed to configure sideband pins: %d\n", ret); |
Dinghao Liu | 1c1dbb2 | 2020-05-21 11:13:49 +0800 | [diff] [blame] | 1574 | goto fail_pm_get_sync; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1575 | } |
| 1576 | |
Vidya Sagar | 3d710af | 2020-12-03 19:04:50 +0530 | [diff] [blame] | 1577 | ret = tegra_pcie_init_controller(pcie); |
| 1578 | if (ret < 0) { |
| 1579 | dev_err(dev, "Failed to initialize controller: %d\n", ret); |
| 1580 | goto fail_pm_get_sync; |
| 1581 | } |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1582 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1583 | pcie->link_state = tegra194_pcie_link_up(&pcie->pci); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1584 | if (!pcie->link_state) { |
| 1585 | ret = -ENOMEDIUM; |
| 1586 | goto fail_host_init; |
| 1587 | } |
| 1588 | |
| 1589 | name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); |
| 1590 | if (!name) { |
| 1591 | ret = -ENOMEM; |
| 1592 | goto fail_host_init; |
| 1593 | } |
| 1594 | |
| 1595 | pcie->debugfs = debugfs_create_dir(name, NULL); |
Greg Kroah-Hartman | d27b1cd | 2020-08-18 15:37:39 +0200 | [diff] [blame] | 1596 | init_debugfs(pcie); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1597 | |
| 1598 | return ret; |
| 1599 | |
| 1600 | fail_host_init: |
| 1601 | tegra_pcie_deinit_controller(pcie); |
Vidya Sagar | f4ff4fa | 2019-09-05 16:15:50 +0530 | [diff] [blame] | 1602 | fail_pm_get_sync: |
Dinghao Liu | 1c1dbb2 | 2020-05-21 11:13:49 +0800 | [diff] [blame] | 1603 | pm_runtime_put_sync(dev); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1604 | pm_runtime_disable(dev); |
| 1605 | return ret; |
| 1606 | } |
| 1607 | |
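| | /* |
| |  * PERST# asserted by the host: stop the LTSSM, wait for the Detect |
| |  * state, then undo the endpoint bring-up (core and APB resets, PHY, |
| |  * core clock, runtime PM and the BPMP UPHY PLL). |
| |  */ |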
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1608 | static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie) |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1609 | { |
| 1610 | u32 val; |
| 1611 | int ret; |
| 1612 | |
| 1613 | if (pcie->ep_state == EP_STATE_DISABLED) |
| 1614 | return; |
| 1615 | |
| 1616 | /* Disable LTSSM */ |
| 1617 | val = appl_readl(pcie, APPL_CTRL); |
| 1618 | val &= ~APPL_CTRL_LTSSM_EN; |
| 1619 | appl_writel(pcie, val, APPL_CTRL); |
| 1620 | |
| 1621 | ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val, |
| 1622 | ((val & APPL_DEBUG_LTSSM_STATE_MASK) >> |
| 1623 | APPL_DEBUG_LTSSM_STATE_SHIFT) == |
| 1624 | LTSSM_STATE_PRE_DETECT, |
| 1625 | 1, LTSSM_TIMEOUT); |
| 1626 | if (ret) |
| 1627 | dev_err(pcie->dev, "Failed to go to Detect state: %d\n", ret); |
| 1628 | |
| 1629 | reset_control_assert(pcie->core_rst); |
| 1630 | |
| 1631 | tegra_pcie_disable_phy(pcie); |
| 1632 | |
| 1633 | reset_control_assert(pcie->core_apb_rst); |
| 1634 | |
| 1635 | clk_disable_unprepare(pcie->core_clk); |
| 1636 | |
| 1637 | pm_runtime_put_sync(pcie->dev); |
| 1638 | |
| 1639 | ret = tegra_pcie_bpmp_set_pll_state(pcie, false); |
| 1640 | if (ret) |
| 1641 | dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); |
| 1642 | |
| 1643 | pcie->ep_state = EP_STATE_DISABLED; |
| 1644 | dev_dbg(pcie->dev, "Deinitialization of endpoint is completed\n"); |
| 1645 | } |
| 1646 | |
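| | /* |
| |  * PERST# released by the host: power the controller back up, program it |
| |  * for endpoint operation (DM type, interrupt enables, MSI-X address |
| |  * match registers, ASPM and Gen3/Gen4 equalization presets), notify the |
| |  * endpoint function driver through dw_pcie_ep_init_notify() and finally |
| |  * enable the LTSSM so link training can start. |
| |  */ |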
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1647 | static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie) |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1648 | { |
| 1649 | struct dw_pcie *pci = &pcie->pci; |
| 1650 | struct dw_pcie_ep *ep = &pci->ep; |
| 1651 | struct device *dev = pcie->dev; |
| 1652 | u32 val; |
| 1653 | int ret; |
| 1654 | |
| 1655 | if (pcie->ep_state == EP_STATE_ENABLED) |
| 1656 | return; |
| 1657 | |
Dinghao Liu | 5859c92 | 2021-04-08 15:26:58 +0800 | [diff] [blame] | 1658 | ret = pm_runtime_resume_and_get(dev); |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1659 | if (ret < 0) { |
| 1660 | dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", |
| 1661 | ret); |
| 1662 | return; |
| 1663 | } |
| 1664 | |
| 1665 | ret = tegra_pcie_bpmp_set_pll_state(pcie, true); |
| 1666 | if (ret) { |
| 1667 | dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret); |
| 1668 | goto fail_pll_init; |
| 1669 | } |
| 1670 | |
| 1671 | ret = clk_prepare_enable(pcie->core_clk); |
| 1672 | if (ret) { |
| 1673 | dev_err(dev, "Failed to enable core clock: %d\n", ret); |
| 1674 | goto fail_core_clk_enable; |
| 1675 | } |
| 1676 | |
| 1677 | ret = reset_control_deassert(pcie->core_apb_rst); |
| 1678 | if (ret) { |
| 1679 | dev_err(dev, "Failed to deassert core APB reset: %d\n", ret); |
| 1680 | goto fail_core_apb_rst; |
| 1681 | } |
| 1682 | |
| 1683 | ret = tegra_pcie_enable_phy(pcie); |
| 1684 | if (ret) { |
| 1685 | dev_err(dev, "Failed to enable PHY: %d\n", ret); |
| 1686 | goto fail_phy; |
| 1687 | } |
| 1688 | |
| 1689 | /* Clear any stale interrupt statuses */ |
| 1690 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); |
| 1691 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); |
| 1692 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); |
| 1693 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); |
| 1694 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); |
| 1695 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); |
| 1696 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); |
| 1697 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); |
| 1698 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); |
| 1699 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); |
| 1700 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); |
| 1701 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); |
| 1702 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); |
| 1703 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); |
| 1704 | appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); |
| 1705 | |
| 1706 | /* Configure this core for EP mode operation */ |
| 1707 | val = appl_readl(pcie, APPL_DM_TYPE); |
| 1708 | val &= ~APPL_DM_TYPE_MASK; |
| 1709 | val |= APPL_DM_TYPE_EP; |
| 1710 | appl_writel(pcie, val, APPL_DM_TYPE); |
| 1711 | |
| 1712 | appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); |
| 1713 | |
| 1714 | val = appl_readl(pcie, APPL_CTRL); |
| 1715 | val |= APPL_CTRL_SYS_PRE_DET_STATE; |
| 1716 | val |= APPL_CTRL_HW_HOT_RST_EN; |
| 1717 | appl_writel(pcie, val, APPL_CTRL); |
| 1718 | |
| 1719 | val = appl_readl(pcie, APPL_CFG_MISC); |
| 1720 | val |= APPL_CFG_MISC_SLV_EP_MODE; |
| 1721 | val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); |
| 1722 | appl_writel(pcie, val, APPL_CFG_MISC); |
| 1723 | |
| 1724 | val = appl_readl(pcie, APPL_PINMUX); |
| 1725 | val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; |
| 1726 | val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; |
| 1727 | appl_writel(pcie, val, APPL_PINMUX); |
| 1728 | |
| 1729 | appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, |
| 1730 | APPL_CFG_BASE_ADDR); |
| 1731 | |
| 1732 | appl_writel(pcie, pcie->atu_dma_res->start & |
| 1733 | APPL_CFG_IATU_DMA_BASE_ADDR_MASK, |
| 1734 | APPL_CFG_IATU_DMA_BASE_ADDR); |
| 1735 | |
| 1736 | val = appl_readl(pcie, APPL_INTR_EN_L0_0); |
| 1737 | val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; |
| 1738 | val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; |
| 1739 | val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN; |
| 1740 | appl_writel(pcie, val, APPL_INTR_EN_L0_0); |
| 1741 | |
| 1742 | val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); |
| 1743 | val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN; |
| 1744 | val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN; |
| 1745 | appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); |
| 1746 | |
| 1747 | reset_control_deassert(pcie->core_rst); |
| 1748 | |
| 1749 | if (pcie->update_fc_fixup) { |
| 1750 | val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); |
| 1751 | val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; |
| 1752 | dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); |
| 1753 | } |
| 1754 | |
| 1755 | config_gen3_gen4_eq_presets(pcie); |
| 1756 | |
| 1757 | init_host_aspm(pcie); |
| 1758 | |
| 1759 | /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */ |
| 1760 | if (!pcie->supports_clkreq) { |
| 1761 | disable_aspm_l11(pcie); |
| 1762 | disable_aspm_l12(pcie); |
| 1763 | } |
| 1764 | |
| 1765 | val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); |
| 1766 | val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; |
| 1767 | dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); |
| 1768 | |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1769 | pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, |
| 1770 | PCI_CAP_ID_EXP); |
| 1771 | clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); |
| 1772 | |
| 1773 | val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK); |
| 1774 | val |= MSIX_ADDR_MATCH_LOW_OFF_EN; |
| 1775 | dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val); |
Om Prakash Singh | 43537cf | 2021-06-23 15:35:22 +0530 | [diff] [blame] | 1776 | val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK); |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1777 | dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val); |
| 1778 | |
| 1779 | ret = dw_pcie_ep_init_complete(ep); |
| 1780 | if (ret) { |
| 1781 | dev_err(dev, "Failed to complete initialization: %d\n", ret); |
| 1782 | goto fail_init_complete; |
| 1783 | } |
| 1784 | |
| 1785 | dw_pcie_ep_init_notify(ep); |
| 1786 | |
| 1787 | /* Enable LTSSM */ |
| 1788 | val = appl_readl(pcie, APPL_CTRL); |
| 1789 | val |= APPL_CTRL_LTSSM_EN; |
| 1790 | appl_writel(pcie, val, APPL_CTRL); |
| 1791 | |
| 1792 | pcie->ep_state = EP_STATE_ENABLED; |
| 1793 | dev_dbg(dev, "Initialization of endpoint is completed\n"); |
| 1794 | |
| 1795 | return; |
| 1796 | |
| 1797 | fail_init_complete: |
| 1798 | reset_control_assert(pcie->core_rst); |
| 1799 | tegra_pcie_disable_phy(pcie); |
| 1800 | fail_phy: |
| 1801 | reset_control_assert(pcie->core_apb_rst); |
| 1802 | fail_core_apb_rst: |
| 1803 | clk_disable_unprepare(pcie->core_clk); |
| 1804 | fail_core_clk_enable: |
| 1805 | tegra_pcie_bpmp_set_pll_state(pcie, false); |
| 1806 | fail_pll_init: |
| 1807 | pm_runtime_put_sync(dev); |
| 1808 | } |
| 1809 | |
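| | /* |
| |  * Threaded handler for the PERST# GPIO interrupt (both edges): the |
| |  * current GPIO level decides whether this is an assert or a de-assert |
| |  * event. |
| |  */ |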
| 1810 | static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) |
| 1811 | { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1812 | struct tegra194_pcie *pcie = arg; |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1813 | |
| 1814 | if (gpiod_get_value(pcie->pex_rst_gpiod)) |
| 1815 | pex_ep_event_pex_rst_assert(pcie); |
| 1816 | else |
| 1817 | pex_ep_event_pex_rst_deassert(pcie); |
| 1818 | |
| 1819 | return IRQ_HANDLED; |
| 1820 | } |
| 1821 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1822 | static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq) |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1823 | { |
| 1824 | /* Tegra194 supports only INTA */ |
| 1825 | if (irq > 1) |
| 1826 | return -EINVAL; |
| 1827 | |
| 1828 | appl_writel(pcie, 1, APPL_LEGACY_INTX); |
| 1829 | usleep_range(1000, 2000); |
| 1830 | appl_writel(pcie, 0, APPL_LEGACY_INTX); |
| 1831 | return 0; |
| 1832 | } |
| 1833 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1834 | static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq) |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1835 | { |
| 1836 | if (unlikely(irq > 31)) |
| 1837 | return -EINVAL; |
| 1838 | |
Jon Hunter | f67092e | 2021-06-18 17:02:19 +0100 | [diff] [blame] | 1839 | appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1); |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1840 | |
| 1841 | return 0; |
| 1842 | } |
| 1843 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1844 | static int tegra_pcie_ep_raise_msix_irq(struct tegra194_pcie *pcie, u16 irq) |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1845 | { |
| 1846 | struct dw_pcie_ep *ep = &pcie->pci.ep; |
| 1847 | |
| 1848 | writel(irq, ep->msi_mem); |
| 1849 | |
| 1850 | return 0; |
| 1851 | } |
| 1852 | |
| 1853 | static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, |
| 1854 | enum pci_epc_irq_type type, |
| 1855 | u16 interrupt_num) |
| 1856 | { |
| 1857 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1858 | struct tegra194_pcie *pcie = to_tegra_pcie(pci); |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1859 | |
| 1860 | switch (type) { |
| 1861 | case PCI_EPC_IRQ_LEGACY: |
| 1862 | return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num); |
| 1863 | |
| 1864 | case PCI_EPC_IRQ_MSI: |
| 1865 | return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num); |
| 1866 | |
| 1867 | case PCI_EPC_IRQ_MSIX: |
| 1868 | return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num); |
| 1869 | |
| 1870 | default: |
| 1871 | dev_err(pci->dev, "Unknown IRQ type\n"); |
| 1872 | return -EPERM; |
| 1873 | } |
| 1874 | |
| 1875 | return 0; |
| 1876 | } |
| 1877 | |
| 1878 | static const struct pci_epc_features tegra_pcie_epc_features = { |
| 1879 | .linkup_notifier = true, |
| 1880 | .core_init_notifier = true, |
| 1881 | .msi_capable = false, |
| 1882 | .msix_capable = false, |
| 1883 | .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5, |
| 1884 | .bar_fixed_64bit = 1 << BAR_0, |
| 1885 | .bar_fixed_size[0] = SZ_1M, |
| 1886 | }; |
| 1887 | |
| 1888 | static const struct pci_epc_features* |
| 1889 | tegra_pcie_ep_get_features(struct dw_pcie_ep *ep) |
| 1890 | { |
| 1891 | return &tegra_pcie_epc_features; |
| 1892 | } |
| 1893 | |
Rikard Falkeborn | d895ce7 | 2021-02-07 23:16:04 +0100 | [diff] [blame] | 1894 | static const struct dw_pcie_ep_ops pcie_ep_ops = { |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1895 | .raise_irq = tegra_pcie_ep_raise_irq, |
| 1896 | .get_features = tegra_pcie_ep_get_features, |
| 1897 | }; |
| 1898 | |
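| | /* |
| |  * Endpoint-mode setup: register the DesignWare endpoint core, debounce |
| |  * the PERST# GPIO and request its edge-triggered threaded interrupt |
| |  * (flagged IRQ_NOAUTOEN so it only fires once the endpoint is started), |
| |  * then let dw_pcie_ep_init() create the EPC device. |
| |  */ |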
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1899 | static int tegra_pcie_config_ep(struct tegra194_pcie *pcie, |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1900 | struct platform_device *pdev) |
| 1901 | { |
| 1902 | struct dw_pcie *pci = &pcie->pci; |
| 1903 | struct device *dev = pcie->dev; |
| 1904 | struct dw_pcie_ep *ep; |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1905 | char *name; |
| 1906 | int ret; |
| 1907 | |
| 1908 | ep = &pci->ep; |
| 1909 | ep->ops = &pcie_ep_ops; |
| 1910 | |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1911 | ep->page_size = SZ_64K; |
| 1912 | |
| 1913 | ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME); |
| 1914 | if (ret < 0) { |
| 1915 | dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n", |
| 1916 | ret); |
| 1917 | return ret; |
| 1918 | } |
| 1919 | |
| 1920 | ret = gpiod_to_irq(pcie->pex_rst_gpiod); |
| 1921 | if (ret < 0) { |
| 1922 | dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret); |
| 1923 | return ret; |
| 1924 | } |
| 1925 | pcie->pex_rst_irq = (unsigned int)ret; |
| 1926 | |
| 1927 | name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq", |
| 1928 | pcie->cid); |
| 1929 | if (!name) { |
| 1930 | dev_err(dev, "Failed to create PERST IRQ string\n"); |
| 1931 | return -ENOMEM; |
| 1932 | } |
| 1933 | |
| 1934 | irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN); |
| 1935 | |
| 1936 | pcie->ep_state = EP_STATE_DISABLED; |
| 1937 | |
| 1938 | ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL, |
| 1939 | tegra_pcie_ep_pex_rst_irq, |
| 1940 | IRQF_TRIGGER_RISING | |
| 1941 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, |
| 1942 | name, (void *)pcie); |
| 1943 | if (ret < 0) { |
| 1944 | dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret); |
| 1945 | return ret; |
| 1946 | } |
| 1947 | |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1948 | pm_runtime_enable(dev); |
| 1949 | |
| 1950 | ret = dw_pcie_ep_init(ep); |
| 1951 | if (ret) { |
| 1952 | dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n", |
| 1953 | ret); |
| 1954 | return ret; |
| 1955 | } |
| 1956 | |
| 1957 | return 0; |
| 1958 | } |
| 1959 | |
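| | /* |
| |  * Common probe entry for both operating modes: the matched compatible |
| |  * string ("nvidia,tegra194-pcie" vs "nvidia,tegra194-pcie-ep") selects |
| |  * DW_PCIE_RC_TYPE or DW_PCIE_EP_TYPE, and the resources shared by both |
| |  * modes (clock, resets, regulators, PHYs, register regions, BPMP handle) |
| |  * are acquired before branching into tegra_pcie_config_rp() or |
| |  * tegra_pcie_config_ep(). |
| |  */ |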
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1960 | static int tegra194_pcie_probe(struct platform_device *pdev) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1961 | { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1962 | const struct tegra194_pcie_of_data *data; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1963 | struct device *dev = &pdev->dev; |
| 1964 | struct resource *atu_dma_res; |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1965 | struct tegra194_pcie *pcie; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1966 | struct pcie_port *pp; |
| 1967 | struct dw_pcie *pci; |
| 1968 | struct phy **phys; |
| 1969 | char *name; |
| 1970 | int ret; |
| 1971 | u32 i; |
| 1972 | |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1973 | data = of_device_get_match_data(dev); |
| 1974 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1975 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); |
| 1976 | if (!pcie) |
| 1977 | return -ENOMEM; |
| 1978 | |
| 1979 | pci = &pcie->pci; |
| 1980 | pci->dev = &pdev->dev; |
| 1981 | pci->ops = &tegra_dw_pcie_ops; |
Rob Herring | aeaa0bf | 2020-08-20 21:54:19 -0600 | [diff] [blame] | 1982 | pci->n_fts[0] = N_FTS_VAL; |
| 1983 | pci->n_fts[1] = FTS_VAL; |
Vidya Sagar | 01254b6 | 2020-12-03 19:04:48 +0530 | [diff] [blame] | 1984 | pci->version = 0x490A; |
Rob Herring | aeaa0bf | 2020-08-20 21:54:19 -0600 | [diff] [blame] | 1985 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1986 | pp = &pci->pp; |
Rob Herring | 331e9bc | 2020-11-05 15:11:50 -0600 | [diff] [blame] | 1987 | pp->num_vectors = MAX_MSI_IRQS; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1988 | pcie->dev = &pdev->dev; |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 1989 | pcie->mode = (enum dw_pcie_device_mode)data->mode; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1990 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 1991 | ret = tegra194_pcie_parse_dt(pcie); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 1992 | if (ret < 0) { |
Thierry Reding | 5445523 | 2020-03-19 14:12:30 +0100 | [diff] [blame] | 1993 | const char *level = KERN_ERR; |
| 1994 | |
| 1995 | if (ret == -EPROBE_DEFER) |
| 1996 | level = KERN_DEBUG; |
| 1997 | |
| 1998 | dev_printk(level, dev, |
| 1999 | dev_fmt("Failed to parse device tree: %d\n"), |
| 2000 | ret); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2001 | return ret; |
| 2002 | } |
| 2003 | |
Vidya Sagar | 0a901f2 | 2019-09-05 16:15:51 +0530 | [diff] [blame] | 2004 | ret = tegra_pcie_get_slot_regulators(pcie); |
| 2005 | if (ret < 0) { |
Thierry Reding | 5445523 | 2020-03-19 14:12:30 +0100 | [diff] [blame] | 2006 | const char *level = KERN_ERR; |
| 2007 | |
| 2008 | if (ret == -EPROBE_DEFER) |
| 2009 | level = KERN_DEBUG; |
| 2010 | |
| 2011 | dev_printk(level, dev, |
| 2012 | dev_fmt("Failed to get slot regulators: %d\n"), |
| 2013 | ret); |
Vidya Sagar | 0a901f2 | 2019-09-05 16:15:51 +0530 | [diff] [blame] | 2014 | return ret; |
| 2015 | } |
| 2016 | |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 2017 | if (pcie->pex_refclk_sel_gpiod) |
| 2018 | gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1); |
| 2019 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2020 | pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl"); |
| 2021 | if (IS_ERR(pcie->pex_ctl_supply)) { |
| 2022 | ret = PTR_ERR(pcie->pex_ctl_supply); |
| 2023 | if (ret != -EPROBE_DEFER) |
| 2024 | dev_err(dev, "Failed to get regulator: %ld\n", |
| 2025 | PTR_ERR(pcie->pex_ctl_supply)); |
| 2026 | return ret; |
| 2027 | } |
| 2028 | |
| 2029 | pcie->core_clk = devm_clk_get(dev, "core"); |
| 2030 | if (IS_ERR(pcie->core_clk)) { |
| 2031 | dev_err(dev, "Failed to get core clock: %ld\n", |
| 2032 | PTR_ERR(pcie->core_clk)); |
| 2033 | return PTR_ERR(pcie->core_clk); |
| 2034 | } |
| 2035 | |
| 2036 | pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
| 2037 | "appl"); |
| 2038 | if (!pcie->appl_res) { |
| 2039 | dev_err(dev, "Failed to find \"appl\" region\n"); |
| 2040 | return -ENODEV; |
| 2041 | } |
| 2042 | |
| 2043 | pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res); |
| 2044 | if (IS_ERR(pcie->appl_base)) |
| 2045 | return PTR_ERR(pcie->appl_base); |
| 2046 | |
| 2047 | pcie->core_apb_rst = devm_reset_control_get(dev, "apb"); |
| 2048 | if (IS_ERR(pcie->core_apb_rst)) { |
| 2049 | dev_err(dev, "Failed to get APB reset: %ld\n", |
| 2050 | PTR_ERR(pcie->core_apb_rst)); |
| 2051 | return PTR_ERR(pcie->core_apb_rst); |
| 2052 | } |
| 2053 | |
| 2054 | phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL); |
| 2055 | if (!phys) |
| 2056 | return -ENOMEM; |
| 2057 | |
| 2058 | for (i = 0; i < pcie->phy_count; i++) { |
| 2059 | name = kasprintf(GFP_KERNEL, "p2u-%u", i); |
| 2060 | if (!name) { |
| 2061 | dev_err(dev, "Failed to create P2U string\n"); |
| 2062 | return -ENOMEM; |
| 2063 | } |
| 2064 | phys[i] = devm_phy_get(dev, name); |
| 2065 | kfree(name); |
| 2066 | if (IS_ERR(phys[i])) { |
| 2067 | ret = PTR_ERR(phys[i]); |
| 2068 | if (ret != -EPROBE_DEFER) |
| 2069 | dev_err(dev, "Failed to get PHY: %d\n", ret); |
| 2070 | return ret; |
| 2071 | } |
| 2072 | } |
| 2073 | |
| 2074 | pcie->phys = phys; |
| 2075 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2076 | atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
| 2077 | "atu_dma"); |
| 2078 | if (!atu_dma_res) { |
| 2079 | dev_err(dev, "Failed to find \"atu_dma\" region\n"); |
| 2080 | return -ENODEV; |
| 2081 | } |
| 2082 | pcie->atu_dma_res = atu_dma_res; |
| 2083 | |
Rob Herring | 281f1f9 | 2020-11-05 15:11:59 -0600 | [diff] [blame] | 2084 | pci->atu_size = resource_size(atu_dma_res); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2085 | pci->atu_base = devm_ioremap_resource(dev, atu_dma_res); |
| 2086 | if (IS_ERR(pci->atu_base)) |
| 2087 | return PTR_ERR(pci->atu_base); |
| 2088 | |
| 2089 | pcie->core_rst = devm_reset_control_get(dev, "core"); |
| 2090 | if (IS_ERR(pcie->core_rst)) { |
| 2091 | dev_err(dev, "Failed to get core reset: %ld\n", |
| 2092 | PTR_ERR(pcie->core_rst)); |
| 2093 | return PTR_ERR(pcie->core_rst); |
| 2094 | } |
| 2095 | |
| 2096 | pp->irq = platform_get_irq_byname(pdev, "intr"); |
Krzysztof Wilczyński | caecb05 | 2020-08-02 14:25:53 +0000 | [diff] [blame] | 2097 | if (pp->irq < 0) |
Aman Sharma | 0584bff | 2020-03-12 00:49:02 +0530 | [diff] [blame] | 2098 | return pp->irq; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2099 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2100 | pcie->bpmp = tegra_bpmp_get(dev); |
| 2101 | if (IS_ERR(pcie->bpmp)) |
| 2102 | return PTR_ERR(pcie->bpmp); |
| 2103 | |
| 2104 | platform_set_drvdata(pdev, pcie); |
| 2105 | |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 2106 | switch (pcie->mode) { |
| 2107 | case DW_PCIE_RC_TYPE: |
| 2108 | ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler, |
| 2109 | IRQF_SHARED, "tegra-pcie-intr", pcie); |
| 2110 | if (ret) { |
| 2111 | dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, |
| 2112 | ret); |
| 2113 | goto fail; |
| 2114 | } |
| 2115 | |
| 2116 | ret = tegra_pcie_config_rp(pcie); |
| 2117 | if (ret && ret != -ENOMEDIUM) |
| 2118 | goto fail; |
| 2119 | else |
| 2120 | return 0; |
| 2121 | break; |
| 2122 | |
| 2123 | case DW_PCIE_EP_TYPE: |
| 2124 | ret = devm_request_threaded_irq(dev, pp->irq, |
| 2125 | tegra_pcie_ep_hard_irq, |
| 2126 | tegra_pcie_ep_irq_thread, |
| 2127 | IRQF_SHARED | IRQF_ONESHOT, |
| 2128 | "tegra-pcie-ep-intr", pcie); |
| 2129 | if (ret) { |
| 2130 | dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, |
| 2131 | ret); |
| 2132 | goto fail; |
| 2133 | } |
| 2134 | |
| 2135 | ret = tegra_pcie_config_ep(pcie, pdev); |
| 2136 | if (ret < 0) |
| 2137 | goto fail; |
| 2138 | break; |
| 2139 | |
| 2140 | default: |
| 2141 | 	dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode); |
| | 	ret = -EINVAL; |
| 2142 | } |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2143 | |
| 2144 | fail: |
| 2145 | tegra_bpmp_put(pcie->bpmp); |
| 2146 | return ret; |
| 2147 | } |
| 2148 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2149 | static int tegra194_pcie_remove(struct platform_device *pdev) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2150 | { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2151 | struct tegra194_pcie *pcie = platform_get_drvdata(pdev); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2152 | |
| 2153 | if (!pcie->link_state) |
| 2154 | return 0; |
| 2155 | |
| 2156 | debugfs_remove_recursive(pcie->debugfs); |
| 2157 | tegra_pcie_deinit_controller(pcie); |
| 2158 | pm_runtime_put_sync(pcie->dev); |
| 2159 | pm_runtime_disable(pcie->dev); |
| 2160 | tegra_bpmp_put(pcie->bpmp); |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 2161 | if (pcie->pex_refclk_sel_gpiod) |
| 2162 | gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2163 | |
| 2164 | return 0; |
| 2165 | } |
| 2166 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2167 | static int tegra194_pcie_suspend_late(struct device *dev) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2168 | { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2169 | struct tegra194_pcie *pcie = dev_get_drvdata(dev); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2170 | u32 val; |
| 2171 | |
| 2172 | if (!pcie->link_state) |
| 2173 | return 0; |
| 2174 | |
| 2175 | /* Enable HW_HOT_RST mode */ |
| 2176 | val = appl_readl(pcie, APPL_CTRL); |
| 2177 | val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << |
| 2178 | APPL_CTRL_HW_HOT_RST_MODE_SHIFT); |
| 2179 | val |= APPL_CTRL_HW_HOT_RST_EN; |
| 2180 | appl_writel(pcie, val, APPL_CTRL); |
| 2181 | |
| 2182 | return 0; |
| 2183 | } |
| 2184 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2185 | static int tegra194_pcie_suspend_noirq(struct device *dev) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2186 | { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2187 | struct tegra194_pcie *pcie = dev_get_drvdata(dev); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2188 | |
| 2189 | if (!pcie->link_state) |
| 2190 | return 0; |
| 2191 | |
| 2192 | /* Save MSI interrupt vector */ |
| 2193 | pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci, |
| 2194 | PORT_LOGIC_MSI_CTRL_INT_0_EN); |
| 2195 | tegra_pcie_downstream_dev_to_D0(pcie); |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2196 | tegra194_pcie_pme_turnoff(pcie); |
Vidya Sagar | b8f0d67 | 2020-12-03 19:04:49 +0530 | [diff] [blame] | 2197 | tegra_pcie_unconfig_controller(pcie); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2198 | |
Vidya Sagar | b8f0d67 | 2020-12-03 19:04:49 +0530 | [diff] [blame] | 2199 | return 0; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2200 | } |
| 2201 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2202 | static int tegra194_pcie_resume_noirq(struct device *dev) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2203 | { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2204 | struct tegra194_pcie *pcie = dev_get_drvdata(dev); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2205 | int ret; |
| 2206 | |
| 2207 | if (!pcie->link_state) |
| 2208 | return 0; |
| 2209 | |
| 2210 | ret = tegra_pcie_config_controller(pcie, true); |
| 2211 | if (ret < 0) |
| 2212 | return ret; |
| 2213 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2214 | ret = tegra194_pcie_host_init(&pcie->pci.pp); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2215 | if (ret < 0) { |
| 2216 | dev_err(dev, "Failed to init host: %d\n", ret); |
| 2217 | goto fail_host_init; |
| 2218 | } |
| 2219 | |
Vidya Sagar | c4bf1f2 | 2021-05-04 22:51:57 +0530 | [diff] [blame] | 2220 | dw_pcie_setup_rc(&pcie->pci.pp); |
| 2221 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2222 | ret = tegra194_pcie_start_link(&pcie->pci); |
Rob Herring | 275e88b | 2020-12-18 08:39:05 -0600 | [diff] [blame] | 2223 | if (ret < 0) |
| 2224 | goto fail_host_init; |
| 2225 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2226 | /* Restore MSI interrupt vector */ |
| 2227 | dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN, |
| 2228 | pcie->msi_ctrl_int); |
| 2229 | |
| 2230 | return 0; |
| 2231 | |
| 2232 | fail_host_init: |
Vidya Sagar | b8f0d67 | 2020-12-03 19:04:49 +0530 | [diff] [blame] | 2233 | tegra_pcie_unconfig_controller(pcie); |
| 2234 | return ret; |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2235 | } |
| 2236 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2237 | static int tegra194_pcie_resume_early(struct device *dev) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2238 | { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2239 | struct tegra194_pcie *pcie = dev_get_drvdata(dev); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2240 | u32 val; |
| 2241 | |
Om Prakash Singh | de2bbf2 | 2021-06-23 15:35:24 +0530 | [diff] [blame] | 2242 | if (pcie->mode == DW_PCIE_EP_TYPE) { |
| 2243 | dev_err(dev, "Suspend is not supported in EP mode\n"); |
| 2244 | return -ENOTSUPP; |
| 2245 | } |
| 2246 | |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2247 | if (!pcie->link_state) |
| 2248 | return 0; |
| 2249 | |
| 2250 | /* Disable HW_HOT_RST mode */ |
| 2251 | val = appl_readl(pcie, APPL_CTRL); |
| 2252 | val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << |
| 2253 | APPL_CTRL_HW_HOT_RST_MODE_SHIFT); |
| 2254 | val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST << |
| 2255 | APPL_CTRL_HW_HOT_RST_MODE_SHIFT; |
| 2256 | val &= ~APPL_CTRL_HW_HOT_RST_EN; |
| 2257 | appl_writel(pcie, val, APPL_CTRL); |
| 2258 | |
| 2259 | return 0; |
| 2260 | } |
| 2261 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2262 | static void tegra194_pcie_shutdown(struct platform_device *pdev) |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2263 | { |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2264 | struct tegra194_pcie *pcie = platform_get_drvdata(pdev); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2265 | |
| 2266 | if (!pcie->link_state) |
| 2267 | return; |
| 2268 | |
| 2269 | debugfs_remove_recursive(pcie->debugfs); |
| 2270 | tegra_pcie_downstream_dev_to_D0(pcie); |
| 2271 | |
| 2272 | disable_irq(pcie->pci.pp.irq); |
| 2273 | if (IS_ENABLED(CONFIG_PCI_MSI)) |
| 2274 | disable_irq(pcie->pci.pp.msi_irq); |
| 2275 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2276 | tegra194_pcie_pme_turnoff(pcie); |
Vidya Sagar | b8f0d67 | 2020-12-03 19:04:49 +0530 | [diff] [blame] | 2277 | tegra_pcie_unconfig_controller(pcie); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2278 | } |
| 2279 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2280 | static const struct tegra194_pcie_of_data tegra194_pcie_rc_of_data = { |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 2281 | .mode = DW_PCIE_RC_TYPE, |
| 2282 | }; |
| 2283 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2284 | static const struct tegra194_pcie_of_data tegra194_pcie_ep_of_data = { |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 2285 | .mode = DW_PCIE_EP_TYPE, |
| 2286 | }; |
| 2287 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2288 | static const struct of_device_id tegra194_pcie_of_match[] = { |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2289 | { |
| 2290 | .compatible = "nvidia,tegra194-pcie", |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2291 | .data = &tegra194_pcie_rc_of_data, |
Vidya Sagar | c57247f | 2020-03-03 23:40:52 +0530 | [diff] [blame] | 2292 | }, |
| 2293 | { |
| 2294 | .compatible = "nvidia,tegra194-pcie-ep", |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2295 | .data = &tegra194_pcie_ep_of_data, |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2296 | }, |
| 2297 | {}, |
| 2298 | }; |
| 2299 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2300 | static const struct dev_pm_ops tegra194_pcie_pm_ops = { |
| 2301 | .suspend_late = tegra194_pcie_suspend_late, |
| 2302 | .suspend_noirq = tegra194_pcie_suspend_noirq, |
| 2303 | .resume_noirq = tegra194_pcie_resume_noirq, |
| 2304 | .resume_early = tegra194_pcie_resume_early, |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2305 | }; |
| 2306 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2307 | static struct platform_driver tegra194_pcie_driver = { |
| 2308 | .probe = tegra194_pcie_probe, |
| 2309 | .remove = tegra194_pcie_remove, |
| 2310 | .shutdown = tegra194_pcie_shutdown, |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2311 | .driver = { |
| 2312 | .name = "tegra194-pcie", |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2313 | .pm = &tegra194_pcie_pm_ops, |
| 2314 | .of_match_table = tegra194_pcie_of_match, |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2315 | }, |
| 2316 | }; |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2317 | module_platform_driver(tegra194_pcie_driver); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2318 | |
Fan Fei | b572569 | 2021-12-22 19:10:50 -0600 | [diff] [blame] | 2319 | MODULE_DEVICE_TABLE(of, tegra194_pcie_of_match); |
Vidya Sagar | 56e15a2 | 2019-08-13 17:06:27 +0530 | [diff] [blame] | 2320 | |
| 2321 | MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>"); |
| 2322 | MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); |
| 2323 | MODULE_LICENSE("GPL v2"); |