// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Intel Gateway SoCs
 *
 * Copyright (c) 2019 Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/pci_regs.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PORT_AFR_N_FTS_GEN12_DFT	(SZ_128 - 1)
#define PORT_AFR_N_FTS_GEN3		180
#define PORT_AFR_N_FTS_GEN4		196

/* PCIe Application logic Registers */
#define PCIE_APP_CCR			0x10
#define PCIE_APP_CCR_LTSSM_ENABLE	BIT(0)

#define PCIE_APP_MSG_CR			0x30
#define PCIE_APP_MSG_XMT_PM_TURNOFF	BIT(0)

#define PCIE_APP_PMC			0x44
#define PCIE_APP_PMC_IN_L2		BIT(20)

#define PCIE_APP_IRNEN			0xF4
#define PCIE_APP_IRNCR			0xF8
#define PCIE_APP_IRN_AER_REPORT		BIT(0)
#define PCIE_APP_IRN_PME		BIT(2)
#define PCIE_APP_IRN_RX_VDM_MSG		BIT(4)
#define PCIE_APP_IRN_PM_TO_ACK		BIT(9)
#define PCIE_APP_IRN_LINK_AUTO_BW_STAT	BIT(11)
#define PCIE_APP_IRN_BW_MGT		BIT(12)
#define PCIE_APP_IRN_INTA		BIT(13)
#define PCIE_APP_IRN_INTB		BIT(14)
#define PCIE_APP_IRN_INTC		BIT(15)
#define PCIE_APP_IRN_INTD		BIT(16)
#define PCIE_APP_IRN_MSG_LTR		BIT(18)
#define PCIE_APP_IRN_SYS_ERR_RC		BIT(29)
#define PCIE_APP_INTX_OFST		12

#define PCIE_APP_IRN_INT \
	(PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
	PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
	PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
	PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
	PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
	PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)

#define BUS_IATU_OFFSET			SZ_256M
#define RESET_INTERVAL_MS		100

struct intel_pcie_soc {
	unsigned int pcie_ver;
};

struct intel_pcie {
	struct dw_pcie		pci;
	void __iomem		*app_base;
	struct gpio_desc	*reset_gpio;
	u32			rst_intrvl;
	struct clk		*core_clk;
	struct reset_control	*core_rst;
	struct phy		*phy;
};

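/*
 * Read-modify-write helper: update only the bits covered by @mask and
 * skip the register write entirely when the value would not change.
 */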
static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
{
	u32 old;

	old = readl(base + ofs);
	val = (old & ~mask) | (val & mask);

	if (val != old)
		writel(val, base + ofs);
}

static inline void pcie_app_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
{
	writel(val, pcie->app_base + ofs);
}

static void pcie_app_wr_mask(struct intel_pcie *pcie, u32 ofs,
			     u32 mask, u32 val)
{
	pcie_update_bits(pcie->app_base, ofs, mask, val);
}

static inline u32 pcie_rc_cfg_rd(struct intel_pcie *pcie, u32 ofs)
{
	return dw_pcie_readl_dbi(&pcie->pci, ofs);
}

static inline void pcie_rc_cfg_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
{
	dw_pcie_writel_dbi(&pcie->pci, ofs, val);
}

static void pcie_rc_cfg_wr_mask(struct intel_pcie *pcie, u32 ofs,
				u32 mask, u32 val)
{
	pcie_update_bits(pcie->pci.dbi_base, ofs, mask, val);
}

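/*
 * The LTSSM (Link Training and Status State Machine) is gated by the
 * LTSSM_ENABLE bit in the application logic's control register, so link
 * training only starts once the core has been fully configured.
 */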
static void intel_pcie_ltssm_enable(struct intel_pcie *pcie)
{
	pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
			 PCIE_APP_CCR_LTSSM_ENABLE);
}

static void intel_pcie_ltssm_disable(struct intel_pcie *pcie)
{
	pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
}

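/*
 * Clear the Link Disable bit and the ASPM Control field in the Root
 * Port's Link Control register before the link is brought up.
 */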
static void intel_pcie_link_setup(struct intel_pcie *pcie)
{
	u32 val;
	u8 offset = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP);

	val = pcie_rc_cfg_rd(pcie, offset + PCI_EXP_LNKCTL);

	val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
	pcie_rc_cfg_wr(pcie, offset + PCI_EXP_LNKCTL, val);
}

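/*
 * N_FTS is the number of Fast Training Sequences required to retrain the
 * link when exiting a low-power link state. n_fts[0] carries the Gen1/Gen2
 * default, while n_fts[1] is chosen according to the target link speed.
 */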
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
{
	switch (pci->link_gen) {
	case 3:
		pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
		break;
	case 4:
		pci->n_fts[1] = PORT_AFR_N_FTS_GEN4;
		break;
	default:
		pci->n_fts[1] = PORT_AFR_N_FTS_GEN12_DFT;
		break;
	}
	pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
}

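/*
 * Claim the GPIO that controls the endpoint reset line (typically PERST#)
 * and let the initial reset last at least 100us before continuing.
 */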
static int intel_pcie_ep_rst_init(struct intel_pcie *pcie)
{
	struct device *dev = pcie->pci.dev;
	int ret;

	pcie->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(pcie->reset_gpio)) {
		ret = PTR_ERR(pcie->reset_gpio);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
		return ret;
	}

	/* Make initial reset last for 100us */
	usleep_range(100, 200);

	return 0;
}

static void intel_pcie_core_rst_assert(struct intel_pcie *pcie)
{
	reset_control_assert(pcie->core_rst);
}

static void intel_pcie_core_rst_deassert(struct intel_pcie *pcie)
{
	/*
	 * A one microsecond delay makes sure the reset pulse is wide
	 * enough so that the core reset is clean.
	 */
	udelay(1);
	reset_control_deassert(pcie->core_rst);

	/*
	 * On some SoCs the core reset also resets the PHY, so more delay
	 * is needed to make sure the whole reset process is done.
	 */
	usleep_range(1000, 2000);
}

static void intel_pcie_device_rst_assert(struct intel_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset_gpio, 1);
}

static void intel_pcie_device_rst_deassert(struct intel_pcie *pcie)
{
	msleep(pcie->rst_intrvl);
	gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}

static void intel_pcie_core_irq_disable(struct intel_pcie *pcie)
{
	pcie_app_wr(pcie, PCIE_APP_IRNEN, 0);
	pcie_app_wr(pcie, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
}

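/*
 * Collect the platform resources described in the device tree: the core
 * clock, the core reset line, the optional "reset-assert-ms" interval
 * (defaulting to 100ms), the application logic register block ("app")
 * and the "pcie" PHY.
 */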
static int intel_pcie_get_resources(struct platform_device *pdev)
{
	struct intel_pcie *pcie = platform_get_drvdata(pdev);
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	pcie->core_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(pcie->core_clk)) {
		ret = PTR_ERR(pcie->core_clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get clks: %d\n", ret);
		return ret;
	}

	pcie->core_rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(pcie->core_rst)) {
		ret = PTR_ERR(pcie->core_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get resets: %d\n", ret);
		return ret;
	}

	ret = device_property_read_u32(dev, "reset-assert-ms",
				       &pcie->rst_intrvl);
	if (ret)
		pcie->rst_intrvl = RESET_INTERVAL_MS;

	pcie->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
	if (IS_ERR(pcie->app_base))
		return PTR_ERR(pcie->app_base);

	pcie->phy = devm_phy_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
		return ret;
	}

	return 0;
}

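/*
 * Ask the link to enter L2: broadcast a PME_Turn_Off message through the
 * application logic, then poll the PMC status register for up to five
 * seconds until the link reports it has settled in L2. Links configured
 * below Gen3 skip the handshake.
 */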
static int intel_pcie_wait_l2(struct intel_pcie *pcie)
{
	u32 value;
	int ret;
	struct dw_pcie *pci = &pcie->pci;

	if (pci->link_gen < 3)
		return 0;

	/* Send PME_TURN_OFF message */
	pcie_app_wr_mask(pcie, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
			 PCIE_APP_MSG_XMT_PM_TURNOFF);

	/* Read PMC status and wait for the link to fall into the L2 state */
	ret = readl_poll_timeout(pcie->app_base + PCIE_APP_PMC, value,
				 value & PCIE_APP_PMC_IN_L2, 20,
				 jiffies_to_usecs(5 * HZ));
	if (ret)
		dev_err(pcie->pci.dev, "PCIe link enter L2 timeout!\n");

	return ret;
}

static void intel_pcie_turn_off(struct intel_pcie *pcie)
{
	if (dw_pcie_link_up(&pcie->pci))
		intel_pcie_wait_l2(pcie);

	/* Put endpoint device in reset state */
	intel_pcie_device_rst_assert(pcie);
	pcie_rc_cfg_wr_mask(pcie, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
}

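/*
 * Full controller bring-up: hold the core and the endpoint in reset,
 * initialize the PHY, release the core reset, enable the core clock,
 * program the DesignWare core (link control, N_FTS, RC setup), then
 * release the endpoint reset, start the LTSSM and wait for the link.
 * Interrupts in the application logic are only enabled once the link
 * is up.
 */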
static int intel_pcie_host_setup(struct intel_pcie *pcie)
{
	int ret;
	struct dw_pcie *pci = &pcie->pci;

	intel_pcie_core_rst_assert(pcie);
	intel_pcie_device_rst_assert(pcie);

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	intel_pcie_core_rst_deassert(pcie);

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->pci.dev, "Core clock enable failed: %d\n", ret);
		goto clk_err;
	}

	/* On this SoC the iATU registers sit at a fixed offset from DBI */
	pci->atu_base = pci->dbi_base + 0xC0000;

	intel_pcie_ltssm_disable(pcie);
	intel_pcie_link_setup(pcie);
	intel_pcie_init_n_fts(pci);
	dw_pcie_setup_rc(&pci->pp);
	dw_pcie_upconfig_setup(pci);

	intel_pcie_device_rst_deassert(pcie);
	intel_pcie_ltssm_enable(pcie);

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		goto app_init_err;

	/* Enable integrated interrupts */
	pcie_app_wr_mask(pcie, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
			 PCIE_APP_IRN_INT);

	return 0;

app_init_err:
	clk_disable_unprepare(pcie->core_clk);
clk_err:
	intel_pcie_core_rst_assert(pcie);
	phy_exit(pcie->phy);

	return ret;
}

static void __intel_pcie_remove(struct intel_pcie *pcie)
{
	intel_pcie_core_irq_disable(pcie);
	intel_pcie_turn_off(pcie);
	clk_disable_unprepare(pcie->core_clk);
	intel_pcie_core_rst_assert(pcie);
	phy_exit(pcie->phy);
}

static int intel_pcie_remove(struct platform_device *pdev)
{
	struct intel_pcie *pcie = platform_get_drvdata(pdev);
	struct pcie_port *pp = &pcie->pci.pp;

	dw_pcie_host_deinit(pp);
	__intel_pcie_remove(pcie);

	return 0;
}

static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
{
	struct intel_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	intel_pcie_core_irq_disable(pcie);
	ret = intel_pcie_wait_l2(pcie);
	if (ret)
		return ret;

	phy_exit(pcie->phy);
	clk_disable_unprepare(pcie->core_clk);
	return ret;
}

static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
{
	struct intel_pcie *pcie = dev_get_drvdata(dev);

	return intel_pcie_host_setup(pcie);
}

static int intel_pcie_rc_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct intel_pcie *pcie = dev_get_drvdata(pci->dev);

	return intel_pcie_host_setup(pcie);
}

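/*
 * CPU addresses programmed into the outbound iATU are shifted by a fixed
 * 256MB offset (BUS_IATU_OFFSET) to produce the bus address seen by the
 * PCIe core.
 */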
static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
{
	return cpu_addr + BUS_IATU_OFFSET;
}

static const struct dw_pcie_ops intel_pcie_ops = {
	.cpu_addr_fixup = intel_pcie_cpu_addr,
};

static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
	.host_init = intel_pcie_rc_init,
};

static const struct intel_pcie_soc pcie_data = {
	.pcie_ver = 0x520A,
};

static int intel_pcie_probe(struct platform_device *pdev)
{
	const struct intel_pcie_soc *data;
	struct device *dev = &pdev->dev;
	struct intel_pcie *pcie;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	platform_set_drvdata(pdev, pcie);
	pci = &pcie->pci;
	pci->dev = dev;
	pp = &pci->pp;

	ret = intel_pcie_get_resources(pdev);
	if (ret)
		return ret;

	ret = intel_pcie_ep_rst_init(pcie);
	if (ret)
		return ret;

	data = device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	pci->ops = &intel_pcie_ops;
	pci->version = data->pcie_ver;
	pp->ops = &intel_pcie_dw_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "Cannot initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops intel_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
				      intel_pcie_resume_noirq)
};

static const struct of_device_id of_intel_pcie_match[] = {
	{ .compatible = "intel,lgm-pcie", .data = &pcie_data },
	{}
};

static struct platform_driver intel_pcie_driver = {
	.probe = intel_pcie_probe,
	.remove = intel_pcie_remove,
	.driver = {
		.name = "intel-gw-pcie",
		.of_match_table = of_intel_pcie_match,
		.pm = &intel_pcie_pm_ops,
	},
};
builtin_platform_driver(intel_pcie_driver);