Dilip Kota | ed22aaa | 2019-12-09 11:20:05 +0800 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * PCIe host controller driver for Intel Gateway SoCs |
| 4 | * |
| 5 | * Copyright (c) 2019 Intel Corporation. |
| 6 | */ |
| 7 | |
| 8 | #include <linux/bitfield.h> |
| 9 | #include <linux/clk.h> |
| 10 | #include <linux/gpio/consumer.h> |
| 11 | #include <linux/iopoll.h> |
| 12 | #include <linux/pci_regs.h> |
| 13 | #include <linux/phy/phy.h> |
| 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/reset.h> |
| 16 | |
| 17 | #include "../../pci.h" |
| 18 | #include "pcie-designware.h" |
| 19 | |
/* N_FTS (fast training sequence) values per link generation */
#define PORT_AFR_N_FTS_GEN12_DFT	(SZ_128 - 1)
#define PORT_AFR_N_FTS_GEN3		180
#define PORT_AFR_N_FTS_GEN4		196

/* PCIe Application logic Registers */
#define PCIE_APP_CCR			0x10
#define PCIE_APP_CCR_LTSSM_ENABLE	BIT(0)

#define PCIE_APP_MSG_CR			0x30
#define PCIE_APP_MSG_XMT_PM_TURNOFF	BIT(0)

#define PCIE_APP_PMC			0x44
#define PCIE_APP_PMC_IN_L2		BIT(20)

#define PCIE_APP_IRNEN			0xF4
#define PCIE_APP_IRNCR			0xF8
#define PCIE_APP_IRN_AER_REPORT		BIT(0)
#define PCIE_APP_IRN_PME		BIT(2)
#define PCIE_APP_IRN_RX_VDM_MSG		BIT(4)
#define PCIE_APP_IRN_PM_TO_ACK		BIT(9)
#define PCIE_APP_IRN_LINK_AUTO_BW_STAT	BIT(11)
#define PCIE_APP_IRN_BW_MGT		BIT(12)
#define PCIE_APP_IRN_MSG_LTR		BIT(18)
#define PCIE_APP_IRN_SYS_ERR_RC		BIT(29)
#define PCIE_APP_INTX_OFST		12

/*
 * The INTA..INTD status bits sit at bit position PCIE_APP_INTX_OFST +
 * PCI_INTERRUPT_INT{A..D} (i.e. bits 13..16).  The previous definition
 * OR'ed in the raw bit *numbers* (13..16) instead of BIT(13..16), so
 * the INTx sources were never actually enabled or acknowledged.
 */
#define PCIE_APP_IRN_INTX(irq) \
	BIT(PCIE_APP_INTX_OFST + (irq))

#define PCIE_APP_IRN_INT \
	(PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
	PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
	PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
	PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
	PCIE_APP_IRN_INTX(PCI_INTERRUPT_INTA) | \
	PCIE_APP_IRN_INTX(PCI_INTERRUPT_INTB) | \
	PCIE_APP_IRN_INTX(PCI_INTERRUPT_INTC) | \
	PCIE_APP_IRN_INTX(PCI_INTERRUPT_INTD))

#define BUS_IATU_OFFSET			SZ_256M
#define RESET_INTERVAL_MS		100
| 58 | |
/* Per-SoC configuration, selected via the OF compatible string match data */
struct intel_pcie_soc {
	unsigned int pcie_ver;		/* DWC controller IP version */
	unsigned int pcie_atu_offset;	/* iATU register offset from dbi_base */
	u32 num_viewport;		/* number of iATU viewports to use */
};
| 64 | |
/* Driver state for one Intel Gateway PCIe root-complex port */
struct intel_pcie_port {
	struct dw_pcie pci;		/* embedded DesignWare PCIe core */
	void __iomem *app_base;		/* application logic register block */
	struct gpio_desc *reset_gpio;	/* endpoint reset GPIO */
	u32 rst_intrvl;			/* reset assert duration, in ms */
	u32 max_width;			/* max link width read from LNKCAP */
	u32 n_fts;			/* fast training sequences for AFR */
	struct clk *core_clk;
	struct reset_control *core_rst;
	struct phy *phy;
	u8 pcie_cap_ofst;		/* PCIe capability offset in RC config space */
};
| 77 | |
| 78 | static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val) |
| 79 | { |
| 80 | u32 old; |
| 81 | |
| 82 | old = readl(base + ofs); |
| 83 | val = (old & ~mask) | (val & mask); |
| 84 | |
| 85 | if (val != old) |
| 86 | writel(val, base + ofs); |
| 87 | } |
| 88 | |
/* Read a 32-bit application logic register */
static inline u32 pcie_app_rd(struct intel_pcie_port *lpp, u32 ofs)
{
	return readl(lpp->app_base + ofs);
}
| 93 | |
/* Write a 32-bit application logic register */
static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
{
	writel(val, lpp->app_base + ofs);
}
| 98 | |
/* Masked read-modify-write of an application logic register */
static void pcie_app_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
			     u32 mask, u32 val)
{
	pcie_update_bits(lpp->app_base, ofs, mask, val);
}
| 104 | |
/* Read a 32-bit RC config-space (dbi) register */
static inline u32 pcie_rc_cfg_rd(struct intel_pcie_port *lpp, u32 ofs)
{
	return dw_pcie_readl_dbi(&lpp->pci, ofs);
}
| 109 | |
/* Write a 32-bit RC config-space (dbi) register */
static inline void pcie_rc_cfg_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
{
	dw_pcie_writel_dbi(&lpp->pci, ofs, val);
}
| 114 | |
/* Masked read-modify-write of an RC config-space (dbi) register */
static void pcie_rc_cfg_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
				u32 mask, u32 val)
{
	pcie_update_bits(lpp->pci.dbi_base, ofs, mask, val);
}
| 120 | |
/* Start link training by enabling the LTSSM in the app control register */
static void intel_pcie_ltssm_enable(struct intel_pcie_port *lpp)
{
	pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
			 PCIE_APP_CCR_LTSSM_ENABLE);
}
| 126 | |
/* Halt the LTSSM; done before reprogramming the core registers */
static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
{
	pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
}
| 131 | |
/*
 * Cache the maximum link width from LNKCAP, then clear the link-disable
 * and all ASPM control bits in LNKCTL before link training is started.
 */
static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
{
	u32 val;
	u8 offset = lpp->pcie_cap_ofst;

	val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCAP);
	lpp->max_width = FIELD_GET(PCI_EXP_LNKCAP_MLW, val);

	val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);

	/* Make sure the link is not disabled and ASPM is off */
	val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
	pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
}
| 145 | |
/*
 * Program DWC port-logic registers: select the number of fast training
 * sequences (N_FTS) matching the configured link generation and enable
 * DLL link establishment.
 */
static void intel_pcie_port_logic_setup(struct intel_pcie_port *lpp)
{
	u32 val, mask;
	struct dw_pcie *pci = &lpp->pci;

	/* N_FTS depends on the target link speed; Gen1/2 use the default */
	switch (pcie_link_speed[pci->link_gen]) {
	case PCIE_SPEED_8_0GT:
		lpp->n_fts = PORT_AFR_N_FTS_GEN3;
		break;
	case PCIE_SPEED_16_0GT:
		lpp->n_fts = PORT_AFR_N_FTS_GEN4;
		break;
	default:
		lpp->n_fts = PORT_AFR_N_FTS_GEN12_DFT;
		break;
	}

	/* Program both the normal and common-clock N_FTS fields */
	mask = PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK;
	val = FIELD_PREP(PORT_AFR_N_FTS_MASK, lpp->n_fts) |
	      FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, lpp->n_fts);
	pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_AFR, mask, val);

	/* Port Link Control Register */
	pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_LINK_CONTROL, PORT_LINK_DLL_LINK_EN,
			    PORT_LINK_DLL_LINK_EN);
}
| 172 | |
/*
 * (Re)configure the root complex.  The LTSSM is halted first so the
 * DWC core and port-logic registers are programmed with training idle.
 */
static void intel_pcie_rc_setup(struct intel_pcie_port *lpp)
{
	intel_pcie_ltssm_disable(lpp);
	intel_pcie_link_setup(lpp);
	dw_pcie_setup_rc(&lpp->pci.pp);
	dw_pcie_upconfig_setup(&lpp->pci);
	intel_pcie_port_logic_setup(lpp);
	dw_pcie_link_set_n_fts(&lpp->pci, lpp->n_fts);
}
| 182 | |
| 183 | static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp) |
| 184 | { |
| 185 | struct device *dev = lpp->pci.dev; |
| 186 | int ret; |
| 187 | |
| 188 | lpp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); |
| 189 | if (IS_ERR(lpp->reset_gpio)) { |
| 190 | ret = PTR_ERR(lpp->reset_gpio); |
| 191 | if (ret != -EPROBE_DEFER) |
| 192 | dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret); |
| 193 | return ret; |
| 194 | } |
| 195 | |
| 196 | /* Make initial reset last for 100us */ |
| 197 | usleep_range(100, 200); |
| 198 | |
| 199 | return 0; |
| 200 | } |
| 201 | |
/* Put the PCIe core into reset */
static void intel_pcie_core_rst_assert(struct intel_pcie_port *lpp)
{
	reset_control_assert(lpp->core_rst);
}
| 206 | |
/* Release the PCIe core from reset, observing the required settle times */
static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
{
	/*
	 * One micro-second delay to make sure the reset pulse
	 * wide enough so that core reset is clean.
	 */
	udelay(1);
	reset_control_deassert(lpp->core_rst);

	/*
	 * Some SoC core reset also reset PHY, more delay needed
	 * to make sure the reset process is done.
	 */
	usleep_range(1000, 2000);
}
| 222 | |
/* Assert the endpoint device reset via the reset GPIO */
static void intel_pcie_device_rst_assert(struct intel_pcie_port *lpp)
{
	gpiod_set_value_cansleep(lpp->reset_gpio, 1);
}
| 227 | |
/*
 * Release the endpoint reset GPIO after holding it asserted for the
 * configured interval ("reset-assert-ms" property, default 100 ms).
 */
static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
{
	msleep(lpp->rst_intrvl);
	gpiod_set_value_cansleep(lpp->reset_gpio, 0);
}
| 233 | |
/*
 * Release the endpoint from reset, start link training and wait for
 * the link to come up.  Returns the dw_pcie_wait_for_link() result.
 */
static int intel_pcie_app_logic_setup(struct intel_pcie_port *lpp)
{
	intel_pcie_device_rst_deassert(lpp);
	intel_pcie_ltssm_enable(lpp);

	return dw_pcie_wait_for_link(&lpp->pci);
}
| 241 | |
/*
 * Mask all application-logic interrupt sources and clear any that are
 * already pending (IRNCR is write-to-clear here).
 */
static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
{
	pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
	pcie_app_wr(lpp, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
}
| 247 | |
| 248 | static int intel_pcie_get_resources(struct platform_device *pdev) |
| 249 | { |
| 250 | struct intel_pcie_port *lpp = platform_get_drvdata(pdev); |
| 251 | struct dw_pcie *pci = &lpp->pci; |
| 252 | struct device *dev = pci->dev; |
Dilip Kota | ed22aaa | 2019-12-09 11:20:05 +0800 | [diff] [blame] | 253 | int ret; |
| 254 | |
Dejin Zheng | 936fa5c | 2020-07-09 00:40:13 +0800 | [diff] [blame] | 255 | pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi"); |
Dilip Kota | ed22aaa | 2019-12-09 11:20:05 +0800 | [diff] [blame] | 256 | if (IS_ERR(pci->dbi_base)) |
| 257 | return PTR_ERR(pci->dbi_base); |
| 258 | |
| 259 | lpp->core_clk = devm_clk_get(dev, NULL); |
| 260 | if (IS_ERR(lpp->core_clk)) { |
| 261 | ret = PTR_ERR(lpp->core_clk); |
| 262 | if (ret != -EPROBE_DEFER) |
| 263 | dev_err(dev, "Failed to get clks: %d\n", ret); |
| 264 | return ret; |
| 265 | } |
| 266 | |
| 267 | lpp->core_rst = devm_reset_control_get(dev, NULL); |
| 268 | if (IS_ERR(lpp->core_rst)) { |
| 269 | ret = PTR_ERR(lpp->core_rst); |
| 270 | if (ret != -EPROBE_DEFER) |
| 271 | dev_err(dev, "Failed to get resets: %d\n", ret); |
| 272 | return ret; |
| 273 | } |
| 274 | |
| 275 | ret = device_property_match_string(dev, "device_type", "pci"); |
| 276 | if (ret) { |
| 277 | dev_err(dev, "Failed to find pci device type: %d\n", ret); |
| 278 | return ret; |
| 279 | } |
| 280 | |
| 281 | ret = device_property_read_u32(dev, "reset-assert-ms", |
| 282 | &lpp->rst_intrvl); |
| 283 | if (ret) |
| 284 | lpp->rst_intrvl = RESET_INTERVAL_MS; |
| 285 | |
Dejin Zheng | 936fa5c | 2020-07-09 00:40:13 +0800 | [diff] [blame] | 286 | lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app"); |
Dilip Kota | ed22aaa | 2019-12-09 11:20:05 +0800 | [diff] [blame] | 287 | if (IS_ERR(lpp->app_base)) |
| 288 | return PTR_ERR(lpp->app_base); |
| 289 | |
| 290 | lpp->phy = devm_phy_get(dev, "pcie"); |
| 291 | if (IS_ERR(lpp->phy)) { |
| 292 | ret = PTR_ERR(lpp->phy); |
| 293 | if (ret != -EPROBE_DEFER) |
| 294 | dev_err(dev, "Couldn't get pcie-phy: %d\n", ret); |
| 295 | return ret; |
| 296 | } |
| 297 | |
| 298 | return 0; |
| 299 | } |
| 300 | |
/* Power down the PCIe PHY */
static void intel_pcie_deinit_phy(struct intel_pcie_port *lpp)
{
	phy_exit(lpp->phy);
}
| 305 | |
/*
 * Send PME_TURN_OFF to the endpoint and poll the PMC status until the
 * link reaches L2, with a 5 second timeout.  Skipped for links below
 * Gen3 -- NOTE(review): rationale for the Gen3 gate is not visible
 * here; confirm against the SoC documentation.
 *
 * Returns 0 on success (or when skipped), negative errno on timeout.
 */
static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
{
	u32 value;
	int ret;
	struct dw_pcie *pci = &lpp->pci;

	if (pci->link_gen < 3)
		return 0;

	/* Send PME_TURN_OFF message */
	pcie_app_wr_mask(lpp, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
			 PCIE_APP_MSG_XMT_PM_TURNOFF);

	/* Read PMC status and wait for falling into L2 link state */
	ret = readl_poll_timeout(lpp->app_base + PCIE_APP_PMC, value,
				 value & PCIE_APP_PMC_IN_L2, 20,
				 jiffies_to_usecs(5 * HZ));
	if (ret)
		dev_err(lpp->pci.dev, "PCIe link enter L2 timeout!\n");

	return ret;
}
| 328 | |
/*
 * Power down the link: move it to L2 if it is up, then hold the
 * endpoint in reset and disable RC memory-space decoding.
 */
static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
{
	if (dw_pcie_link_up(&lpp->pci))
		intel_pcie_wait_l2(lpp);

	/* Put endpoint device in reset state */
	intel_pcie_device_rst_assert(lpp);
	pcie_rc_cfg_wr_mask(lpp, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
}
| 338 | |
| 339 | static int intel_pcie_host_setup(struct intel_pcie_port *lpp) |
| 340 | { |
| 341 | struct device *dev = lpp->pci.dev; |
| 342 | int ret; |
| 343 | |
| 344 | intel_pcie_core_rst_assert(lpp); |
| 345 | intel_pcie_device_rst_assert(lpp); |
| 346 | |
| 347 | ret = phy_init(lpp->phy); |
| 348 | if (ret) |
| 349 | return ret; |
| 350 | |
| 351 | intel_pcie_core_rst_deassert(lpp); |
| 352 | |
| 353 | ret = clk_prepare_enable(lpp->core_clk); |
| 354 | if (ret) { |
| 355 | dev_err(lpp->pci.dev, "Core clock enable failed: %d\n", ret); |
| 356 | goto clk_err; |
| 357 | } |
| 358 | |
| 359 | if (!lpp->pcie_cap_ofst) { |
| 360 | ret = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP); |
| 361 | if (!ret) { |
| 362 | ret = -ENXIO; |
| 363 | dev_err(dev, "Invalid PCIe capability offset\n"); |
| 364 | goto app_init_err; |
| 365 | } |
| 366 | |
| 367 | lpp->pcie_cap_ofst = ret; |
| 368 | } |
| 369 | |
| 370 | intel_pcie_rc_setup(lpp); |
| 371 | ret = intel_pcie_app_logic_setup(lpp); |
| 372 | if (ret) |
| 373 | goto app_init_err; |
| 374 | |
| 375 | /* Enable integrated interrupts */ |
| 376 | pcie_app_wr_mask(lpp, PCIE_APP_IRNEN, PCIE_APP_IRN_INT, |
| 377 | PCIE_APP_IRN_INT); |
| 378 | |
| 379 | return 0; |
| 380 | |
| 381 | app_init_err: |
| 382 | clk_disable_unprepare(lpp->core_clk); |
| 383 | clk_err: |
| 384 | intel_pcie_core_rst_assert(lpp); |
| 385 | intel_pcie_deinit_phy(lpp); |
| 386 | |
| 387 | return ret; |
| 388 | } |
| 389 | |
/* Power-down sequence: irqs off, link off, clock gated, resets held */
static void __intel_pcie_remove(struct intel_pcie_port *lpp)
{
	intel_pcie_core_irq_disable(lpp);
	intel_pcie_turn_off(lpp);
	clk_disable_unprepare(lpp->core_clk);
	intel_pcie_core_rst_assert(lpp);
	intel_pcie_deinit_phy(lpp);
}
| 398 | |
/* Platform remove: detach from the DWC host core, then power down */
static int intel_pcie_remove(struct platform_device *pdev)
{
	struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
	struct pcie_port *pp = &lpp->pci.pp;

	dw_pcie_host_deinit(pp);
	__intel_pcie_remove(lpp);

	return 0;
}
| 409 | |
/*
 * System suspend (noirq phase): mask app interrupts, move the link to
 * L2, then power down the PHY and gate the core clock.  If entering L2
 * fails, the PHY and clock are left running and the error is returned.
 */
static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
{
	struct intel_pcie_port *lpp = dev_get_drvdata(dev);
	int ret;

	intel_pcie_core_irq_disable(lpp);
	ret = intel_pcie_wait_l2(lpp);
	if (ret)
		return ret;

	intel_pcie_deinit_phy(lpp);
	clk_disable_unprepare(lpp->core_clk);
	return ret;
}
| 424 | |
/* System resume (noirq phase): full re-initialization of the port */
static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
{
	struct intel_pcie_port *lpp = dev_get_drvdata(dev);

	return intel_pcie_host_setup(lpp);
}
| 431 | |
/* dw_pcie_host_ops.host_init callback: bring the port up */
static int intel_pcie_rc_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct intel_pcie_port *lpp = dev_get_drvdata(pci->dev);

	return intel_pcie_host_setup(lpp);
}
| 439 | |
/*
 * Dummy function so that DW core doesn't configure MSI.
 * NOTE(review): presumably MSIs are handled outside the DWC core on
 * this SoC -- confirm against the interrupt topology.
 */
static int intel_pcie_msi_init(struct pcie_port *pp)
{
	return 0;
}
| 447 | |
/*
 * cpu_addr_fixup callback: the bus view of memory is offset from the
 * CPU view by BUS_IATU_OFFSET (256 MiB) when programming the iATU.
 */
static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
{
	return cpu_addr + BUS_IATU_OFFSET;
}
| 452 | |
/* DWC core callbacks */
static const struct dw_pcie_ops intel_pcie_ops = {
	.cpu_addr_fixup = intel_pcie_cpu_addr,
};

/* DWC host callbacks */
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
	.host_init = intel_pcie_rc_init,
	.msi_host_init = intel_pcie_msi_init,
};
| 461 | |
/* SoC data for "intel,lgm-pcie" (Lightning Mountain) */
static const struct intel_pcie_soc pcie_data = {
	.pcie_ver = 0x520A,
	.pcie_atu_offset = 0xC0000,
	.num_viewport = 3,
};
| 467 | |
/*
 * Platform probe: allocate the port, map resources, request the
 * endpoint reset GPIO, then hand the port to the DWC host core.
 */
static int intel_pcie_probe(struct platform_device *pdev)
{
	const struct intel_pcie_soc *data;
	struct device *dev = &pdev->dev;
	struct intel_pcie_port *lpp;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	int ret;

	lpp = devm_kzalloc(dev, sizeof(*lpp), GFP_KERNEL);
	if (!lpp)
		return -ENOMEM;

	/* drvdata is read back by intel_pcie_get_resources() and PM ops */
	platform_set_drvdata(pdev, lpp);
	pci = &lpp->pci;
	pci->dev = dev;
	pp = &pci->pp;

	ret = intel_pcie_get_resources(pdev);
	if (ret)
		return ret;

	ret = intel_pcie_ep_rst_init(lpp);
	if (ret)
		return ret;

	data = device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	pci->ops = &intel_pcie_ops;
	pci->version = data->pcie_ver;
	/* iATU registers live at a fixed offset inside the dbi space */
	pci->atu_base = pci->dbi_base + data->pcie_atu_offset;
	pp->ops = &intel_pcie_dw_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "Cannot initialize host\n");
		return ret;
	}

	/*
	 * Intel PCIe doesn't configure IO region, so set viewport
	 * to not perform IO region access.
	 */
	pci->num_viewport = data->num_viewport;

	return 0;
}
| 517 | |
/* Suspend/resume bound to the noirq phase */
static const struct dev_pm_ops intel_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
				      intel_pcie_resume_noirq)
};

static const struct of_device_id of_intel_pcie_match[] = {
	{ .compatible = "intel,lgm-pcie", .data = &pcie_data },
	{}
};

static struct platform_driver intel_pcie_driver = {
	.probe = intel_pcie_probe,
	.remove = intel_pcie_remove,
	.driver = {
		.name = "intel-gw-pcie",
		.of_match_table = of_intel_pcie_match,
		.pm = &intel_pcie_pm_ops,
	},
};
/* Built-in driver: registered at init, never unloaded as a module */
builtin_platform_driver(intel_pcie_driver);