// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include "pcie-designware.h"

#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR			0x33c00000

#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
	IMX8MQ,
};

#define IMX6_PCIE_FLAG_IMX6_PHY			BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE	BIT(1)

struct imx6_pcie_drvdata {
	enum imx6_pcie_variants variant;
	u32 flags;
};

struct imx6_pcie {
	struct dw_pcie		*pci;
	int			reset_gpio;
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;
	struct clk		*pcie;
	struct regmap		*iomuxc_gpr;
	u32			controller_id;
	struct reset_control	*pciephy_reset;
	struct reset_control	*apps_reset;
	struct reset_control	*turnoff_reset;
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	int			link_gen;
	struct regulator	*vpcie;

	/* power domain for pcie */
	struct device		*pd_pcie;
	/* power domain for pcie phy */
	struct device		*pd_pcie_phy;
	const struct imx6_pcie_drvdata *drvdata;
};

/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_IMX6_MSI_CAP			0x50
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define PCIE_PHY_ATEOVRD_EN			(0x1 << 2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define PCIE_PHY_MPLL_MULTIPLIER_OVRD		(0x1 << 9)

#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		(1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		(1 << 3)

static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

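/*
 * Write to the 16-bit PCIe PHY control registers (not memory-mapped):
 * capture the address, capture the data, then pulse the write strobe,
 * waiting for an ack after each step.
 */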
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

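/*
 * Cycle the PHY receiver overrides (RX_DATA_EN/RX_PLL_EN) to force a PHY
 * reset on the variants that use the internal i.MX6 PHY
 * (IMX6_PCIE_FLAG_IMX6_PHY); a no-op on the other variants.
 */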
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u32 tmp;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

#ifdef CONFIG_ARM
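/*
 * On i.MX6 a config access to a device that does not respond raises an
 * external abort on the ARM core instead of returning all-ones.  The
 * handler below emulates the expected PCI behaviour: it decodes the
 * faulting load, fills its destination register with ~0 (or 0xff for a
 * byte load) and skips the instruction.
 */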
/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}
#endif

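/*
 * On SoCs where the PCIe core and its PHY sit in separate power domains,
 * attach to both the "pcie" and "pcie_phy" domains and link them to this
 * device so runtime PM keeps them powered while the driver is active.
 */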
static int imx6_pcie_attach_pd(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx6_pcie->pd_pcie))
		return PTR_ERR(imx6_pcie->pd_pcie);
	link = device_link_add(dev, imx6_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd.\n");
		return -EINVAL;
	}

	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pd_pcie_phy))
		return PTR_ERR(imx6_pcie->pd_pcie_phy);

	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
		return -EINVAL;
	}

	return 0;
}

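/*
 * Put the PCIe core (and, where present, the PHY) back into reset.  What
 * "reset" means differs per variant: dedicated reset controls on
 * i.MX7D/i.MX8MQ, GPR bits on the i.MX6 family.  The optional vpcie supply
 * is also switched off if it is currently enabled.
 */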
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->drvdata->variant) {
	case IMX7D:
	case IMX8MQ:
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
	return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}

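/*
 * Enable the PHY reference clock path.  On i.MX6Q/QP this also powers up
 * the core PHY through GPR1; i.MX6SX additionally needs the inbound AXI
 * clock; i.MX8MQ forces REF_CLK on through the GPR CLK_REQ override;
 * i.MX7D needs no extra setup here.
 */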
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	unsigned int offset;
	int ret = 0;

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * The async reset input needs the ref clock to sync
		 * internally.  If the ref clock only comes up after reset,
		 * the internally synced reset time is too short to meet the
		 * requirement, so add a ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		break;
	case IMX8MQ:
		offset = imx6_pcie_grp_offset(imx6_pcie);
		/*
		 * Drive the CLK_REQ override low and enable the override to
		 * make sure that REF_CLK is turned on.
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
				   0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
		break;
	}

	return ret;
}

461{
462 u32 val;
463 unsigned int retries;
464 struct device *dev = imx6_pcie->pci->dev;
465
466 for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
467 regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
468
469 if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
470 return;
471
472 usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
473 PHY_PLL_LOCK_WAIT_USLEEP_MAX);
474 }
475
476 dev_err(dev, "PCIe PLL lock timeout\n");
477}
478
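/*
 * Power the link back up: enable the optional vpcie regulator and the PHY,
 * bus and core clocks, toggle the optional reset GPIO, then release the
 * per-variant core/PHY resets asserted in imx6_pcie_assert_core_reset().
 */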
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->drvdata->variant) {
	case IMX8MQ:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		break;
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

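/*
 * Program the device-type field in IOMUXC GPR12 so the controller operates
 * as a Root Port; the second i.MX8MQ controller has its own field within
 * GPR12.
 */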
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
	unsigned int mask, val;

	if (imx6_pcie->drvdata->variant == IMX8MQ &&
	    imx6_pcie->controller_id == 1) {
		mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
		val  = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
				  PCI_EXP_TYPE_ROOT_PORT);
	} else {
		mask = IMX6Q_GPR12_DEVICE_TYPE;
		val  = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
				  PCI_EXP_TYPE_ROOT_PORT);
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
}

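/*
 * Per-variant PHY/IOMUXC setup: select the reference clock source and, on
 * the i.MX6 family, program the LOS level and the TX de-emphasis/swing
 * values taken from the device tree, then set the port type.
 */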
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->drvdata->variant) {
	case IMX8MQ:
		/*
		 * TODO: Currently this code assumes external
		 * oscillator is being used
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr,
				   imx6_pcie_grp_offset(imx6_pcie),
				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
		break;
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	imx6_pcie_configure_type(imx6_pcie);
}

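/*
 * The i.MX6 PHY MPLL defaults to a 125 MHz reference clock.  For 100 MHz
 * and 200 MHz references, override the multiplier and reference divider
 * through the indirect PHY registers so the MPLL keeps its nominal rate.
 */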
static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
	int mult, div;
	u32 val;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return 0;

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx6_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}

static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	return -ETIMEDOUT;
}

static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -EINVAL;
}

static void imx6_pcie_ltssm_enable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6Q:
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2,
				   IMX6Q_GPR12_PCIE_CTL_2);
		break;
	case IMX7D:
	case IMX8MQ:
		reset_control_deassert(imx6_pcie->apps_reset);
		break;
	}
}

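/*
 * Bring the link up: force Gen1 for initial training, start the LTSSM and
 * wait for the link, then optionally initiate a directed speed change to
 * Gen2 when "fsl,max-link-speed" allows it.
 */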
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	imx6_pcie_ltssm_enable(dev);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->drvdata->flags &
		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from the i.MX6 family when no link speed transition
			 * occurs and the link stays at Gen1: the bit is not
			 * cleared by the hardware, which would make the wait
			 * below report a false failure.  Hence only wait for
			 * the speed change on the i.MX6 variants.
			 */
			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = imx6_pcie_wait_for_link(imx6_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	imx6_setup_phy_mpll(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};

static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
			      struct platform_device *pdev)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(dev, "failed to get MSI irq\n");
			return -ENODEV;
		}
	}

	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	/* No special ops needed, but pcie-designware still expects this struct */
};

#ifdef CONFIG_PM_SLEEP
static void imx6_pcie_ltssm_disable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0);
		break;
	case IMX7D:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	default:
		dev_err(dev, "ltssm_disable not supported\n");
	}
}

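/*
 * Broadcast PME_Turn_Off before suspend, either through the dedicated
 * "turnoff" reset control or by toggling the IOMUXC PM_TURN_OFF bit, then
 * wait briefly since PME_TO_Ack cannot be observed here.
 */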
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	/* Some variants have a turnoff reset in DT */
	if (imx6_pcie->turnoff_reset) {
		reset_control_assert(imx6_pcie->turnoff_reset);
		reset_control_deassert(imx6_pcie->turnoff_reset);
		goto pm_turnoff_sleep;
	}

	/* Others poke directly at IOMUXC registers */
	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_PM_TURN_OFF,
				   IMX6SX_GPR12_PCIE_PM_TURN_OFF);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
		break;
	default:
		dev_err(dev, "PME_Turn_Off not implemented\n");
		return;
	}

	/*
	 * Components with an upstream port must respond to
	 * PME_Turn_Off with PME_TO_Ack but we can't check.
	 *
	 * The standard recommends a 1-10ms timeout after which to
	 * proceed anyway as if acks were received.
	 */
pm_turnoff_sleep:
	usleep_range(1000, 10000);
}

static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
	clk_disable_unprepare(imx6_pcie->pcie);
	clk_disable_unprepare(imx6_pcie->pcie_phy);
	clk_disable_unprepare(imx6_pcie->pcie_bus);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
		break;
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
		break;
	default:
		break;
	}
}

static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
{
	return (imx6_pcie->drvdata->variant == IMX7D ||
		imx6_pcie->drvdata->variant == IMX6SX);
}

static int imx6_pcie_suspend_noirq(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	if (!imx6_pcie_supports_suspend(imx6_pcie))
		return 0;

	imx6_pcie_pm_turnoff(imx6_pcie);
	imx6_pcie_clk_disable(imx6_pcie);
	imx6_pcie_ltssm_disable(dev);

	return 0;
}

static int imx6_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct pcie_port *pp = &imx6_pcie->pci->pp;

	if (!imx6_pcie_supports_suspend(imx6_pcie))
		return 0;

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);

	ret = imx6_pcie_establish_link(imx6_pcie);
	if (ret < 0)
		dev_info(dev, "pcie link is down after resume.\n");

	return 0;
}
#endif

static const struct dev_pm_ops imx6_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
				      imx6_pcie_resume_noirq)
};

static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;
	u16 val;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	imx6_pcie->drvdata = of_device_get_match_data(dev);

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX7D:
	case IMX8MQ:
		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
			imx6_pcie->controller_id = 1;

		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab turnoff reset */
	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
	if (IS_ERR(imx6_pcie->turnoff_reset)) {
		dev_err(dev, "Failed to get TURNOFF reset control\n");
		return PTR_ERR(imx6_pcie->turnoff_reset);
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_pcie_attach_pd(dev);
	if (ret)
		return ret;

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

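	/*
	 * Set the MSI Enable bit in the Root Complex's own MSI capability;
	 * on this controller it also gates delivery of MSIs coming from
	 * devices below the Root Complex.
	 */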
	if (pci_msi_enabled()) {
		val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
					PCI_MSI_FLAGS);
		val |= PCI_MSI_FLAGS_ENABLE;
		dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
				   val);
	}

	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

static const struct imx6_pcie_drvdata drvdata[] = {
	[IMX6Q] = {
		.variant = IMX6Q,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
	},
	[IMX7D] = {
		.variant = IMX7D,
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
	},
};

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx6_pcie_pm_ops,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};

static int __init imx6_pcie_init(void)
{
#ifdef CONFIG_ARM
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");
#endif

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);