blob: ebcdce219acb1c760be457dfcfb4ad4f4baaca0f [file] [log] [blame]
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

13
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/types.h>

#include "pcie-designware.h"
Jingoo Han340cba62013-06-21 16:24:54 +090019
/* PCIe Port Logic registers */
#define PLR_OFFSET			0x700
#define PCIE_PHY_DEBUG_R1		(PLR_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP	(0x1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(0x1 << 29)

Kishon Vijay Abraham I19ce01cc2017-02-15 18:48:12 +053026int dw_pcie_read(void __iomem *addr, int size, u32 *val)
Jingoo Han340cba62013-06-21 16:24:54 +090027{
Gabriele Paolonib6b18f52015-10-08 14:27:53 -050028 if ((uintptr_t)addr & (size - 1)) {
29 *val = 0;
30 return PCIBIOS_BAD_REGISTER_NUMBER;
31 }
32
Kishon Vijay Abraham I314fc852017-02-15 18:48:16 +053033 if (size == 4) {
Gabriele Paolonic003ca92015-10-08 14:27:43 -050034 *val = readl(addr);
Kishon Vijay Abraham I314fc852017-02-15 18:48:16 +053035 } else if (size == 2) {
Gabriele Paoloni4c458522015-10-08 14:27:48 -050036 *val = readw(addr);
Kishon Vijay Abraham I314fc852017-02-15 18:48:16 +053037 } else if (size == 1) {
Gabriele Paoloni4c458522015-10-08 14:27:48 -050038 *val = readb(addr);
Kishon Vijay Abraham I314fc852017-02-15 18:48:16 +053039 } else {
Gabriele Paolonic003ca92015-10-08 14:27:43 -050040 *val = 0;
Jingoo Han340cba62013-06-21 16:24:54 +090041 return PCIBIOS_BAD_REGISTER_NUMBER;
Gabriele Paolonic003ca92015-10-08 14:27:43 -050042 }
Jingoo Han340cba62013-06-21 16:24:54 +090043
44 return PCIBIOS_SUCCESSFUL;
45}
46
Kishon Vijay Abraham I19ce01cc2017-02-15 18:48:12 +053047int dw_pcie_write(void __iomem *addr, int size, u32 val)
Jingoo Han340cba62013-06-21 16:24:54 +090048{
Gabriele Paolonib6b18f52015-10-08 14:27:53 -050049 if ((uintptr_t)addr & (size - 1))
50 return PCIBIOS_BAD_REGISTER_NUMBER;
51
Jingoo Han340cba62013-06-21 16:24:54 +090052 if (size == 4)
53 writel(val, addr);
54 else if (size == 2)
Gabriele Paoloni4c458522015-10-08 14:27:48 -050055 writew(val, addr);
Jingoo Han340cba62013-06-21 16:24:54 +090056 else if (size == 1)
Gabriele Paoloni4c458522015-10-08 14:27:48 -050057 writeb(val, addr);
Jingoo Han340cba62013-06-21 16:24:54 +090058 else
59 return PCIBIOS_BAD_REGISTER_NUMBER;
60
61 return PCIBIOS_SUCCESSFUL;
62}
63
Kishon Vijay Abraham Ia509d7d2017-03-13 19:13:26 +053064u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
65 size_t size)
Jingoo Han340cba62013-06-21 16:24:54 +090066{
Kishon Vijay Abraham Ia509d7d2017-03-13 19:13:26 +053067 int ret;
68 u32 val;
Bjorn Helgaas446fc232016-08-17 14:17:58 -050069
Kishon Vijay Abraham Ia509d7d2017-03-13 19:13:26 +053070 if (pci->ops->read_dbi)
71 return pci->ops->read_dbi(pci, base, reg, size);
72
73 ret = dw_pcie_read(base + reg, size, &val);
74 if (ret)
75 dev_err(pci->dev, "read DBI address failed\n");
76
77 return val;
Jingoo Han340cba62013-06-21 16:24:54 +090078}
79
Kishon Vijay Abraham Ia509d7d2017-03-13 19:13:26 +053080void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
81 size_t size, u32 val)
Jingoo Han340cba62013-06-21 16:24:54 +090082{
Kishon Vijay Abraham Ia509d7d2017-03-13 19:13:26 +053083 int ret;
84
85 if (pci->ops->write_dbi) {
86 pci->ops->write_dbi(pci, base, reg, size, val);
87 return;
88 }
89
90 ret = dw_pcie_write(base + reg, size, val);
91 if (ret)
92 dev_err(pci->dev, "write DBI address failed\n");
Jingoo Han340cba62013-06-21 16:24:54 +090093}
94
Kishon Vijay Abraham Iedd45e32017-03-13 19:13:27 +053095static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
Joao Pintoa0601a42016-08-10 11:02:39 +010096{
97 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
98
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +053099 return dw_pcie_readl_dbi(pci, offset + reg);
Joao Pintoa0601a42016-08-10 11:02:39 +0100100}
101
Kishon Vijay Abraham Iedd45e32017-03-13 19:13:27 +0530102static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
103 u32 val)
Joao Pintoa0601a42016-08-10 11:02:39 +0100104{
105 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
106
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530107 dw_pcie_writel_dbi(pci, offset + reg, val);
Joao Pintoa0601a42016-08-10 11:02:39 +0100108}
109
/*
 * Program outbound iATU region @index using the "unroll" (per-region
 * register block) layout: translate CPU accesses in
 * [@cpu_addr, @cpu_addr + @size - 1] to PCI address @pci_addr with TLP
 * type @type, then poll until the region reports itself enabled.
 */
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;

	/* CPU-side window: base and inclusive limit. */
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
				 lower_32_bits(cpu_addr + size - 1));
	/* PCI-side target address the window is translated to. */
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	/* TLP type, then the enable bit last so the setup is atomic-ish. */
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "outbound iATU is not being enabled\n");
}
145
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530146void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
147 u64 cpu_addr, u64 pci_addr, u32 size)
Jisheng Zhang63503c82015-04-30 16:22:28 +0800148{
Joao Pintod8bbeb32016-08-17 13:26:07 -0500149 u32 retries, val;
Stanimir Varbanov17209df2015-12-18 14:38:55 +0200150
Kishon Vijay Abraham Ia6600832017-03-13 19:13:22 +0530151 if (pci->ops->cpu_addr_fixup)
152 cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr);
153
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530154 if (pci->iatu_unroll_enabled) {
Kishon Vijay Abraham Iedd45e32017-03-13 19:13:27 +0530155 dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
156 pci_addr, size);
157 return;
Joao Pintoa0601a42016-08-10 11:02:39 +0100158 }
Stanimir Varbanov17209df2015-12-18 14:38:55 +0200159
Kishon Vijay Abraham Iedd45e32017-03-13 19:13:27 +0530160 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
161 PCIE_ATU_REGION_OUTBOUND | index);
162 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
163 lower_32_bits(cpu_addr));
164 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
165 upper_32_bits(cpu_addr));
166 dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
167 lower_32_bits(cpu_addr + size - 1));
168 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
169 lower_32_bits(pci_addr));
170 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
171 upper_32_bits(pci_addr));
172 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
173 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
174
Stanimir Varbanov17209df2015-12-18 14:38:55 +0200175 /*
176 * Make sure ATU enable takes effect before any subsequent config
177 * and I/O accesses.
178 */
Joao Pintod8bbeb32016-08-17 13:26:07 -0500179 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
Kishon Vijay Abraham Iedd45e32017-03-13 19:13:27 +0530180 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
Joao Pintod8bbeb32016-08-17 13:26:07 -0500181 if (val == PCIE_ATU_ENABLE)
182 return;
183
184 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
185 }
Kishon Vijay Abraham Iedd45e32017-03-13 19:13:27 +0530186 dev_err(pci->dev, "outbound iATU is not being enabled\n");
Jisheng Zhang63503c82015-04-30 16:22:28 +0800187}
188
Kishon Vijay Abraham If8aed6e2017-03-27 15:15:05 +0530189static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
190{
191 u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
192
193 return dw_pcie_readl_dbi(pci, offset + reg);
194}
195
196static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
197 u32 val)
198{
199 u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
200
201 dw_pcie_writel_dbi(pci, offset + reg, val);
202}
203
/*
 * Program inbound iATU region @index using the "unroll" (per-region
 * register block) layout: match accesses to BAR @bar and translate them
 * to @cpu_addr.
 *
 * Returns 0 on success, -EINVAL for an unknown @as_type, or -EBUSY when
 * the region never reports itself enabled.
 */
static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
					   int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	/* CPU-side target address the matched BAR accesses map to. */
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	/* Translate the generic address-space type to the ATU TLP type. */
	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	/* BAR-match mode: region matches accesses hitting @bar. */
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "inbound iATU is not being enabled\n");

	return -EBUSY;
}
248
249int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
250 u64 cpu_addr, enum dw_pcie_as_type as_type)
251{
252 int type;
253 u32 retries, val;
254
255 if (pci->iatu_unroll_enabled)
256 return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
257 cpu_addr, as_type);
258
259 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
260 index);
261 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
262 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
263
264 switch (as_type) {
265 case DW_PCIE_AS_MEM:
266 type = PCIE_ATU_TYPE_MEM;
267 break;
268 case DW_PCIE_AS_IO:
269 type = PCIE_ATU_TYPE_IO;
270 break;
271 default:
272 return -EINVAL;
273 }
274
275 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
276 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
277 | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
278
279 /*
280 * Make sure ATU enable takes effect before any subsequent config
281 * and I/O accesses.
282 */
283 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
284 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
285 if (val & PCIE_ATU_ENABLE)
286 return 0;
287
288 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
289 }
290 dev_err(pci->dev, "inbound iATU is not being enabled\n");
291
292 return -EBUSY;
293}
294
295void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
296 enum dw_pcie_region_type type)
297{
298 int region;
299
300 switch (type) {
301 case DW_PCIE_REGION_INBOUND:
302 region = PCIE_ATU_REGION_INBOUND;
303 break;
304 case DW_PCIE_REGION_OUTBOUND:
305 region = PCIE_ATU_REGION_OUTBOUND;
306 break;
307 default:
308 return;
309 }
310
311 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
312 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE);
313}
314
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530315int dw_pcie_wait_for_link(struct dw_pcie *pci)
Joao Pinto886bc5c2016-03-10 14:44:35 -0600316{
317 int retries;
318
319 /* check if the link is up or not */
320 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530321 if (dw_pcie_link_up(pci)) {
322 dev_info(pci->dev, "link up\n");
Joao Pinto886bc5c2016-03-10 14:44:35 -0600323 return 0;
324 }
325 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
326 }
327
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530328 dev_err(pci->dev, "phy link never came up\n");
Joao Pinto886bc5c2016-03-10 14:44:35 -0600329
330 return -ETIMEDOUT;
331}
332
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530333int dw_pcie_link_up(struct dw_pcie *pci)
Jingoo Han340cba62013-06-21 16:24:54 +0900334{
Joao Pintodac29e62016-03-10 14:44:44 -0600335 u32 val;
336
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530337 if (pci->ops->link_up)
338 return pci->ops->link_up(pci);
Bjorn Helgaas116a4892016-01-05 15:48:11 -0600339
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530340 val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1);
Jisheng Zhang01c07672016-08-17 15:57:37 -0500341 return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
342 (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
Jingoo Han340cba62013-06-21 16:24:54 +0900343}
344
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530345void dw_pcie_setup(struct dw_pcie *pci)
Jingoo Han4b1ced82013-07-31 17:14:10 +0900346{
Kishon Vijay Abraham I5f334db2017-02-15 18:48:15 +0530347 int ret;
Jingoo Han4b1ced82013-07-31 17:14:10 +0900348 u32 val;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530349 u32 lanes;
Kishon Vijay Abraham I5f334db2017-02-15 18:48:15 +0530350 struct device *dev = pci->dev;
351 struct device_node *np = dev->of_node;
352
353 ret = of_property_read_u32(np, "num-lanes", &lanes);
354 if (ret)
355 lanes = 0;
Jingoo Han4b1ced82013-07-31 17:14:10 +0900356
Mohit Kumar66c5c342014-04-14 14:22:54 -0600357 /* set the number of lanes */
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530358 val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
Jingoo Han4b1ced82013-07-31 17:14:10 +0900359 val &= ~PORT_LINK_MODE_MASK;
Kishon Vijay Abraham I5f334db2017-02-15 18:48:15 +0530360 switch (lanes) {
Jingoo Han4b1ced82013-07-31 17:14:10 +0900361 case 1:
362 val |= PORT_LINK_MODE_1_LANES;
363 break;
364 case 2:
365 val |= PORT_LINK_MODE_2_LANES;
366 break;
367 case 4:
368 val |= PORT_LINK_MODE_4_LANES;
369 break;
Zhou Wang5b0f0732015-05-13 14:44:34 +0800370 case 8:
371 val |= PORT_LINK_MODE_8_LANES;
372 break;
Gabriele Paoloni907fce02015-09-29 00:03:10 +0800373 default:
Kishon Vijay Abraham I5f334db2017-02-15 18:48:15 +0530374 dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
Gabriele Paoloni907fce02015-09-29 00:03:10 +0800375 return;
Jingoo Han4b1ced82013-07-31 17:14:10 +0900376 }
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530377 dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
Jingoo Han4b1ced82013-07-31 17:14:10 +0900378
379 /* set link width speed control register */
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530380 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
Jingoo Han4b1ced82013-07-31 17:14:10 +0900381 val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
Kishon Vijay Abraham I5f334db2017-02-15 18:48:15 +0530382 switch (lanes) {
Jingoo Han4b1ced82013-07-31 17:14:10 +0900383 case 1:
384 val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
385 break;
386 case 2:
387 val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
388 break;
389 case 4:
390 val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
391 break;
Zhou Wang5b0f0732015-05-13 14:44:34 +0800392 case 8:
393 val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
394 break;
Jingoo Han4b1ced82013-07-31 17:14:10 +0900395 }
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530396 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
Jingoo Han4b1ced82013-07-31 17:14:10 +0900397}