/*
 * PCIe driver for Renesas R-Car SoCs
 *  Copyright (C) 2014 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 *  Copyright (C) 2009 - 2011 Paul Mundt
 *
 * Author: Phil Edworthy <phil.edworthy@renesas.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
16
17#include <linux/clk.h>
18#include <linux/delay.h>
19#include <linux/interrupt.h>
Phil Edworthy290c1fb2014-05-12 11:57:49 +010020#include <linux/irq.h>
21#include <linux/irqdomain.h>
Phil Edworthyc25da472014-05-12 11:57:48 +010022#include <linux/kernel.h>
Paul Gortmaker42d10712016-07-22 16:23:21 -050023#include <linux/init.h>
Phil Edworthy290c1fb2014-05-12 11:57:49 +010024#include <linux/msi.h>
Phil Edworthyc25da472014-05-12 11:57:48 +010025#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/of_pci.h>
28#include <linux/of_platform.h>
29#include <linux/pci.h>
30#include <linux/platform_device.h>
Phil Edworthyde1be9a2016-01-05 13:00:30 +000031#include <linux/pm_runtime.h>
Phil Edworthyc25da472014-05-12 11:57:48 +010032#include <linux/slab.h>
33
#define DRV_NAME "rcar-pcie"

/* PIO configuration-space access */
#define PCIECAR			0x000010
#define PCIECCTLR		0x000018
#define  CONFIG_SEND_ENABLE	(1 << 31)
#define  TYPE0			(0 << 8)
#define  TYPE1			(1 << 8)
#define PCIECDR			0x000020
#define PCIEMSR			0x000028
#define PCIEINTXR		0x000400
#define PCIEMSITXR		0x000840

/* Transfer control */
#define PCIETCTLR		0x02000
#define  CFINIT			1
#define PCIETSTR		0x02004
#define  DATA_LINK_ACTIVE	1
#define PCIEERRFR		0x02020
#define  UNSUPPORTED_REQUEST	(1 << 4)
#define PCIEMSIFR		0x02044
#define PCIEMSIALR		0x02048
#define  MSIFE			1
#define PCIEMSIAUR		0x0204c
#define PCIEMSIIER		0x02050

/* root port address */
#define PCIEPRAR(x)		(0x02080 + ((x) * 0x4))

/* local address reg & mask */
#define PCIELAR(x)		(0x02200 + ((x) * 0x20))
#define PCIELAMR(x)		(0x02208 + ((x) * 0x20))
#define  LAM_PREFETCH		(1 << 3)
#define  LAM_64BIT		(1 << 2)
#define  LAR_ENABLE		(1 << 1)

/* PCIe address reg & mask (outbound windows) */
#define PCIEPALR(x)		(0x03400 + ((x) * 0x20))
#define PCIEPAUR(x)		(0x03404 + ((x) * 0x20))
#define PCIEPAMR(x)		(0x03408 + ((x) * 0x20))
#define PCIEPTCTLR(x)		(0x0340c + ((x) * 0x20))
#define  PAR_ENABLE		(1 << 31)
#define  IO_SPACE		(1 << 8)

/* Configuration */
#define PCICONF(x)		(0x010000 + ((x) * 0x4))
#define PMCAP(x)		(0x010040 + ((x) * 0x4))
#define EXPCAP(x)		(0x010070 + ((x) * 0x4))
#define VCCAP(x)		(0x010100 + ((x) * 0x4))

/* link layer */
#define IDSETR1			0x011004
#define TLCTLR			0x011048
#define MACSR			0x011054
#define MACCTLR			0x011058
#define  SCRAMBLE_DISABLE	(1 << 27)

/* R-Car H1 PHY */
#define H1_PCIEPHYADRR		0x04000c
#define  WRITE_CMD		(1 << 16)
#define  PHY_ACK		(1 << 24)
#define  RATE_POS		12
#define  LANE_POS		8
#define  ADR_POS		0
#define H1_PCIEPHYDOUTR		0x040014
#define H1_PCIEPHYSR		0x040018

/* R-Car Gen2 PHY */
#define GEN2_PCIEPHYADDR	0x780
#define GEN2_PCIEPHYDATA	0x784
#define GEN2_PCIEPHYCTRL	0x78c

#define INT_PCI_MSI_NR		32

/* Root port's own config space lives at function 0 of PCICONF */
#define RCONF(x)		(PCICONF(0)+(x))
#define RPMCAP(x)		(PMCAP(0)+(x))
#define REXPCAP(x)		(EXPCAP(0)+(x))
#define RVCCAP(x)		(VCCAP(0)+(x))

/* Encoding of bus/device/function into the PCIECAR register */
#define PCIE_CONF_BUS(b)	(((b) & 0xff) << 24)
#define PCIE_CONF_DEV(d)	(((d) & 0x1f) << 19)
#define PCIE_CONF_FUNC(f)	(((f) & 0x7) << 16)

#define RCAR_PCI_MAX_RESOURCES	4
#define MAX_NR_INBOUND_MAPS	6
118
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100119struct rcar_msi {
120 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
121 struct irq_domain *domain;
Yijing Wangc2791b82014-11-11 17:45:45 -0700122 struct msi_controller chip;
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100123 unsigned long pages;
124 struct mutex lock;
125 int irq1;
126 int irq2;
127};
128
Yijing Wangc2791b82014-11-11 17:45:45 -0700129static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100130{
131 return container_of(chip, struct rcar_msi, chip);
132}
133
Phil Edworthyc25da472014-05-12 11:57:48 +0100134/* Structure representing the PCIe interface */
135struct rcar_pcie {
136 struct device *dev;
137 void __iomem *base;
Phil Edworthy5d2917d2015-11-25 15:30:37 +0000138 struct list_head resources;
Phil Edworthyc25da472014-05-12 11:57:48 +0100139 int root_bus_nr;
140 struct clk *clk;
141 struct clk *bus_clk;
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100142 struct rcar_msi msi;
Phil Edworthyc25da472014-05-12 11:57:48 +0100143};
144
Phil Edworthyb77188492014-06-30 08:54:23 +0100145static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
146 unsigned long reg)
Phil Edworthyc25da472014-05-12 11:57:48 +0100147{
148 writel(val, pcie->base + reg);
149}
150
Phil Edworthyb77188492014-06-30 08:54:23 +0100151static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
152 unsigned long reg)
Phil Edworthyc25da472014-05-12 11:57:48 +0100153{
154 return readl(pcie->base + reg);
155}
156
/* Direction of a config-space access */
enum {
	RCAR_PCI_ACCESS_READ,
	RCAR_PCI_ACCESS_WRITE,
};
161
162static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
163{
164 int shift = 8 * (where & 3);
Phil Edworthyb77188492014-06-30 08:54:23 +0100165 u32 val = rcar_pci_read_reg(pcie, where & ~3);
Phil Edworthyc25da472014-05-12 11:57:48 +0100166
167 val &= ~(mask << shift);
168 val |= data << shift;
Phil Edworthyb77188492014-06-30 08:54:23 +0100169 rcar_pci_write_reg(pcie, val, where & ~3);
Phil Edworthyc25da472014-05-12 11:57:48 +0100170}
171
172static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
173{
174 int shift = 8 * (where & 3);
Phil Edworthyb77188492014-06-30 08:54:23 +0100175 u32 val = rcar_pci_read_reg(pcie, where & ~3);
Phil Edworthyc25da472014-05-12 11:57:48 +0100176
177 return val >> shift;
178}
179
180/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
181static int rcar_pcie_config_access(struct rcar_pcie *pcie,
182 unsigned char access_type, struct pci_bus *bus,
183 unsigned int devfn, int where, u32 *data)
184{
185 int dev, func, reg, index;
186
187 dev = PCI_SLOT(devfn);
188 func = PCI_FUNC(devfn);
189 reg = where & ~3;
190 index = reg / 4;
191
192 /*
193 * While each channel has its own memory-mapped extended config
194 * space, it's generally only accessible when in endpoint mode.
195 * When in root complex mode, the controller is unable to target
196 * itself with either type 0 or type 1 accesses, and indeed, any
197 * controller initiated target transfer to its own config space
198 * result in a completer abort.
199 *
200 * Each channel effectively only supports a single device, but as
201 * the same channel <-> device access works for any PCI_SLOT()
202 * value, we cheat a bit here and bind the controller's config
203 * space to devfn 0 in order to enable self-enumeration. In this
204 * case the regular ECAR/ECDR path is sidelined and the mangled
205 * config access itself is initiated as an internal bus transaction.
206 */
207 if (pci_is_root_bus(bus)) {
208 if (dev != 0)
209 return PCIBIOS_DEVICE_NOT_FOUND;
210
Phil Edworthyb77188492014-06-30 08:54:23 +0100211 if (access_type == RCAR_PCI_ACCESS_READ) {
212 *data = rcar_pci_read_reg(pcie, PCICONF(index));
Phil Edworthyc25da472014-05-12 11:57:48 +0100213 } else {
214 /* Keep an eye out for changes to the root bus number */
215 if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
216 pcie->root_bus_nr = *data & 0xff;
217
Phil Edworthyb77188492014-06-30 08:54:23 +0100218 rcar_pci_write_reg(pcie, *data, PCICONF(index));
Phil Edworthyc25da472014-05-12 11:57:48 +0100219 }
220
221 return PCIBIOS_SUCCESSFUL;
222 }
223
224 if (pcie->root_bus_nr < 0)
225 return PCIBIOS_DEVICE_NOT_FOUND;
226
227 /* Clear errors */
Phil Edworthyb77188492014-06-30 08:54:23 +0100228 rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100229
230 /* Set the PIO address */
Phil Edworthyb77188492014-06-30 08:54:23 +0100231 rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
232 PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100233
234 /* Enable the configuration access */
235 if (bus->parent->number == pcie->root_bus_nr)
Phil Edworthyb77188492014-06-30 08:54:23 +0100236 rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100237 else
Phil Edworthyb77188492014-06-30 08:54:23 +0100238 rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100239
240 /* Check for errors */
Phil Edworthyb77188492014-06-30 08:54:23 +0100241 if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
Phil Edworthyc25da472014-05-12 11:57:48 +0100242 return PCIBIOS_DEVICE_NOT_FOUND;
243
244 /* Check for master and target aborts */
245 if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
246 (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
247 return PCIBIOS_DEVICE_NOT_FOUND;
248
Phil Edworthyb77188492014-06-30 08:54:23 +0100249 if (access_type == RCAR_PCI_ACCESS_READ)
250 *data = rcar_pci_read_reg(pcie, PCIECDR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100251 else
Phil Edworthyb77188492014-06-30 08:54:23 +0100252 rcar_pci_write_reg(pcie, *data, PCIECDR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100253
254 /* Disable the configuration access */
Phil Edworthyb77188492014-06-30 08:54:23 +0100255 rcar_pci_write_reg(pcie, 0, PCIECCTLR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100256
257 return PCIBIOS_SUCCESSFUL;
258}
259
260static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
261 int where, int size, u32 *val)
262{
Phil Edworthy79953dd2015-10-02 11:25:05 +0100263 struct rcar_pcie *pcie = bus->sysdata;
Phil Edworthyc25da472014-05-12 11:57:48 +0100264 int ret;
265
Phil Edworthyb77188492014-06-30 08:54:23 +0100266 ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
Phil Edworthyc25da472014-05-12 11:57:48 +0100267 bus, devfn, where, val);
268 if (ret != PCIBIOS_SUCCESSFUL) {
269 *val = 0xffffffff;
270 return ret;
271 }
272
273 if (size == 1)
274 *val = (*val >> (8 * (where & 3))) & 0xff;
275 else if (size == 2)
276 *val = (*val >> (8 * (where & 2))) & 0xffff;
277
Ryan Desfosses227f0642014-04-18 20:13:50 -0400278 dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
279 bus->number, devfn, where, size, (unsigned long)*val);
Phil Edworthyc25da472014-05-12 11:57:48 +0100280
281 return ret;
282}
283
284/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
285static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
286 int where, int size, u32 val)
287{
Phil Edworthy79953dd2015-10-02 11:25:05 +0100288 struct rcar_pcie *pcie = bus->sysdata;
Phil Edworthyc25da472014-05-12 11:57:48 +0100289 int shift, ret;
290 u32 data;
291
Phil Edworthyb77188492014-06-30 08:54:23 +0100292 ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
Phil Edworthyc25da472014-05-12 11:57:48 +0100293 bus, devfn, where, &data);
294 if (ret != PCIBIOS_SUCCESSFUL)
295 return ret;
296
Ryan Desfosses227f0642014-04-18 20:13:50 -0400297 dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
298 bus->number, devfn, where, size, (unsigned long)val);
Phil Edworthyc25da472014-05-12 11:57:48 +0100299
300 if (size == 1) {
301 shift = 8 * (where & 3);
302 data &= ~(0xff << shift);
303 data |= ((val & 0xff) << shift);
304 } else if (size == 2) {
305 shift = 8 * (where & 2);
306 data &= ~(0xffff << shift);
307 data |= ((val & 0xffff) << shift);
308 } else
309 data = val;
310
Phil Edworthyb77188492014-06-30 08:54:23 +0100311 ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
Phil Edworthyc25da472014-05-12 11:57:48 +0100312 bus, devfn, where, &data);
313
314 return ret;
315}
316
317static struct pci_ops rcar_pcie_ops = {
318 .read = rcar_pcie_read_conf,
319 .write = rcar_pcie_write_conf,
320};
321
Phil Edworthy5d2917d2015-11-25 15:30:37 +0000322static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
323 struct resource *res)
Phil Edworthyc25da472014-05-12 11:57:48 +0100324{
325 /* Setup PCIe address space mappings for each resource */
326 resource_size_t size;
Liviu Dudau0b0b0892014-09-29 15:29:25 +0100327 resource_size_t res_start;
Phil Edworthyc25da472014-05-12 11:57:48 +0100328 u32 mask;
329
Phil Edworthyb77188492014-06-30 08:54:23 +0100330 rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
Phil Edworthyc25da472014-05-12 11:57:48 +0100331
332 /*
333 * The PAMR mask is calculated in units of 128Bytes, which
334 * keeps things pretty simple.
335 */
336 size = resource_size(res);
337 mask = (roundup_pow_of_two(size) / SZ_128) - 1;
Phil Edworthyb77188492014-06-30 08:54:23 +0100338 rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
Phil Edworthyc25da472014-05-12 11:57:48 +0100339
Liviu Dudau0b0b0892014-09-29 15:29:25 +0100340 if (res->flags & IORESOURCE_IO)
341 res_start = pci_pio_to_address(res->start);
342 else
343 res_start = res->start;
344
Nobuhiro Iwamatsuecd06302015-02-04 18:02:55 +0900345 rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
Nobuhiro Iwamatsu2ea2a272015-02-02 14:09:58 +0900346 rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
Nobuhiro Iwamatsuecd06302015-02-04 18:02:55 +0900347 PCIEPALR(win));
Phil Edworthyc25da472014-05-12 11:57:48 +0100348
349 /* First resource is for IO */
350 mask = PAR_ENABLE;
351 if (res->flags & IORESOURCE_IO)
352 mask |= IO_SPACE;
353
Phil Edworthyb77188492014-06-30 08:54:23 +0100354 rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
Phil Edworthyc25da472014-05-12 11:57:48 +0100355}
356
Phil Edworthy5d2917d2015-11-25 15:30:37 +0000357static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
Phil Edworthyc25da472014-05-12 11:57:48 +0100358{
Phil Edworthy5d2917d2015-11-25 15:30:37 +0000359 struct resource_entry *win;
360 int i = 0;
Phil Edworthyc25da472014-05-12 11:57:48 +0100361
362 /* Setup PCI resources */
Phil Edworthy5d2917d2015-11-25 15:30:37 +0000363 resource_list_for_each_entry(win, &pci->resources) {
364 struct resource *res = win->res;
Phil Edworthyc25da472014-05-12 11:57:48 +0100365
Phil Edworthyc25da472014-05-12 11:57:48 +0100366 if (!res->flags)
367 continue;
368
Phil Edworthy5d2917d2015-11-25 15:30:37 +0000369 switch (resource_type(res)) {
370 case IORESOURCE_IO:
371 case IORESOURCE_MEM:
372 rcar_pcie_setup_window(i, pci, res);
373 i++;
374 break;
375 case IORESOURCE_BUS:
376 pci->root_bus_nr = res->start;
377 break;
378 default:
379 continue;
Phil Edworthyd0c3f4d2015-10-02 11:25:04 +0100380 }
381
Phil Edworthy79953dd2015-10-02 11:25:05 +0100382 pci_add_resource(resource, res);
Phil Edworthyc25da472014-05-12 11:57:48 +0100383 }
Phil Edworthyc25da472014-05-12 11:57:48 +0100384
385 return 1;
386}
387
Phil Edworthy79953dd2015-10-02 11:25:05 +0100388static int rcar_pcie_enable(struct rcar_pcie *pcie)
Phil Edworthyc25da472014-05-12 11:57:48 +0100389{
Phil Edworthy79953dd2015-10-02 11:25:05 +0100390 struct pci_bus *bus, *child;
391 LIST_HEAD(res);
Phil Edworthyc25da472014-05-12 11:57:48 +0100392
Phil Edworthy8c53e8e2015-10-02 11:25:07 +0100393 rcar_pcie_setup(&res, pcie);
Phil Edworthyc25da472014-05-12 11:57:48 +0100394
Lorenzo Pieralisi3487c652016-01-29 11:29:31 +0000395 pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
Phil Edworthy79953dd2015-10-02 11:25:05 +0100396
397 if (IS_ENABLED(CONFIG_PCI_MSI))
398 bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
399 &rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
400 else
401 bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
402 &rcar_pcie_ops, pcie, &res);
403
404 if (!bus) {
405 dev_err(pcie->dev, "Scanning rootbus failed");
406 return -ENODEV;
407 }
408
409 pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
410
Lorenzo Pieralisi3487c652016-01-29 11:29:31 +0000411 pci_bus_size_bridges(bus);
412 pci_bus_assign_resources(bus);
Phil Edworthy79953dd2015-10-02 11:25:05 +0100413
Lorenzo Pieralisi3487c652016-01-29 11:29:31 +0000414 list_for_each_entry(child, &bus->children, node)
415 pcie_bus_configure_settings(child);
Phil Edworthy79953dd2015-10-02 11:25:05 +0100416
417 pci_bus_add_devices(bus);
418
419 return 0;
Phil Edworthyc25da472014-05-12 11:57:48 +0100420}
421
422static int phy_wait_for_ack(struct rcar_pcie *pcie)
423{
424 unsigned int timeout = 100;
425
426 while (timeout--) {
Phil Edworthyb77188492014-06-30 08:54:23 +0100427 if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
Phil Edworthyc25da472014-05-12 11:57:48 +0100428 return 0;
429
430 udelay(100);
431 }
432
433 dev_err(pcie->dev, "Access to PCIe phy timed out\n");
434
435 return -ETIMEDOUT;
436}
437
438static void phy_write_reg(struct rcar_pcie *pcie,
439 unsigned int rate, unsigned int addr,
440 unsigned int lane, unsigned int data)
441{
442 unsigned long phyaddr;
443
444 phyaddr = WRITE_CMD |
445 ((rate & 1) << RATE_POS) |
446 ((lane & 0xf) << LANE_POS) |
447 ((addr & 0xff) << ADR_POS);
448
449 /* Set write data */
Phil Edworthyb77188492014-06-30 08:54:23 +0100450 rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
451 rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100452
453 /* Ignore errors as they will be dealt with if the data link is down */
454 phy_wait_for_ack(pcie);
455
456 /* Clear command */
Phil Edworthyb77188492014-06-30 08:54:23 +0100457 rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
458 rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100459
460 /* Ignore errors as they will be dealt with if the data link is down */
461 phy_wait_for_ack(pcie);
462}
463
464static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
465{
466 unsigned int timeout = 10;
467
468 while (timeout--) {
Phil Edworthyb77188492014-06-30 08:54:23 +0100469 if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
Phil Edworthyc25da472014-05-12 11:57:48 +0100470 return 0;
471
472 msleep(5);
473 }
474
475 return -ETIMEDOUT;
476}
477
478static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
479{
480 int err;
481
482 /* Begin initialization */
Phil Edworthyb77188492014-06-30 08:54:23 +0100483 rcar_pci_write_reg(pcie, 0, PCIETCTLR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100484
485 /* Set mode */
Phil Edworthyb77188492014-06-30 08:54:23 +0100486 rcar_pci_write_reg(pcie, 1, PCIEMSR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100487
488 /*
489 * Initial header for port config space is type 1, set the device
490 * class to match. Hardware takes care of propagating the IDSETR
491 * settings, so there is no need to bother with a quirk.
492 */
Phil Edworthyb77188492014-06-30 08:54:23 +0100493 rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
Phil Edworthyc25da472014-05-12 11:57:48 +0100494
495 /*
496 * Setup Secondary Bus Number & Subordinate Bus Number, even though
497 * they aren't used, to avoid bridge being detected as broken.
498 */
499 rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
500 rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
501
502 /* Initialize default capabilities. */
Phil Edworthy2c3fd4c2014-06-30 08:54:22 +0100503 rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
Phil Edworthyc25da472014-05-12 11:57:48 +0100504 rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
505 PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
506 rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
507 PCI_HEADER_TYPE_BRIDGE);
508
509 /* Enable data link layer active state reporting */
Phil Edworthy2c3fd4c2014-06-30 08:54:22 +0100510 rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
511 PCI_EXP_LNKCAP_DLLLARC);
Phil Edworthyc25da472014-05-12 11:57:48 +0100512
513 /* Write out the physical slot number = 0 */
514 rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
515
516 /* Set the completion timer timeout to the maximum 50ms. */
Phil Edworthyb77188492014-06-30 08:54:23 +0100517 rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
Phil Edworthyc25da472014-05-12 11:57:48 +0100518
519 /* Terminate list of capabilities (Next Capability Offset=0) */
Phil Edworthy2c3fd4c2014-06-30 08:54:22 +0100520 rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
Phil Edworthyc25da472014-05-12 11:57:48 +0100521
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100522 /* Enable MSI */
523 if (IS_ENABLED(CONFIG_PCI_MSI))
Nobuhiro Iwamatsu1fc6aa92015-02-02 14:09:39 +0900524 rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100525
Phil Edworthyc25da472014-05-12 11:57:48 +0100526 /* Finish initialization - establish a PCI Express link */
Phil Edworthyb77188492014-06-30 08:54:23 +0100527 rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
Phil Edworthyc25da472014-05-12 11:57:48 +0100528
529 /* This will timeout if we don't have a link. */
530 err = rcar_pcie_wait_for_dl(pcie);
531 if (err)
532 return err;
533
534 /* Enable INTx interrupts */
535 rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
536
Phil Edworthyc25da472014-05-12 11:57:48 +0100537 wmb();
538
539 return 0;
540}
541
542static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
543{
544 unsigned int timeout = 10;
545
546 /* Initialize the phy */
547 phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
548 phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
549 phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
550 phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
551 phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
552 phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
553 phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
554 phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
555 phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
556 phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
557 phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
558 phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
559
560 phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
561 phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
562 phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
563
564 while (timeout--) {
Phil Edworthyb77188492014-06-30 08:54:23 +0100565 if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
Phil Edworthyc25da472014-05-12 11:57:48 +0100566 return rcar_pcie_hw_init(pcie);
567
568 msleep(5);
569 }
570
571 return -ETIMEDOUT;
572}
573
Phil Edworthy581d9432016-01-05 13:00:31 +0000574static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
575{
576 /*
577 * These settings come from the R-Car Series, 2nd Generation User's
578 * Manual, section 50.3.1 (2) Initialization of the physical layer.
579 */
580 rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
581 rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
582 rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
583 rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
584
585 rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
586 /* The following value is for DC connection, no termination resistor */
587 rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
588 rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
589 rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
590
591 return rcar_pcie_hw_init(pcie);
592}
593
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100594static int rcar_msi_alloc(struct rcar_msi *chip)
595{
596 int msi;
597
598 mutex_lock(&chip->lock);
599
600 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
601 if (msi < INT_PCI_MSI_NR)
602 set_bit(msi, chip->used);
603 else
604 msi = -ENOSPC;
605
606 mutex_unlock(&chip->lock);
607
608 return msi;
609}
610
Grigory Kletskoe3123c22016-09-08 22:32:59 +0300611static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
612{
613 int msi;
614
615 mutex_lock(&chip->lock);
616 msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
617 order_base_2(no_irqs));
618 mutex_unlock(&chip->lock);
619
620 return msi;
621}
622
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100623static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
624{
625 mutex_lock(&chip->lock);
626 clear_bit(irq, chip->used);
627 mutex_unlock(&chip->lock);
628}
629
630static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
631{
632 struct rcar_pcie *pcie = data;
633 struct rcar_msi *msi = &pcie->msi;
634 unsigned long reg;
635
Phil Edworthyb77188492014-06-30 08:54:23 +0100636 reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100637
638 /* MSI & INTx share an interrupt - we only handle MSI here */
639 if (!reg)
640 return IRQ_NONE;
641
642 while (reg) {
643 unsigned int index = find_first_bit(&reg, 32);
644 unsigned int irq;
645
646 /* clear the interrupt */
Phil Edworthyb77188492014-06-30 08:54:23 +0100647 rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100648
649 irq = irq_find_mapping(msi->domain, index);
650 if (irq) {
651 if (test_bit(index, msi->used))
652 generic_handle_irq(irq);
653 else
654 dev_info(pcie->dev, "unhandled MSI\n");
655 } else {
656 /* Unknown MSI, just clear it */
657 dev_dbg(pcie->dev, "unexpected MSI\n");
658 }
659
660 /* see if there's any more pending in this vector */
Phil Edworthyb77188492014-06-30 08:54:23 +0100661 reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100662 }
663
664 return IRQ_HANDLED;
665}
666
Yijing Wangc2791b82014-11-11 17:45:45 -0700667static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100668 struct msi_desc *desc)
669{
670 struct rcar_msi *msi = to_rcar_msi(chip);
671 struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
672 struct msi_msg msg;
673 unsigned int irq;
674 int hwirq;
675
676 hwirq = rcar_msi_alloc(msi);
677 if (hwirq < 0)
678 return hwirq;
679
Grigory Kletskoe3123c22016-09-08 22:32:59 +0300680 irq = irq_find_mapping(msi->domain, hwirq);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100681 if (!irq) {
682 rcar_msi_free(msi, hwirq);
683 return -EINVAL;
684 }
685
686 irq_set_msi_desc(irq, desc);
687
Phil Edworthyb77188492014-06-30 08:54:23 +0100688 msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
689 msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100690 msg.data = hwirq;
691
Jiang Liu83a18912014-11-09 23:10:34 +0800692 pci_write_msi_msg(irq, &msg);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100693
694 return 0;
695}
696
Grigory Kletskoe3123c22016-09-08 22:32:59 +0300697static int rcar_msi_setup_irqs(struct msi_controller *chip,
698 struct pci_dev *pdev, int nvec, int type)
699{
700 struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
701 struct rcar_msi *msi = to_rcar_msi(chip);
702 struct msi_desc *desc;
703 struct msi_msg msg;
704 unsigned int irq;
705 int hwirq;
706 int i;
707
708 /* MSI-X interrupts are not supported */
709 if (type == PCI_CAP_ID_MSIX)
710 return -EINVAL;
711
712 WARN_ON(!list_is_singular(&pdev->dev.msi_list));
713 desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
714
715 hwirq = rcar_msi_alloc_region(msi, nvec);
716 if (hwirq < 0)
717 return -ENOSPC;
718
719 irq = irq_find_mapping(msi->domain, hwirq);
720 if (!irq)
721 return -ENOSPC;
722
723 for (i = 0; i < nvec; i++) {
724 /*
725 * irq_create_mapping() called from rcar_pcie_probe() pre-
726 * allocates descs, so there is no need to allocate descs here.
727 * We can therefore assume that if irq_find_mapping() above
728 * returns non-zero, then the descs are also successfully
729 * allocated.
730 */
731 if (irq_set_msi_desc_off(irq, i, desc)) {
732 /* TODO: clear */
733 return -EINVAL;
734 }
735 }
736
737 desc->nvec_used = nvec;
738 desc->msi_attrib.multiple = order_base_2(nvec);
739
740 msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
741 msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
742 msg.data = hwirq;
743
744 pci_write_msi_msg(irq, &msg);
745
746 return 0;
747}
748
Yijing Wangc2791b82014-11-11 17:45:45 -0700749static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100750{
751 struct rcar_msi *msi = to_rcar_msi(chip);
752 struct irq_data *d = irq_get_irq_data(irq);
753
754 rcar_msi_free(msi, d->hwirq);
755}
756
757static struct irq_chip rcar_msi_irq_chip = {
758 .name = "R-Car PCIe MSI",
Thomas Gleixner280510f2014-11-23 12:23:20 +0100759 .irq_enable = pci_msi_unmask_irq,
760 .irq_disable = pci_msi_mask_irq,
761 .irq_mask = pci_msi_mask_irq,
762 .irq_unmask = pci_msi_unmask_irq,
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100763};
764
765static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
766 irq_hw_number_t hwirq)
767{
768 irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
769 irq_set_chip_data(irq, domain->host_data);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100770
771 return 0;
772}
773
774static const struct irq_domain_ops msi_domain_ops = {
775 .map = rcar_msi_map,
776};
777
778static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
779{
780 struct platform_device *pdev = to_platform_device(pcie->dev);
781 struct rcar_msi *msi = &pcie->msi;
782 unsigned long base;
Grigory Kletskoe3123c22016-09-08 22:32:59 +0300783 int err, i;
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100784
785 mutex_init(&msi->lock);
786
787 msi->chip.dev = pcie->dev;
788 msi->chip.setup_irq = rcar_msi_setup_irq;
Grigory Kletskoe3123c22016-09-08 22:32:59 +0300789 msi->chip.setup_irqs = rcar_msi_setup_irqs;
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100790 msi->chip.teardown_irq = rcar_msi_teardown_irq;
791
792 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
793 &msi_domain_ops, &msi->chip);
794 if (!msi->domain) {
795 dev_err(&pdev->dev, "failed to create IRQ domain\n");
796 return -ENOMEM;
797 }
798
Grigory Kletskoe3123c22016-09-08 22:32:59 +0300799 for (i = 0; i < INT_PCI_MSI_NR; i++)
800 irq_create_mapping(msi->domain, i);
801
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100802 /* Two irqs are for MSI, but they are also used for non-MSI irqs */
803 err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
Grygorii Strashko8ff0ef92015-12-10 21:18:20 +0200804 IRQF_SHARED | IRQF_NO_THREAD,
805 rcar_msi_irq_chip.name, pcie);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100806 if (err < 0) {
807 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
808 goto err;
809 }
810
811 err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
Grygorii Strashko8ff0ef92015-12-10 21:18:20 +0200812 IRQF_SHARED | IRQF_NO_THREAD,
813 rcar_msi_irq_chip.name, pcie);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100814 if (err < 0) {
815 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
816 goto err;
817 }
818
819 /* setup MSI data target */
820 msi->pages = __get_free_pages(GFP_KERNEL, 0);
821 base = virt_to_phys((void *)msi->pages);
822
Phil Edworthyb77188492014-06-30 08:54:23 +0100823 rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
824 rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100825
826 /* enable all MSI interrupts */
Phil Edworthyb77188492014-06-30 08:54:23 +0100827 rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100828
829 return 0;
830
831err:
832 irq_domain_remove(msi->domain);
833 return err;
834}
835
Phil Edworthyc25da472014-05-12 11:57:48 +0100836static int rcar_pcie_get_resources(struct platform_device *pdev,
837 struct rcar_pcie *pcie)
838{
839 struct resource res;
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100840 int err, i;
Phil Edworthyc25da472014-05-12 11:57:48 +0100841
842 err = of_address_to_resource(pdev->dev.of_node, 0, &res);
843 if (err)
844 return err;
845
Bjorn Helgaas51afa3c2016-08-22 14:16:38 -0500846 pcie->base = devm_ioremap_resource(&pdev->dev, &res);
847 if (IS_ERR(pcie->base))
848 return PTR_ERR(pcie->base);
849
Phil Edworthyc25da472014-05-12 11:57:48 +0100850 pcie->clk = devm_clk_get(&pdev->dev, "pcie");
851 if (IS_ERR(pcie->clk)) {
852 dev_err(pcie->dev, "cannot get platform clock\n");
853 return PTR_ERR(pcie->clk);
854 }
855 err = clk_prepare_enable(pcie->clk);
856 if (err)
Geert Uytterhoeven3d664b02016-08-31 11:28:22 +0200857 return err;
Phil Edworthyc25da472014-05-12 11:57:48 +0100858
859 pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
860 if (IS_ERR(pcie->bus_clk)) {
861 dev_err(pcie->dev, "cannot get pcie bus clock\n");
862 err = PTR_ERR(pcie->bus_clk);
863 goto fail_clk;
864 }
865 err = clk_prepare_enable(pcie->bus_clk);
866 if (err)
Geert Uytterhoeven3d664b02016-08-31 11:28:22 +0200867 goto fail_clk;
Phil Edworthyc25da472014-05-12 11:57:48 +0100868
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100869 i = irq_of_parse_and_map(pdev->dev.of_node, 0);
Dmitry Torokhovc51d4112014-11-14 14:21:53 -0800870 if (!i) {
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100871 dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
872 err = -ENOENT;
873 goto err_map_reg;
874 }
875 pcie->msi.irq1 = i;
876
877 i = irq_of_parse_and_map(pdev->dev.of_node, 1);
Dmitry Torokhovc51d4112014-11-14 14:21:53 -0800878 if (!i) {
Phil Edworthy290c1fb2014-05-12 11:57:49 +0100879 dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
880 err = -ENOENT;
881 goto err_map_reg;
882 }
883 pcie->msi.irq2 = i;
884
Phil Edworthyc25da472014-05-12 11:57:48 +0100885 return 0;
886
887err_map_reg:
888 clk_disable_unprepare(pcie->bus_clk);
889fail_clk:
890 clk_disable_unprepare(pcie->clk);
891
892 return err;
893}
894
895static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
896 struct of_pci_range *range,
897 int *index)
898{
899 u64 restype = range->flags;
900 u64 cpu_addr = range->cpu_addr;
901 u64 cpu_end = range->cpu_addr + range->size;
902 u64 pci_addr = range->pci_addr;
903 u32 flags = LAM_64BIT | LAR_ENABLE;
904 u64 mask;
905 u64 size;
906 int idx = *index;
907
908 if (restype & IORESOURCE_PREFETCH)
909 flags |= LAM_PREFETCH;
910
911 /*
912 * If the size of the range is larger than the alignment of the start
913 * address, we have to use multiple entries to perform the mapping.
914 */
915 if (cpu_addr > 0) {
916 unsigned long nr_zeros = __ffs64(cpu_addr);
917 u64 alignment = 1ULL << nr_zeros;
Phil Edworthyb77188492014-06-30 08:54:23 +0100918
Phil Edworthyc25da472014-05-12 11:57:48 +0100919 size = min(range->size, alignment);
920 } else {
921 size = range->size;
922 }
923 /* Hardware supports max 4GiB inbound region */
924 size = min(size, 1ULL << 32);
925
926 mask = roundup_pow_of_two(size) - 1;
927 mask &= ~0xf;
928
929 while (cpu_addr < cpu_end) {
930 /*
931 * Set up 64-bit inbound regions as the range parser doesn't
932 * distinguish between 32 and 64-bit types.
933 */
Sergei Shtylyovf7bc6382016-09-09 01:26:18 +0300934 rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
935 PCIEPRAR(idx));
Phil Edworthyb77188492014-06-30 08:54:23 +0100936 rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
Sergei Shtylyovf7bc6382016-09-09 01:26:18 +0300937 rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
938 PCIELAMR(idx));
Phil Edworthyc25da472014-05-12 11:57:48 +0100939
Sergei Shtylyovf7bc6382016-09-09 01:26:18 +0300940 rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
941 PCIEPRAR(idx + 1));
942 rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
943 PCIELAR(idx + 1));
Phil Edworthyb77188492014-06-30 08:54:23 +0100944 rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
Phil Edworthyc25da472014-05-12 11:57:48 +0100945
946 pci_addr += size;
947 cpu_addr += size;
948 idx += 2;
949
950 if (idx > MAX_NR_INBOUND_MAPS) {
951 dev_err(pcie->dev, "Failed to map inbound regions!\n");
952 return -EINVAL;
953 }
954 }
955 *index = idx;
956
957 return 0;
958}
959
960static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
961 struct device_node *node)
962{
963 const int na = 3, ns = 2;
964 int rlen;
965
966 parser->node = node;
967 parser->pna = of_n_addr_cells(node);
968 parser->np = parser->pna + na + ns;
969
970 parser->range = of_get_property(node, "dma-ranges", &rlen);
971 if (!parser->range)
972 return -ENOENT;
973
974 parser->end = parser->range + rlen / sizeof(__be32);
975 return 0;
976}
977
978static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
979 struct device_node *np)
980{
981 struct of_pci_range range;
982 struct of_pci_range_parser parser;
983 int index = 0;
984 int err;
985
986 if (pci_dma_range_parser_init(&parser, np))
987 return -EINVAL;
988
989 /* Get the dma-ranges from DT */
990 for_each_of_pci_range(&parser, &range) {
991 u64 end = range.cpu_addr + range.size - 1;
Sergei Shtylyovf7bc6382016-09-09 01:26:18 +0300992
Phil Edworthyc25da472014-05-12 11:57:48 +0100993 dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
994 range.flags, range.cpu_addr, end, range.pci_addr);
995
996 err = rcar_pcie_inbound_ranges(pcie, &range, &index);
997 if (err)
998 return err;
999 }
1000
1001 return 0;
1002}
1003
/*
 * DT match table; .data holds the per-SoC hardware init function used by
 * probe (H1, Gen2 variants, or the generic rcar_pcie_hw_init).
 */
static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
	{ .compatible = "renesas,pcie-rcar-gen2",
	  .data = rcar_pcie_hw_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7790",
	  .data = rcar_pcie_hw_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7791",
	  .data = rcar_pcie_hw_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
	{},
};
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001015
1016static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
1017{
1018 int err;
1019 struct device *dev = pci->dev;
1020 struct device_node *np = dev->of_node;
1021 resource_size_t iobase;
1022 struct resource_entry *win;
1023
Sergei Shtylyovf7bc6382016-09-09 01:26:18 +03001024 err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
1025 &iobase);
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001026 if (err)
1027 return err;
1028
Bjorn Helgaas6fd7f552016-05-31 12:20:57 -05001029 err = devm_request_pci_bus_resources(dev, &pci->resources);
1030 if (err)
1031 goto out_release_res;
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001032
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001033 resource_list_for_each_entry(win, &pci->resources) {
Bjorn Helgaas6fd7f552016-05-31 12:20:57 -05001034 struct resource *res = win->res;
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001035
Bjorn Helgaas4c540a32016-05-28 18:37:46 -05001036 if (resource_type(res) == IORESOURCE_IO) {
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001037 err = pci_remap_iospace(res, iobase);
Bjorn Helgaas4c540a32016-05-28 18:37:46 -05001038 if (err)
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001039 dev_warn(dev, "error %d: failed to map resource %pR\n",
1040 err, res);
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001041 }
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001042 }
1043
1044 return 0;
1045
1046out_release_res:
Bjorn Helgaas4c540a32016-05-28 18:37:46 -05001047 pci_free_resource_list(&pci->resources);
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001048 return err;
1049}
1050
Phil Edworthyc25da472014-05-12 11:57:48 +01001051static int rcar_pcie_probe(struct platform_device *pdev)
1052{
1053 struct rcar_pcie *pcie;
1054 unsigned int data;
Phil Edworthyc25da472014-05-12 11:57:48 +01001055 const struct of_device_id *of_id;
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001056 int err;
Phil Edworthyc25da472014-05-12 11:57:48 +01001057 int (*hw_init_fn)(struct rcar_pcie *);
1058
1059 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
1060 if (!pcie)
1061 return -ENOMEM;
1062
1063 pcie->dev = &pdev->dev;
1064 platform_set_drvdata(pdev, pcie);
1065
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001066 INIT_LIST_HEAD(&pcie->resources);
Phil Edworthyc25da472014-05-12 11:57:48 +01001067
Phil Edworthy5d2917d2015-11-25 15:30:37 +00001068 rcar_pcie_parse_request_of_pci_ranges(pcie);
Phil Edworthyc25da472014-05-12 11:57:48 +01001069
1070 err = rcar_pcie_get_resources(pdev, pcie);
1071 if (err < 0) {
1072 dev_err(&pdev->dev, "failed to request resources: %d\n", err);
1073 return err;
1074 }
1075
Sergei Shtylyovf7bc6382016-09-09 01:26:18 +03001076 err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
1077 if (err)
Phil Edworthyc25da472014-05-12 11:57:48 +01001078 return err;
1079
Phil Edworthyde1be9a2016-01-05 13:00:30 +00001080 of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
1081 if (!of_id || !of_id->data)
1082 return -EINVAL;
1083 hw_init_fn = of_id->data;
1084
1085 pm_runtime_enable(pcie->dev);
1086 err = pm_runtime_get_sync(pcie->dev);
1087 if (err < 0) {
1088 dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
1089 goto err_pm_disable;
1090 }
1091
1092 /* Failure to get a link might just be that no cards are inserted */
1093 err = hw_init_fn(pcie);
1094 if (err) {
1095 dev_info(&pdev->dev, "PCIe link down\n");
1096 err = 0;
1097 goto err_pm_put;
1098 }
1099
1100 data = rcar_pci_read_reg(pcie, MACSR);
1101 dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
1102
Phil Edworthy290c1fb2014-05-12 11:57:49 +01001103 if (IS_ENABLED(CONFIG_PCI_MSI)) {
1104 err = rcar_pcie_enable_msi(pcie);
1105 if (err < 0) {
1106 dev_err(&pdev->dev,
1107 "failed to enable MSI support: %d\n",
1108 err);
Phil Edworthyde1be9a2016-01-05 13:00:30 +00001109 goto err_pm_put;
Phil Edworthy290c1fb2014-05-12 11:57:49 +01001110 }
1111 }
1112
Phil Edworthyde1be9a2016-01-05 13:00:30 +00001113 err = rcar_pcie_enable(pcie);
1114 if (err)
1115 goto err_pm_put;
Phil Edworthyc25da472014-05-12 11:57:48 +01001116
Phil Edworthyde1be9a2016-01-05 13:00:30 +00001117 return 0;
Phil Edworthyc25da472014-05-12 11:57:48 +01001118
Phil Edworthyde1be9a2016-01-05 13:00:30 +00001119err_pm_put:
1120 pm_runtime_put(pcie->dev);
Phil Edworthyc25da472014-05-12 11:57:48 +01001121
Phil Edworthyde1be9a2016-01-05 13:00:30 +00001122err_pm_disable:
1123 pm_runtime_disable(pcie->dev);
1124 return err;
Phil Edworthyc25da472014-05-12 11:57:48 +01001125}
1126
/*
 * Built-in (non-modular) platform driver; bind/unbind via sysfs is
 * suppressed because the host controller cannot be safely removed.
 */
static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = rcar_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
builtin_platform_driver(rcar_pcie_driver);