/*
 * Broadcom specific AMBA
 * PCI Core in hostmode
 *
 * Copyright 2005 - 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>

/* Probe a 32bit value on the bus and catch bus exceptions.
 * Returns nonzero on a bus exception.
 * This is MIPS specific */
#define mips_busprobe32(val, addr)	get_dbe((val), ((u32 *)(addr)))

/* Assume one-hot slot wiring */
#define BCMA_PCI_SLOT_MAX	16
#define PCI_CONFIG_SPACE_SIZE	256

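/* Check whether the PCI(e) core is wired up as the host (root) controller
 * of this SoC and actually responds on the backplane. */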
bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	u16 chipid_top;
	u32 tmp;

	chipid_top = (bus->chipinfo.id & 0xFF00);
	if (chipid_top != 0x4700 &&
	    chipid_top != 0x5300)
		return false;

	if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
		pr_info("This PCI core is disabled and not working\n");
		return false;
	}

	bcma_core_enable(pc->core, 0);

	return !mips_busprobe32(tmp, pc->core->io_addr);
}

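/* Indirect access to the PCIe core's own configuration registers through
 * the CONFIG_ADDR/CONFIG_DATA register pair. The read-back of CONFIG_ADDR
 * makes sure the address write has reached the core before the data
 * register is accessed. */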
static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
}

static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
				   u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
}

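/* Translate (device, function, offset) into a physical address inside the
 * external config space window. Returns 0 when no access is possible,
 * i.e. the slot number is out of range or the PCIe link is down. */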
static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
				  unsigned int func, unsigned int off)
{
	u32 addr = 0;

	/* Issue config commands only when the data link is up (at least
	 * one external PCIe device is present).
	 */
	if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
			  & BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
		goto out;

	/* Type 0 transaction */
	/* Slide the PCI window to the appropriate slot */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
	/* Calculate the address */
	addr = pc->host_controller->host_cfg_addr;
	addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
	addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
	addr |= (off & ~3);

out:
	return addr;
}

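/* Read len (1, 2 or 4) bytes from the config space of an external PCI
 * device. Device 0 is the host bridge itself, accessed through the core
 * registers (indirectly for offsets >= 256); all other devices are reached
 * through a temporary mapping of the config window. */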
static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
				   unsigned int func, unsigned int off,
				   void *buf, int len)
{
	int err = -EINVAL;
	u32 addr, val;
	void __iomem *mmio = NULL;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* we support only two functions on device 0 */
		if (func > 1)
			return -EINVAL;

		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE) {
			addr = (func << 12);
			addr |= (off & 0x0FFF);
			val = bcma_pcie_read_config(pc, addr);
		} else {
			addr = BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xfc);
			val = pcicore_read32(pc, addr);
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap_nocache(addr, len);
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xffffffff;
			goto unmap;
		}

		val = readl(mmio);
	}
	val >>= (8 * (off & 3));

	switch (len) {
	case 1:
		*((u8 *)buf) = (u8)val;
		break;
	case 2:
		*((u16 *)buf) = (u16)val;
		break;
	case 4:
		*((u32 *)buf) = (u32)val;
		break;
	}
	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}

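/* Write len (1, 2 or 4) bytes to the config space of an external PCI
 * device, using a read-modify-write of the mapped word for sub-word
 * accesses. */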
static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
				    unsigned int func, unsigned int off,
				    const void *buf, int len)
{
	int err = -EINVAL;
	u32 addr = 0, val = 0;
	void __iomem *mmio = NULL;
	u16 chipid = pc->core->bus->chipinfo.id;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off < PCI_CONFIG_SPACE_SIZE) {
			addr = pc->core->addr + BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xfc);
			mmio = ioremap_nocache(addr, len);
			if (!mmio)
				goto out;
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap_nocache(addr, len);
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xffffffff;
			goto unmap;
		}
	}

	switch (len) {
	case 1:
		val = readl(mmio);
		val &= ~(0xFF << (8 * (off & 3)));
		val |= *((const u8 *)buf) << (8 * (off & 3));
		break;
	case 2:
		val = readl(mmio);
		val &= ~(0xFFFF << (8 * (off & 3)));
		val |= *((const u16 *)buf) << (8 * (off & 3));
		break;
	case 4:
		val = *((const u32 *)buf);
		break;
	}
	if (dev == 0 && !addr) {
		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		addr = (func << 12);
		addr |= (off & 0x0FFF);
		bcma_pcie_write_config(pc, addr, val);
	} else {
		writel(val, mmio);

		if (chipid == 0x4716 || chipid == 0x4748)
			readl(mmio);
	}

	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}

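/* pci_ops read hook: look up the host controller from the bus ops pointer,
 * serialize the config cycle with cfgspace_lock and translate the result
 * into a PCIBIOS status code. */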
static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
					      unsigned int devfn,
					      int reg, int size, u32 *val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;

	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;

	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
				      PCI_FUNC(devfn), reg, val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

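/* pci_ops write hook, the counterpart of the read hook above. */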
static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
					       unsigned int devfn,
					       int reg, int size, u32 val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;

	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;

	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
				       PCI_FUNC(devfn), reg, &val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

/* return cap_offset if requested capability exists in the PCI config space */
static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
					     unsigned int dev,
					     unsigned int func, u8 req_cap_id,
					     unsigned char *buf, u32 *buflen)
{
	u8 cap_id;
	u8 cap_ptr = 0;
	u32 bufsize;
	u8 byte_val;

	/* check for Header type 0 */
	bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
				sizeof(u8));
	if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
		return cap_ptr;

	/* check if the capability pointer field exists */
	bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
				sizeof(u8));
	if (!(byte_val & PCI_STATUS_CAP_LIST))
		return cap_ptr;

	/* check if the capability pointer is 0x00 */
	bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
				sizeof(u8));
	if (cap_ptr == 0x00)
		return cap_ptr;

	/* loop through the capability list and see if the requested
	 * capability exists */
	bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
	while (cap_id != req_cap_id) {
		bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
					sizeof(u8));
		if (cap_ptr == 0x00)
			return cap_ptr;
		bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
					sizeof(u8));
	}

	/* found the capability requested by the caller */
	if ((buf != NULL) && (buflen != NULL)) {
		u8 cap_data;

		bufsize = *buflen;
		if (!bufsize)
			return cap_ptr;

		*buflen = 0;

		/* copy the capability data excluding cap ID and next ptr */
		cap_data = cap_ptr + 2;
		if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
			bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
		*buflen = bufsize;
		while (bufsize--) {
			bcma_extpci_read_config(pc, dev, func, cap_data, buf,
						sizeof(u8));
			cap_data++;
			buf++;
		}
	}

	return cap_ptr;
}

/* If the root port is capable of returning Config Request
 * Retry Status (CRS) Completion Status to software then
 * enable the feature.
 */
static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
	u8 cap_ptr, root_ctrl, root_cap, dev;
	u16 val16;
	int i;

	cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL,
					   NULL);
	root_cap = cap_ptr + PCI_EXP_RTCAP;
	bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
	if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
		/* Enable CRS software visibility */
		root_ctrl = cap_ptr + PCI_EXP_RTCTL;
		val16 = PCI_EXP_RTCTL_CRSSVE;
		bcma_extpci_write_config(pc, 0, 0, root_ctrl, &val16,
					 sizeof(u16));

		/* Initiate a configuration request to read the vendor id
		 * field of the device function's config space header after
		 * a 100 ms wait time from the end of reset. If the device is
		 * not done with its internal initialization, it must at
		 * least return a completion TLP with a completion status
		 * of "Configuration Request Retry Status (CRS)". The root
		 * complex must complete the request to the host by returning
		 * a read-data value of 0001h for the Vendor ID field and
		 * all 1s for any additional bytes included in the request.
		 * Poll with config reads for a max wait time of 1 sec or
		 * until we receive a successful completion status. Repeat
		 * the procedure for all the devices.
		 */
		for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) {
			for (i = 0; i < 100000; i++) {
				bcma_extpci_read_config(pc, dev, 0,
							PCI_VENDOR_ID, &val16,
							sizeof(val16));
				if (val16 != 0x1)
					break;
				udelay(10);
			}
			if (val16 == 0x1)
				pr_err("PCI: Broken device in slot %d\n", dev);
		}
	}
}

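/* Set up the PCI(e) core as a host controller: program the SBtoPCI address
 * windows, reset the root complex, enable CRS handling and register the
 * controller with the MIPS PCI code. */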
void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	struct bcma_drv_pci_host *pc_host;
	u32 tmp;
	u32 pci_membase_1G;
	unsigned long io_map_base;

	pr_info("PCIEcore in host mode found\n");

	pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
	if (!pc_host) {
		pr_err("can not allocate memory\n");
		return;
	}

	pc->host_controller = pc_host;
	pc_host->pci_controller.io_resource = &pc_host->io_resource;
	pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
	pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
	pc_host->pdev = pc;

	pci_membase_1G = BCMA_SOC_PCI_DMA;
	pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;

	pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
	pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;

	pc_host->mem_resource.name = "BCMA PCIcore external memory";
	pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
	pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
	pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;

	pc_host->io_resource.name = "BCMA PCIcore external I/O";
	pc_host->io_resource.start = 0x100;
	pc_host->io_resource.end = 0x7FF;
	pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;

	/* Reset RC */
	udelay(3000);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
	udelay(1000);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
			BCMA_CORE_PCI_CTL_RST_OE);

	/* 64 MB I/O access window. On 4716, use
	 * sbtopcie0 to access the device registers. We
	 * can't use address match 2 (1 GB window) region
	 * as MIPS can't generate a 64-bit address on the
	 * backplane.
	 */
	if (bus->chipinfo.id == 0x4716 || bus->chipinfo.id == 0x4748) {
		pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
		pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
					    BCMA_SOC_PCI_MEM_SZ - 1;
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
	} else if (bus->chipinfo.id == 0x5300) {
		tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
		tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
		tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
		if (pc->core->core_unit == 0) {
			pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI_MEM);
		} else if (pc->core->core_unit == 1) {
			pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
			pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI1_MEM);
		}
	} else
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_IO);

	/* 64 MB configuration access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);

	/* 1 GB memory access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
			BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);

	/* As per PCI Express Base Spec 1.1 we need to wait for
	 * at least 100 ms from the end of a reset (cold/warm/hot)
	 * before issuing configuration requests to PCI Express
	 * devices.
	 */
	udelay(100000);

	bcma_core_pci_enable_crs(pc);

	/* Enable PCI bridge BAR0 memory & master access */
	tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));

	/* Enable PCI interrupts */
	pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);

	/* Ok, ready to run, register it with the system.
	 * The following needs to change if we want to port hostmode
	 * to a non-MIPS platform. */
	io_map_base = (unsigned long)ioremap_nocache(BCMA_SOC_PCI_MEM,
						     0x04000000);
	pc_host->pci_controller.io_map_base = io_map_base;
	set_io_port_base(pc_host->pci_controller.io_map_base);
	/* Give the PCI controller some time to configure itself with the
	 * new values. Not waiting at this point causes crashes of the
	 * machine. */
	mdelay(10);
	register_pci_controller(&pc_host->pci_controller);
}

/* Early PCI fixup for a device on the PCI-core bridge. */
static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
{
	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) != 0)
		return;

	pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));

	/* Enable PCI bridge bus mastering and memory space */
	pci_set_master(dev);
	if (pcibios_enable_device(dev, ~0) < 0) {
		pr_err("PCI: BCMA bridge enable failed\n");
		return;
	}

	/* Enable PCI bridge BAR1 prefetch and burst */
	pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);

/* Early PCI fixup for all PCI-cores to set the correct memory address. */
static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
{
	struct resource *res;
	int pos;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) == 0)
		return;

	pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));

	for (pos = 0; pos < 6; pos++) {
		res = &dev->resource[pos];
		if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM))
			pci_assign_resource(dev, pos);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);

/* This function is called when doing a pci_enable_device().
 * We must first check whether the device is on the PCI-core bridge. */
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}
	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);

	pr_info("PCI: Fixing up device %s\n", pci_name(dev));

	/* Fix up interrupt lines */
	dev->irq = bcma_core_mips_irq(pc_host->pdev->core) + 2;
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);

	return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);

/* PCI device IRQ mapping. */
int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}

	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);
	return bcma_core_mips_irq(pc_host->pdev->core) + 2;
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);