/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2007 Cavium Networks
 * Copyright (C) 2008 Wind River Systems
 */
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/serial.h>
#include <linux/types.h>
#include <linux/string.h>	/* for memset */
#include <linux/tty.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>

#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/system.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/time.h>

#include <asm/octeon/octeon.h>

#ifdef CONFIG_CAVIUM_DECODE_RSL
extern void cvmx_interrupt_rsl_decode(void);
extern int __cvmx_interrupt_ecc_report_single_bit_errors;
extern void cvmx_interrupt_rsl_enable(void);
#endif

extern struct plat_smp_ops octeon_smp_ops;

#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif

#ifdef CONFIG_CAVIUM_RESERVE32
extern uint64_t octeon_reserve32_memory;
#endif
static unsigned long long MAX_MEMORY = 512ull << 20;

struct octeon_boot_descriptor *octeon_boot_desc_ptr;

struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);

#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#endif

static int octeon_uart;

extern asmlinkage void handle_int(void);
extern asmlinkage void plat_irq_dispatch(void);

/**
 * Return non-zero if we are currently running in the Octeon simulator
 *
 * Returns non-zero if running under the simulator, zero otherwise.
 */
int octeon_is_simulation(void)
{
        return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
}
EXPORT_SYMBOL(octeon_is_simulation);

/**
 * Return true if Octeon is in PCI Host mode. This means
 * Linux can control the PCI bus.
 *
 * Returns non-zero if Octeon is in host mode.
 */
int octeon_is_pci_host(void)
{
#ifdef CONFIG_PCI
        return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
#else
        return 0;
#endif
}

/**
 * Get the clock rate of Octeon
 *
 * Returns clock rate in Hz
 */
uint64_t octeon_get_clock_rate(void)
{
        if (octeon_is_simulation())
                octeon_bootinfo->eclock_hz = 6000000;
        return octeon_bootinfo->eclock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);

/**
 * Write to the LCD display connected to the bootbus. This display
 * exists on most Cavium evaluation boards. If it doesn't exist, then
 * this function doesn't do anything.
 *
 * @s: String to write
 */
void octeon_write_lcd(const char *s)
{
        if (octeon_bootinfo->led_display_base_addr) {
                void __iomem *lcd_address =
                        ioremap_nocache(octeon_bootinfo->led_display_base_addr,
                                        8);
                int i;
                for (i = 0; i < 8; i++, s++) {
                        if (*s)
                                iowrite8(*s, lcd_address + i);
                        else
                                iowrite8(' ', lcd_address + i);
                }
                iounmap(lcd_address);
        }
}

/**
 * Return the console uart passed by the bootloader
 *
 * Returns uart (0 or 1)
 */
int octeon_get_boot_uart(void)
{
        int uart;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
        uart = 1;
#else
        uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
                1 : 0;
#endif
        return uart;
}

/**
 * Get the coremask Linux was booted on.
 *
 * Returns Core mask
 */
int octeon_get_boot_coremask(void)
{
        return octeon_boot_desc_ptr->core_mask;
}

/**
 * Check the hardware BIST results for a CPU
 */
void octeon_check_cpu_bist(void)
{
        const int coreid = cvmx_get_core_num();
        unsigned long long mask;
        unsigned long long bist_val;

        /* Check BIST results for COP0 registers */
        mask = 0x1f00000000ull;
        bist_val = read_octeon_c0_icacheerr();
        if (bist_val & mask)
                pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
                       coreid, bist_val);

        bist_val = read_octeon_c0_dcacheerr();
        if (bist_val & 1)
                pr_err("Core%d L1 Dcache parity error: "
                       "CacheErr(dcache) = 0x%llx\n",
                       coreid, bist_val);

        mask = 0xfc00000000000000ull;
        bist_val = read_c0_cvmmemctl();
        if (bist_val & mask)
                pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
                       coreid, bist_val);

        write_octeon_c0_dcacheerr(0);
}

#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
/**
 * Called on every core to set up the wired TLB entries needed
 * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set.
 */
static void octeon_hal_setup_per_cpu_reserved32(void *unused)
{
        /*
         * The config has selected to wire the reserve32 memory for all
         * userspace applications. We need to put a wired TLB entry in for each
         * 512MB of reserve32 memory. We only handle double 256MB pages here,
         * so reserve32 must be a multiple of 512MB.
         */
        uint32_t size = CONFIG_CAVIUM_RESERVE32;
        uint32_t entrylo0 =
                0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6);
        uint32_t entrylo1 = entrylo0 + (256 << 14);
        uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20));
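        /*
         * Each iteration of the loop below wires one even/odd pair of 256MB
         * pages (512MB per wired entry).  EntryLo holds the physical address
         * shifted right by 6 with the G, V and D bits (0x7) set, so the odd
         * page starts 256MB (256 << 14 in EntryLo units) above the even one.
         */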
        while (size >= 512) {
#if 0
                pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n",
                        smp_processor_id(), entryhi);
#endif
                add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M);
                entrylo0 += 512 << 14;
                entrylo1 += 512 << 14;
                entryhi += 512 << 20;
                size -= 512;
        }
}
#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */

/**
 * Called to release the named block which was used to make sure
 * that nobody used the memory for something else during
 * init. Now we'll free it so userspace apps can use this
 * memory region with bootmem_alloc.
 *
 * This function is called only once from prom_free_prom_memory().
 */
void octeon_hal_setup_reserved32(void)
{
#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
        on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 1);
#endif
}

/**
 * Reboot Octeon
 *
 * @command: Command to pass to the bootloader. Currently ignored.
 */
static void octeon_restart(char *command)
{
        /* Disable all watchdogs before soft reset. They don't get cleared */
#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
        cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif

        mb();
        while (1)
                cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}


/**
 * Permanently stop a core.
 *
 * @arg: Ignored.
 */
static void octeon_kill_core(void *arg)
{
        mb();
        if (octeon_is_simulation()) {
                /* The simulator needs the watchdog to stop for dead cores */
                cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
                /* A break instruction causes the simulator to stop a core */
                asm volatile ("sync\nbreak");
        }
}


/**
 * Halt the system
 */
static void octeon_halt(void)
{
        smp_call_function(octeon_kill_core, NULL, 0);

        switch (octeon_bootinfo->board_type) {
        case CVMX_BOARD_TYPE_NAO38:
                /* Driving a 1 to GPIO 12 shuts off this board */
                cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
                cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
                break;
        default:
                octeon_write_lcd("PowerOff");
                break;
        }

        octeon_kill_core(NULL);
}

#if 0
/**
 * Platform time init specifics.
 */
void __init plat_time_init(void)
{
        /* Nothing special here, but we are required to have one */
}

#endif

/**
 * Handle all the error condition interrupts that might occur.
 */
#ifdef CONFIG_CAVIUM_DECODE_RSL
static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
{
        cvmx_interrupt_rsl_decode();
        return IRQ_HANDLED;
}
#endif

/**
 * Return a string representing the system type
 *
 * Returns the board type string
 */
const char *octeon_board_type_string(void)
{
        static char name[80];
        sprintf(name, "%s (%s)",
                cvmx_board_type_to_string(octeon_bootinfo->board_type),
                octeon_model_get_string(read_c0_prid()));
        return name;
}

const char *get_system_type(void)
        __attribute__ ((alias("octeon_board_type_string")));

void octeon_user_io_init(void)
{
        union octeon_cvmemctl cvmmemctl;
        union cvmx_iob_fau_timeout fau_timeout;
        union cvmx_pow_nw_tim nm_tim;
        uint64_t cvmctl;

        /* Get the current settings for CP0_CVMMEMCTL_REG */
        cvmmemctl.u64 = read_c0_cvmmemctl();
        /* R/W If set, marked write-buffer entries time out the same
         * as other entries; if clear, marked write-buffer entries
         * use the maximum timeout. */
        cvmmemctl.s.dismarkwblongto = 1;
        /* R/W If set, a merged store does not clear the write-buffer
         * entry timeout state. */
        cvmmemctl.s.dismrgclrwbto = 0;
        /* R/W Two bits that are the MSBs of the resultant CVMSEG LM
         * word location for an IOBDMA. The other 8 bits come from the
         * SCRADDR field of the IOBDMA. */
        cvmmemctl.s.iobdmascrmsb = 0;
        /* R/W If set, SYNCWS and SYNCS only order marked stores; if
         * clear, SYNCWS and SYNCS only order unmarked
         * stores. SYNCWSMARKED has no effect when DISSYNCWS is
         * set. */
        cvmmemctl.s.syncwsmarked = 0;
        /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
        cvmmemctl.s.dissyncws = 0;
        /* R/W If set, no stall happens on write buffer full. */
        if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
                cvmmemctl.s.diswbfst = 1;
        else
                cvmmemctl.s.diswbfst = 0;
        /* R/W If set (and SX set), supervisor-level loads/stores can
         * use XKPHYS addresses with VA<48>==0 */
        cvmmemctl.s.xkmemenas = 0;

        /* R/W If set (and UX set), user-level loads/stores can use
         * XKPHYS addresses with VA<48>==0 */
        cvmmemctl.s.xkmemenau = 0;

        /* R/W If set (and SX set), supervisor-level loads/stores can
         * use XKPHYS addresses with VA<48>==1 */
        cvmmemctl.s.xkioenas = 0;

        /* R/W If set (and UX set), user-level loads/stores can use
         * XKPHYS addresses with VA<48>==1 */
        cvmmemctl.s.xkioenau = 0;

        /* R/W If set, all stores act as SYNCW (NOMERGE must be set
         * when this is set) RW, reset to 0. */
        cvmmemctl.s.allsyncw = 0;

        /* R/W If set, no stores merge, and all stores reach the
         * coherent bus in order. */
        cvmmemctl.s.nomerge = 0;
        /* R/W Selects the bit in the counter used for DID time-outs:
         * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
         * between 1x and 2x this interval. For example, with
         * DIDTTO=3, expiration interval is between 16K and 32K. */
        cvmmemctl.s.didtto = 0;
        /* R/W If set, the (mem) CSR clock never turns off. */
        cvmmemctl.s.csrckalwys = 0;
        /* R/W If set, mclk never turns off. */
        cvmmemctl.s.mclkalwys = 0;
        /* R/W Selects the bit in the counter used for write buffer
         * flush time-outs; (WBFLT+11) is the bit position in an
         * internal counter used to determine expiration. The write
         * buffer expires between 1x and 2x this interval. For
         * example, with WBFLT = 0, a write buffer expires between 2K
         * and 4K cycles after the write buffer entry is allocated. */
        cvmmemctl.s.wbfltime = 0;
        /* R/W If set, do not put Istream in the L2 cache. */
        cvmmemctl.s.istrnol2 = 0;
        /* R/W The write buffer threshold. */
        cvmmemctl.s.wbthresh = 10;
        /* R/W If set, CVMSEG is available for loads/stores in
         * kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
        cvmmemctl.s.cvmsegenak = 1;
#else
        cvmmemctl.s.cvmsegenak = 0;
#endif
        /* R/W If set, CVMSEG is available for loads/stores in
         * supervisor mode. */
        cvmmemctl.s.cvmsegenas = 0;
        /* R/W If set, CVMSEG is available for loads/stores in user
         * mode. */
        cvmmemctl.s.cvmsegenau = 0;
        /* R/W Size of local memory in cache blocks, 54 (6912 bytes)
         * is max legal value. */
        cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;


        if (smp_processor_id() == 0)
                pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
                          CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
                          CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);

        write_c0_cvmmemctl(cvmmemctl.u64);

        /* Move the performance counter interrupts to IRQ 6 */
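        /*
         * The 3-bit field at CvmCtl[9:7] selects which interrupt line the
         * performance counters raise; writing 6 routes them to IRQ 6.
         */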
        cvmctl = read_c0_cvmctl();
        cvmctl &= ~(7 << 7);
        cvmctl |= 6 << 7;
        write_c0_cvmctl(cvmctl);

        /* Set a default for the hardware timeouts */
        fau_timeout.u64 = 0;
        fau_timeout.s.tout_val = 0xfff;
        /* Disable tagwait FAU timeout */
        fau_timeout.s.tout_enb = 0;
        cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);

        nm_tim.u64 = 0;
        /* 4096 cycles */
        nm_tim.s.nw_tim = 3;
        cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);

        write_octeon_c0_icacheerr(0);
        write_c0_derraddr1(0);
}

/**
 * Early entry point for arch setup
 */
void __init prom_init(void)
{
        struct cvmx_sysinfo *sysinfo;
        const int coreid = cvmx_get_core_num();
        int i;
        int argc;
        struct uart_port octeon_port;
#ifdef CONFIG_CAVIUM_RESERVE32
        int64_t addr = -1;
#endif
        /*
         * The bootloader passes a pointer to the boot descriptor in
         * $a3, this is available as fw_arg3.
         */
        octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
        octeon_bootinfo =
                cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
        cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));

        /*
         * Only enable the LED controller if we're running on a CN38XX, CN58XX,
         * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
         */
        if (!octeon_is_simulation() &&
            octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
                cvmx_write_csr(CVMX_LED_EN, 0);
                cvmx_write_csr(CVMX_LED_PRT, 0);
                cvmx_write_csr(CVMX_LED_DBG, 0);
                cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
                cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
                cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
                cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
                cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
                cvmx_write_csr(CVMX_LED_EN, 1);
        }
#ifdef CONFIG_CAVIUM_RESERVE32
        /*
         * We need to temporarily allocate all memory in the reserve32
         * region. This makes sure the kernel doesn't allocate this
         * memory when it is getting memory from the
         * bootloader. Later, after the memory allocations are
         * complete, the reserve32 will be freed.
         */
#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
        if (CONFIG_CAVIUM_RESERVE32 & 0x1ff)
                pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. "
                       "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB "
                       "is set\n");
        else
                addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
                                                          0, 0, 512 << 20,
                                                          "CAVIUM_RESERVE32", 0);
#else
        /*
         * Allocate memory for RESERVED32 aligned on 2MB boundary. This
         * is in case we later use hugetlb entries with it.
         */
        addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
                                                  0, 0, 2 << 20,
                                                  "CAVIUM_RESERVE32", 0);
#endif
        if (addr < 0)
                pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
        else
                octeon_reserve32_memory = addr;
#endif

#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
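        /*
         * L2D_FUS3 bits [35:34] are nonzero on parts where some of the L2
         * cache has been fused off, so locking is skipped there.
         */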
        if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
                pr_info("Skipping L2 locking due to reduced L2 cache size\n");
        } else {
                uint32_t ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
                /* TLB refill */
                cvmx_l2c_lock_mem_region(ebase, 0x100);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
                /* General exception */
                cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
                /* Interrupt handler */
                cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
                cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
                cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
                cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
#endif
        }
#endif

        sysinfo = cvmx_sysinfo_get();
        memset(sysinfo, 0, sizeof(*sysinfo));
        sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
        sysinfo->phy_mem_desc_ptr =
                cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
        sysinfo->core_mask = octeon_bootinfo->core_mask;
        sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
        sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
        sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
        sysinfo->board_type = octeon_bootinfo->board_type;
        sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
        sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
        memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
               sizeof(sysinfo->mac_addr_base));
        sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
        memcpy(sysinfo->board_serial_number,
               octeon_bootinfo->board_serial_number,
               sizeof(sysinfo->board_serial_number));
        sysinfo->compact_flash_common_base_addr =
                octeon_bootinfo->compact_flash_common_base_addr;
        sysinfo->compact_flash_attribute_base_addr =
                octeon_bootinfo->compact_flash_attribute_base_addr;
        sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
        sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
        sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;


        octeon_check_cpu_bist();

        octeon_uart = octeon_get_boot_uart();

        /*
         * Disable All CIU Interrupts. The ones we need will be
         * enabled later. Read the SUM register so we know the write
         * completed.
         */
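        /*
         * The CIU provides two enable/sum sets per core (indices 2*coreid
         * and 2*coreid + 1), one for each of the core's interrupt lines.
         */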
        cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
        cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));

#ifdef CONFIG_SMP
        octeon_write_lcd("LinuxSMP");
#else
        octeon_write_lcd("Linux");
#endif

#ifdef CONFIG_CAVIUM_GDB
        /*
         * When debugging the Linux kernel, force the cores to enter
         * the debug exception handler to break in.
         */
        if (octeon_get_boot_debug_flag()) {
                cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
                cvmx_read_csr(CVMX_CIU_DINT);
        }
#endif

        /*
         * BIST should always be enabled when doing a soft reset. L2
         * cache locking for instance is not cleared unless BIST is
         * enabled. Unfortunately, due to chip errata G-200 for
         * CN38XX and CN31XX, BIST must be disabled on these parts.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
            OCTEON_IS_MODEL(OCTEON_CN31XX))
                cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
        else
                cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);

        /* Default to 64MB in the simulator to speed things up */
        if (octeon_is_simulation())
                MAX_MEMORY = 64ull << 20;

        arcs_cmdline[0] = 0;
        argc = octeon_boot_desc_ptr->argc;
        for (i = 0; i < argc; i++) {
                const char *arg =
                        cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
                if ((strncmp(arg, "MEM=", 4) == 0) ||
                    (strncmp(arg, "mem=", 4) == 0)) {
                        sscanf(arg + 4, "%llu", &MAX_MEMORY);
                        MAX_MEMORY <<= 20;
                        if (MAX_MEMORY == 0)
                                MAX_MEMORY = 32ull << 30;
                } else if (strcmp(arg, "ecc_verbose") == 0) {
#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
                        __cvmx_interrupt_ecc_report_single_bit_errors = 1;
                        pr_notice("Reporting of single bit ECC errors is "
                                  "turned on\n");
#endif
                } else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
                           sizeof(arcs_cmdline) - 1) {
                        strcat(arcs_cmdline, " ");
                        strcat(arcs_cmdline, arg);
                }
        }

        if (strstr(arcs_cmdline, "console=") == NULL) {
#ifdef CONFIG_GDB_CONSOLE
                strcat(arcs_cmdline, " console=gdb");
#else
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
                strcat(arcs_cmdline, " console=ttyS0,115200");
#else
                if (octeon_uart == 1)
                        strcat(arcs_cmdline, " console=ttyS1,115200");
                else
                        strcat(arcs_cmdline, " console=ttyS0,115200");
#endif
#endif
        }

        if (octeon_is_simulation()) {
                /*
                 * The simulator uses a mtdram device pre-filled with
                 * the filesystem. Also specify the calibration delay
                 * to avoid calculating it every time.
                 */
                strcat(arcs_cmdline, " rw root=1f00"
                       " lpj=60176 slram=root,0x40000000,+1073741824");
        }

        mips_hpt_frequency = octeon_get_clock_rate();

        octeon_init_cvmcount();

        _machine_restart = octeon_restart;
        _machine_halt = octeon_halt;

        memset(&octeon_port, 0, sizeof(octeon_port));
        /*
         * For early_serial_setup we don't set the port type or
         * UPF_FIXED_TYPE.
         */
        octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ;
        octeon_port.iotype = UPIO_MEM;
        /* I/O addresses are every 8 bytes */
        octeon_port.regshift = 3;
        /* Clock rate of the chip */
        octeon_port.uartclk = mips_hpt_frequency;
        octeon_port.fifosize = 64;
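        /*
         * The UART CSRs start at MIO base 0x0001180000000800 and each UART
         * occupies a 1KB window, so UART1 sits 1024 bytes above UART0.
         */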
        octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart);
        octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase);
        octeon_port.serial_in = octeon_serial_in;
        octeon_port.serial_out = octeon_serial_out;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
        octeon_port.line = 0;
#else
        octeon_port.line = octeon_uart;
#endif
        octeon_port.irq = 42 + octeon_uart;
        early_serial_setup(&octeon_port);

        octeon_user_io_init();
        register_smp_ops(&octeon_smp_ops);
}

void __init plat_mem_setup(void)
{
        uint64_t mem_alloc_size;
        uint64_t total;
        int64_t memory;

        total = 0;

        /* First add the init memory we will be returning. */
        memory = __pa_symbol(&__init_begin) & PAGE_MASK;
        mem_alloc_size = (__pa_symbol(&__init_end) & PAGE_MASK) - memory;
        if (mem_alloc_size > 0) {
                add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
                total += mem_alloc_size;
        }

        /*
         * The MIPS memory init uses the first memory location for
         * some memory vectors. When SPARSEMEM is in use, it doesn't
         * verify that the size is big enough for the final
         * vectors. Making the smallest chunk 4MB seems to be enough
         * to consistently work.
         */
        mem_alloc_size = 4 << 20;
        if (mem_alloc_size > MAX_MEMORY)
                mem_alloc_size = MAX_MEMORY;

        /*
         * When allocating memory, we want incrementing addresses from
         * bootmem_alloc so the code in add_memory_region can merge
         * regions next to each other.
         */
        cvmx_bootmem_lock();
        while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
               && (total < MAX_MEMORY)) {
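                /*
                 * Allocation bounds depend on how much physical memory the
                 * kernel can address: 64-bit kernels may allocate anywhere
                 * above the kernel image, HIGHMEM kernels below 2GB, and
                 * all others below 512MB.
                 */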
#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
                                                __pa_symbol(&__init_end), -1,
                                                0x100000,
                                                CVMX_BOOTMEM_FLAG_NO_LOCKING);
#elif defined(CONFIG_HIGHMEM)
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31,
                                                0x100000,
                                                CVMX_BOOTMEM_FLAG_NO_LOCKING);
#else
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20,
                                                0x100000,
                                                CVMX_BOOTMEM_FLAG_NO_LOCKING);
#endif
                if (memory >= 0) {
                        /*
                         * This function automatically merges address
                         * regions next to each other if they are
                         * received in incrementing order.
                         */
                        add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
                        total += mem_alloc_size;
                } else {
                        break;
                }
        }
        cvmx_bootmem_unlock();

#ifdef CONFIG_CAVIUM_RESERVE32
        /*
         * Now that we've allocated the kernel memory it is safe to
         * free the reserved region. We free it here so that builtin
         * drivers can use the memory.
         */
        if (octeon_reserve32_memory)
                cvmx_bootmem_free_named("CAVIUM_RESERVE32");
#endif /* CONFIG_CAVIUM_RESERVE32 */

        if (total == 0)
                panic("Unable to allocate memory from "
                      "cvmx_bootmem_phy_alloc\n");
}


int prom_putchar(char c)
{
        uint64_t lsrval;

        /* Spin until there is room */
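        /* LSR bit 5 (0x20) is THRE: transmitter holding register empty. */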
        do {
                lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
        } while ((lsrval & 0x20) == 0);

        /* Write the byte */
        cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c);
        return 1;
}

void prom_free_prom_memory(void)
{
#ifdef CONFIG_CAVIUM_DECODE_RSL
        cvmx_interrupt_rsl_enable();

        /* Add an interrupt handler for general failures. */
        if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
                        "RML/RSL", octeon_rlm_interrupt)) {
                panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
        }
#endif

        /* This call is here so that it is performed after any TLB
           initializations. It needs to be after these in case the
           CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set */
        octeon_hal_setup_reserved32();
}

static struct octeon_cf_data octeon_cf_data;

static int __init octeon_cf_device_init(void)
{
        union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
        unsigned long base_ptr, region_base, region_size;
        struct platform_device *pd;
        struct resource cf_resources[3];
        unsigned int num_resources;
        int i;
        int ret = 0;

        /* Setup octeon-cf platform device if present. */
        base_ptr = 0;
        if (octeon_bootinfo->major_version == 1
            && octeon_bootinfo->minor_version >= 1) {
                if (octeon_bootinfo->compact_flash_common_base_addr)
                        base_ptr =
                                octeon_bootinfo->compact_flash_common_base_addr;
        } else {
                base_ptr = 0x1d000800;
        }

        if (!base_ptr)
                return ret;

        /* Find CS0 region. */
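        /* MIO_BOOT_REG_CFG base and size fields are in 64KB units, hence the << 16. */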
        for (i = 0; i < 8; i++) {
                mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i));
                region_base = mio_boot_reg_cfg.s.base << 16;
                region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
                if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
                    && base_ptr < region_base + region_size)
                        break;
        }
        if (i >= 7) {
                /* i and i + 1 are CS0 and CS1, both must be less than 8. */
                goto out;
        }
        octeon_cf_data.base_region = i;
        octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width;
        octeon_cf_data.base_region_bias = base_ptr - region_base;
        memset(cf_resources, 0, sizeof(cf_resources));
        num_resources = 0;
        cf_resources[num_resources].flags = IORESOURCE_MEM;
        cf_resources[num_resources].start = region_base;
        cf_resources[num_resources].end = region_base + region_size - 1;
        num_resources++;


        if (!(base_ptr & 0xfffful)) {
                /*
                 * Boot loader signals availability of DMA (true_ide
                 * mode) by setting low order bits of base_ptr to
                 * zero.
                 */

                /* Assume that CS1 immediately follows. */
                mio_boot_reg_cfg.u64 =
                        cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1));
                region_base = mio_boot_reg_cfg.s.base << 16;
                region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
                if (!mio_boot_reg_cfg.s.en)
                        goto out;

                cf_resources[num_resources].flags = IORESOURCE_MEM;
                cf_resources[num_resources].start = region_base;
                cf_resources[num_resources].end = region_base + region_size - 1;
                num_resources++;

                octeon_cf_data.dma_engine = 0;
                cf_resources[num_resources].flags = IORESOURCE_IRQ;
                cf_resources[num_resources].start = OCTEON_IRQ_BOOTDMA;
                cf_resources[num_resources].end = OCTEON_IRQ_BOOTDMA;
                num_resources++;
        } else {
                octeon_cf_data.dma_engine = -1;
        }

        pd = platform_device_alloc("pata_octeon_cf", -1);
        if (!pd) {
                ret = -ENOMEM;
                goto out;
        }
        pd->dev.platform_data = &octeon_cf_data;

        ret = platform_device_add_resources(pd, cf_resources, num_resources);
        if (ret)
                goto fail;

        ret = platform_device_add(pd);
        if (ret)
                goto fail;

        return ret;
fail:
        platform_device_put(pd);
out:
        return ret;
}
device_initcall(octeon_cf_device_init);