/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>

	.text

#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr

_GLOBAL(call_handle_irq)
	mflr	r0
	stw	r0,4(r1)
	mtctr	r6
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
	mr	r1,r5
	bctrl
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr
#endif /* CONFIG_IRQSTACKS */
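
/*
 * Both routines above use the same stack-switch idiom: stwu stores the
 * old r1 into the new stack's back-chain slot while computing the new
 * stack pointer, the handler runs on that stack, and lwz r1,0(r1)
 * unwinds through the back chain. A rough C-side view (the prototypes
 * are an assumption here; they live on the C side, not in this file):
 *
 *	void call_do_softirq(struct thread_info *irqtp);
 *	void call_handle_irq(int irq, void *desc,
 *			     struct thread_info *irqtp, void *handler);
 */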

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr
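
/*
 * A rough C model of mulhdu (illustration only, not part of the build):
 * A arrives in r3/r4 (high/low), B in r5/r6, and the result is the high
 * 64 bits of the 128-bit product, assembled from 32-bit partial
 * products just as the assembly does.
 *
 *	u64 mulhdu_model(u64 a, u64 b)
 *	{
 *		u32 ah = a >> 32, al = a, bh = b >> 32, bl = b;
 *		u64 lo = (u64)al * bl;
 *		u64 t1 = (u64)ah * bl + (lo >> 32);
 *		u64 t2 = (u64)al * bh + (u32)t1;
 *		return (u64)ah * bh + (t1 >> 32) + (t2 >> 32);
 *	}
 */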

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr
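
/*
 * The bl 1f / mflr pair above is the usual position-independent trick:
 * the link register receives the run-time address of label 1, while
 * 1b@ha/1b@l give its link-time address; their difference is the
 * current relocation offset.
 */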

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr
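
/*
 * Roughly equivalent C (illustration only; "runtime" marks an address
 * corrected by the bl/mflr-derived relocation offset):
 *
 *	void reloc_got2_model(long offset)
 *	{
 *		unsigned long *p = runtime(__got2_start);
 *		unsigned long n = (__got2_end - __got2_start) / 4;
 *		while (n--)
 *			*p++ += offset;
 *	}
 */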

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

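/*
 * Note the ordering above: cmpwi tests the unrelocated setup_cpu
 * pointer for NULL, the add then applies the data offset, and beqlr
 * still sees cr0 from the cmpwi, so a NULL setup function simply
 * returns.
 */
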
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPUs. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read current HID1 value */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from value read */
	or	r4,r4,r5	/* (rlwimi could do this in one step) */
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,(31-THREAD_SHIFT)
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
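
/*
 * Both routines above follow the same pattern: MSR:EE is cleared around
 * the SPR update, and the HID writes are bracketed with sync/isync so
 * the frequency switch takes effect before execution continues.
 */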

/*
 * Clear the bits in nmask from the MSR, then OR in the given value:
 * _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */
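
/*
 * Equivalent C, roughly: mtmsr((mfmsr() & ~nmask) | value), with SYNC
 * before the mtmsr for chip revs that need it and isync after so the
 * new context is in effect on return.
 */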

#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	lbz	r3,0(r3)
	sync
	mtmsr	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	stb	r3,0(r4)
	sync
	mtmsr	r7
	sync
	isync
	blr

#endif /* CONFIG_40x */
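
/*
 * In both routines above the ori/xori pair computes msr & ~MSR_DR:
 * ori forces the bit on and xori flips it back off without touching
 * any other bit, so data translation is disabled just around the
 * single byte access.
 */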

/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0, r3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0, r3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr
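
/*
 * In the hash-MMU SMP path above, mmu_hash_lock is taken with both
 * MSR_EE and MSR_DR cleared, and is addressed through tophys() so the
 * lock word is reached by its physical address; a TLB miss or an
 * interrupt taken while holding the lock could otherwise deadlock here.
 */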

/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	/* We run the search with interrupts disabled because we have to
	 * change the PID and we don't want to be preempted while it is set.
	 */
	mfmsr	r5
	mfspr	r6,SPRN_PID
	wrteei	0
	mtspr	SPRN_PID,r4
	tlbsx.	r3, 0, r3
	mtspr	SPRN_PID,r6
	wrtee	r5
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:

#elif defined(CONFIG_44x)
	mfspr	r5,SPRN_MMUCR
	rlwimi	r5,r4,0,24,31			/* Set TID */

	/* We have to run the search with interrupts disabled, even critical
	 * and debug interrupts (in fact the only critical exceptions we have
	 * are debug and machine check).  Otherwise an interrupt which causes
	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
	mfmsr	r4
	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
	andc	r6,r4,r6
	mtmsr	r6
	mtspr	SPRN_MMUCR,r5
	tlbsx.	r3, 0, r3
	mtmsr	r4
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 22 is
	 * clear.  Since 22 is the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	tlbivax	0, r4
	tlbivax	0, r5
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx/FSL_BOOKE */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_KPROBE(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
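
/*
 * The loop structure above, sketched in C (illustration only; dcbst,
 * icbi, sync and isync stand for the corresponding instructions):
 *
 *	for (p = start & ~(L1_CACHE_BYTES - 1); p < stop; p += L1_CACHE_BYTES)
 *		dcbst(p);	// push dirty data lines to memory
 *	sync();			// wait for the stores to land
 *	for (p = start & ~(L1_CACHE_BYTES - 1); p < stop; p += L1_CACHE_BYTES)
 *		icbi(p);	// discard stale instruction lines
 *	sync(); isync();	// then refetch
 */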
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr
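
/*
 * The three range helpers above differ only in the cache operation:
 * dcbst writes dirty lines back and keeps them, dcbf writes back and
 * invalidates, and dcbi invalidates without writing back, which is
 * destructive and only safe when the cached data is known to be stale.
 */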

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifndef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache and
	 * we don't have access to the virtual address here (it's not the
	 * page vaddr but where it's mapped in user space). The flushing of
	 * the icache on these is handled elsewhere, when a change in the
	 * address space occurs, before returning to user space.
	 */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
#endif /* CONFIG_44x */
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10			/* restore DR */
	isync
	blr

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr
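
/*
 * In C terms this zeroes (4096 << order) bytes a cache line at a time;
 * dcbz establishes each line in the cache already zeroed, so nothing
 * is read from memory. The 8xx variant stores four words per 16-byte
 * line instead of using dcbz.
 */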

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif	/* CONFIG_8xx */
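
/*
 * The non-8xx copy loop keeps MAX_COPY_PREFETCH source lines in flight
 * with dcbt while dcbz pre-zeroes each destination line, so destination
 * lines are cache-allocated without ever being read from memory. The
 * cr0.eq flag distinguishes the main pass from the final pass, which
 * copies the last MAX_COPY_PREFETCH lines with the prefetch offset
 * reset so it never touches past the end of the source page.
 */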

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr);
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
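
/*
 * Both helpers are the standard lwarx/stwcx. retry loop;
 * PPC405_ERR77() expands to the erratum-77 workaround (a dcbt before
 * the stwcx.) on affected 405 cores and to nothing elsewhere.
 */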

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
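
/*
 * C model of the two-halves technique (illustration only; the slw/srw
 * instructions give 0 for shift amounts 32..63, which plain C shifts
 * do not, hence the explicit cases):
 *
 *	u64 lshrdi3_model(u64 v, unsigned int c)	// c in 0..63
 *	{
 *		u32 msw = v >> 32, lsw = v, hi, lo;
 *		if (c == 0) {
 *			hi = msw; lo = lsw;
 *		} else if (c < 32) {
 *			hi = msw >> c;
 *			lo = (lsw >> c) | (msw << (32 - c));
 *		} else {
 *			hi = 0; lo = msw >> (c - 32);
 *		}
 *		return ((u64)hi << 32) | lo;
 *	}
 */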

/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
_GLOBAL(__ucmpdi2)
	cmplw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr
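
/*
 * Equivalent C: return a < b ? 0 : a == b ? 1 : 2; the high words are
 * compared first and the low words only decide on high-word equality.
 */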

_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr
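
/*
 * Branch-free absolute value: r4 = x >> 31 is 0 or -1, and
 * (x ^ r4) - r4 negates x exactly when it was negative. As with C
 * abs(), INT_MIN is returned unchanged.
 */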

/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr
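
/*
 * Rough C shape (illustration only): invoke clone(CLONE_VM |
 * CLONE_UNTRACED | flags) by direct syscall; in the child, build an
 * empty top-level stack frame, call fn(arg), and invoke exit() if fn
 * ever returns. r30/r31 are used because, as nonvolatile registers,
 * they survive the syscall.
 */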

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC
	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list   */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address      */

	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori	r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */

	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

	/* Do the copies */
	li	r6, 0 /* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19 /* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19 /* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19 /* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr	r7
	subi	r9, r9, 4
	subi	r8, r8, 4
9:
	lwzu	r0, 4(r9)	/* do the copy */
	xor	r6, r6, r0
	stwu	r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz	9b

	addi	r9, r9, 4
	addi	r8, r8, 4
	b	0b

3:

	/* To be certain of avoiding problems with self-modifying code,
	 * execute a serializing instruction here.
	 */
	isync
	sync

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif
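
/*
 * For reference, the copy loop in relocate_new_kernel above walks the
 * standard kexec indirection list; a rough C model (illustration only,
 * with the IND_* values from the comments above, and noting that the
 * page_list argument in r3 is itself the first tagged entry):
 *
 *	for (entry = page_list; ; entry = *++ptr) {
 *		unsigned long page = entry & ~(PAGE_SIZE - 1);
 *		if (entry & IND_DESTINATION)
 *			dest = page;
 *		else if (entry & IND_INDIRECTION)
 *			ptr = (unsigned long *)page - 1;
 *		else if (entry & IND_DONE)
 *			break;
 *		else if (entry & IND_SOURCE) {
 *			copy_page_to(dest, page);
 *			dest += PAGE_SIZE;
 *		}
 *	}
 *
 * The assembly additionally xors every copied word into r6 as a
 * checksum and dcbst/icbi's each destination word so the new kernel
 * image is coherent in memory before control jumps to start_address.
 */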