/*
 * arch/ia64/kernel/entry.S
 *
 * Kernel entry points.
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999, 2002-2003
 *	Asit Mallick <Asit.K.Mallick@intel.com>
 *	Don Dugger <Don.Dugger@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 */
/*
 * ia64_switch_to now places the correct virtual mapping in TR2 for the
 * kernel stack. This allows us to handle interrupts without changing
 * to physical mode.
 *
 * Jonathan Nicklin	<nicklin@missioncriticallinux.com>
 * Patrick O'Rourke	<orourke@missioncriticallinux.com>
 * 11/07/2000
 */
/*
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *                    pv_ops.
 */
/*
 * Global (preserved) predicate usage on syscall entry/exit path:
 *
 *	pKStk:		See entry.h.
 *	pUStk:		See entry.h.
 *	pSys:		See entry.h.
 *	pNonSys:	!pSys
 */


#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/export.h>

#include "minstate.h"

/*
 * execve() is special because in case of success, we need to set up a
 * null register window frame.
 */
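/*
 * For orientation, a rough C-level sketch of the flow below (not a real
 * prototype; names follow the register comments in the code):
 *
 *	long ia64_execve(char *filename, char **argv, char **envp)
 *	{
 *		long r8 = sys_execve(filename, argv, envp);
 *		if (r8 >= 0)
 *			;	// on success, zap the register state below
 *		return r8;	// sign-extended to 64 bits
 *	}
 */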
ENTRY(ia64_execve)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0
	mov loc0=rp
	.body
	mov out0=in0			// filename
	;;				// stop bit between alloc and call
	mov out1=in1			// argv
	mov out2=in2			// envp
	br.call.sptk.many rp=sys_execve
.ret0:
	cmp4.ge p6,p7=r8,r0
	mov ar.pfs=loc1			// restore ar.pfs
	sxt4 r8=r8			// return 64-bit result
	;;
	stf.spill [sp]=f0
	mov rp=loc0
(p6)	mov ar.pfs=r0			// clear ar.pfs on success
(p7)	br.ret.sptk.many rp

	/*
	 * In theory, we'd have to zap this state only to prevent leaking of
	 * security-sensitive state (e.g., if current->mm->dumpable is zero). However,
	 * this executes in less than 20 cycles even on Itanium, so it's not worth
	 * optimizing for.
	 */
	mov ar.unat=0; mov ar.lc=0
	mov r4=0; mov f2=f0; mov b1=r0
	mov r5=0; mov f3=f0; mov b2=r0
	mov r6=0; mov f4=f0; mov b3=r0
	mov r7=0; mov f5=f0; mov b4=r0
	ldf.fill f12=[sp]; mov f13=f0; mov b5=r0
	ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0
	ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0
	ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0
	ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
	ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
	ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
	br.ret.sptk.many rp
END(ia64_execve)

/*
 * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
 *	      u64 tls)
 */
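/*
 * Rough C-level view of the call set up below (argument order taken
 * from the out-register comments; the exact do_fork() prototype of the
 * day may differ):
 *
 *	do_fork(flags, ustack_base, ustack_size, parent_tidptr, child_tidptr);
 *
 * with tls stashed in the pt_regs r16 slot for copy_thread() when
 * CLONE_SETTLS is set.
 */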
GLOBAL_ENTRY(sys_clone2)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1
	mov out2=in2
	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
	mov out3=in3	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
	;;
(p6)	st8 [r2]=in5				// store TLS in r16 for copy_thread()
	mov out4=in4	// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret1:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone2)

/*
 * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
 *	Deprecated.  Use sys_clone2() instead.
 */
GLOBAL_ENTRY(sys_clone)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1
	mov out2=16				// stacksize (compensates for 16-byte scratch area)
	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
	mov out3=in2	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
	;;
(p6)	st8 [r2]=in4				// store TLS in r13 (tp)
	mov out4=in3	// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret2:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)

/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 *	With Ingo's new scheduler, interrupts are disabled when this routine gets
 *	called.  The code starting at .map relies on this.  The rest of the code
 *	doesn't care about the interrupt masking status.
 */
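/*
 * Illustrative pseudo-C of the routine below (the TR/KR manipulation
 * has no real C equivalent):
 *
 *	save_switch_stack();			// preserved regs -> stack
 *	prev->thread.ksp = sp;
 *	if (next != init_task && granule(next) != IA64_KR(CURRENT_STACK))
 *		pin next's stack granule via itr.d[IA64_TR_CURRENT_STACK]
 *		and remember it in IA64_KR(CURRENT_STACK);
 *	sp = next->thread.ksp;
 *	IA64_KR(CURRENT) = r13 = next;
 *	load_switch_stack();
 *	return prev;				// in r8
 */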
GLOBAL_ENTRY(ia64_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
	.body

	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
	movl r25=init_task
	mov r27=IA64_KR(CURRENT_STACK)
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
	dep r20=0,in0,61,3		// physical address of "next"
	;;
	st8 [r22]=sp			// save kernel stack pointer of old task
	shr.u r26=r20,IA64_GRANULE_SHIFT
	cmp.eq p7,p6=r25,in0
	;;
	/*
	 * If we've already mapped this task's page, we can skip doing it again.
	 */
(p6)	cmp.eq p7,p6=r26,r27
(p6)	br.cond.dpnt .map
	;;
.done:
	ld8 sp=[r21]			// load kernel stack pointer of new task
	MOV_TO_KR(CURRENT, in0, r8, r9)	// update "current" application register
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	;;
	DO_LOAD_SWITCH_STACK

#ifdef CONFIG_SMP
	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
#endif
	br.ret.sptk.many rp		// boogie on out in new context

.map:
	RSM_PSR_IC(r25)			// interrupts (psr.i) are already disabled here
	movl r25=PAGE_KERNEL
	;;
	srlz.d
	or r23=r25,r20			// construct PA | page properties
	mov r25=IA64_GRANULE_SHIFT<<2
	;;
	MOV_TO_ITIR(p0, r25, r8)
	MOV_TO_IFA(in0, r8)		// VA of next task...
	;;
	mov r25=IA64_TR_CURRENT_STACK
	MOV_TO_KR(CURRENT_STACK, r26, r8, r9)	// remember last page we mapped...
	;;
	itr.d dtr[r25]=r23		// wire in new mapping...
	SSM_PSR_IC_AND_SRLZ_D(r8, r9)	// reenable the psr.ic bit
	br.cond.sptk .done
END(ia64_switch_to)

/*
 * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
 * means that we may get an interrupt with "sp" pointing to the new kernel stack while
 * ar.bspstore is still pointing to the old kernel backing store area.  Since ar.rsc,
 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
 * problem.  Also, we don't need to specify unwind information for preserved registers
 * that are not modified in save_switch_stack as the right unwind information is already
 * specified at the call-site of save_switch_stack.
 */

/*
 * save_switch_stack:
 *	- r16 holds ar.pfs
 *	- b7 holds address to return to
 *	- rp (b0) holds return address to save
 */
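/*
 * For orientation only: the spills below fill in the switch_stack area
 * roughly as follows (field names illustrative; see struct switch_stack
 * and asm-offsets.h for the real layout):
 *
 *	struct switch_stack {
 *		caller_unat, ar_fpsr;
 *		f2-f5, f12-f31;			// preserved FP regs
 *		r4-r7;				// preserved static regs
 *		b0-b5;				// preserved branch regs
 *		ar_pfs, ar_lc, ar_unat, ar_rnat, ar_bspstore, pr;
 *	};
 */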
GLOBAL_ENTRY(save_switch_stack)
	.prologue
	.altrp b7
	flushrs			// flush dirty regs to backing store (must be first in insn group)
	.save @priunat,r17
	mov r17=ar.unat		// preserve caller's
	.body
#ifdef CONFIG_ITANIUM
	adds r2=16+128,sp
	adds r3=16+64,sp
	adds r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,16		// spill r4
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl.nt1 [r2],128
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl [r2]
	lfetch.fault.excl [r3]
	adds r15=SW(R5)+16,sp
#else
	add r2=16+3*128,sp
	add r3=16,sp
	add r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,SW(R6)-SW(R4)	// spill r4 and prefetch offset 0x1c0
	lfetch.fault.excl.nt1 [r3],128	//		prefetch offset 0x010
	;;
	lfetch.fault.excl.nt1 [r3],128	//		prefetch offset 0x090
	lfetch.fault.excl.nt1 [r2],128	//		prefetch offset 0x190
	;;
	lfetch.fault.excl.nt1 [r3]	//		prefetch offset 0x110
	lfetch.fault.excl.nt1 [r2]	//		prefetch offset 0x210
	adds r15=SW(R5)+16,sp
#endif
	;;
	st8.spill [r15]=r5,SW(R7)-SW(R5)	// spill r5
	mov.m ar.rsc=0			// put RSE in mode: enforced lazy, little endian, pl 0
	add r2=SW(F2)+16,sp		// r2 = &sw->f2
	;;
	st8.spill [r14]=r6,SW(B0)-SW(R6)	// spill r6
	mov.m r18=ar.fpsr		// preserve fpsr
	add r3=SW(F3)+16,sp		// r3 = &sw->f3
	;;
	stf.spill [r2]=f2,32
	mov.m r19=ar.rnat
	mov r21=b0

	stf.spill [r3]=f3,32
	st8.spill [r15]=r7,SW(B2)-SW(R7)	// spill r7
	mov r22=b1
	;;
	// since we're done with the spills, read and save ar.unat:
	mov.m r29=ar.unat
	mov.m r20=ar.bspstore
	mov r23=b2
	stf.spill [r2]=f4,32
	stf.spill [r3]=f5,32
	mov r24=b3
	;;
	st8 [r14]=r21,SW(B1)-SW(B0)		// save b0
	st8 [r15]=r23,SW(B3)-SW(B2)		// save b2
	mov r25=b4
	mov r26=b5
	;;
	st8 [r14]=r22,SW(B4)-SW(B1)		// save b1
	st8 [r15]=r24,SW(AR_PFS)-SW(B3)		// save b3
	mov r21=ar.lc		// I-unit
	stf.spill [r2]=f12,32
	stf.spill [r3]=f13,32
	;;
	st8 [r14]=r25,SW(B5)-SW(B4)		// save b4
	st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)	// save ar.pfs
	stf.spill [r2]=f14,32
	stf.spill [r3]=f15,32
	;;
	st8 [r14]=r26				// save b5
	st8 [r15]=r21				// save ar.lc
	stf.spill [r2]=f16,32
	stf.spill [r3]=f17,32
	;;
	stf.spill [r2]=f18,32
	stf.spill [r3]=f19,32
	;;
	stf.spill [r2]=f20,32
	stf.spill [r3]=f21,32
	;;
	stf.spill [r2]=f22,32
	stf.spill [r3]=f23,32
	;;
	stf.spill [r2]=f24,32
	stf.spill [r3]=f25,32
	;;
	stf.spill [r2]=f26,32
	stf.spill [r3]=f27,32
	;;
	stf.spill [r2]=f28,32
	stf.spill [r3]=f29,32
	;;
	stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
	stf.spill [r3]=f31,SW(PR)-SW(F31)
	add r14=SW(CALLER_UNAT)+16,sp
	;;
	st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)	// save ar.unat
	st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
	mov r21=pr
	;;
	st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
	st8 [r3]=r21				// save predicate registers
	;;
	st8 [r2]=r20				// save ar.bspstore
	st8 [r14]=r18				// save fpsr
	mov ar.rsc=3		// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(save_switch_stack)

/*
 * load_switch_stack:
 *	- "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
 *	- b7 holds address to return to
 *	- must not touch r8-r11
 */
GLOBAL_ENTRY(load_switch_stack)
	.prologue
	.altrp b7

	.body
	lfetch.fault.nt1 [sp]
	adds r2=SW(AR_BSPSTORE)+16,sp
	adds r3=SW(AR_UNAT)+16,sp
	mov ar.rsc=0				// put RSE into enforced lazy mode
	adds r14=SW(CALLER_UNAT)+16,sp
	adds r15=SW(AR_FPSR)+16,sp
	;;
	ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))	// bspstore
	ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))	// unat
	;;
	ld8 r21=[r2],16		// restore b0
	ld8 r22=[r3],16		// restore b1
	;;
	ld8 r23=[r2],16		// restore b2
	ld8 r24=[r3],16		// restore b3
	;;
	ld8 r25=[r2],16		// restore b4
	ld8 r26=[r3],16		// restore b5
	;;
	ld8 r16=[r2],(SW(PR)-SW(AR_PFS))	// ar.pfs
	ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))	// ar.lc
	;;
	ld8 r28=[r2]		// restore pr
	ld8 r30=[r3]		// restore rnat
	;;
	ld8 r18=[r14],16	// restore caller's unat
	ld8 r19=[r15],24	// restore fpsr
	;;
	ldf.fill f2=[r14],32
	ldf.fill f3=[r15],32
	;;
	ldf.fill f4=[r14],32
	ldf.fill f5=[r15],32
	;;
	ldf.fill f12=[r14],32
	ldf.fill f13=[r15],32
	;;
	ldf.fill f14=[r14],32
	ldf.fill f15=[r15],32
	;;
	ldf.fill f16=[r14],32
	ldf.fill f17=[r15],32
	;;
	ldf.fill f18=[r14],32
	ldf.fill f19=[r15],32
	mov b0=r21
	;;
	ldf.fill f20=[r14],32
	ldf.fill f21=[r15],32
	mov b1=r22
	;;
	ldf.fill f22=[r14],32
	ldf.fill f23=[r15],32
	mov b2=r23
	;;
	mov ar.bspstore=r27
	mov ar.unat=r29		// establish unat holding the NaT bits for r4-r7
	mov b3=r24
	;;
	ldf.fill f24=[r14],32
	ldf.fill f25=[r15],32
	mov b4=r25
	;;
	ldf.fill f26=[r14],32
	ldf.fill f27=[r15],32
	mov b5=r26
	;;
	ldf.fill f28=[r14],32
	ldf.fill f29=[r15],32
	mov ar.pfs=r16
	;;
	ldf.fill f30=[r14],32
	ldf.fill f31=[r15],24
	mov ar.lc=r17
	;;
	ld8.fill r4=[r14],16
	ld8.fill r5=[r15],16
	mov pr=r28,-1
	;;
	ld8.fill r6=[r14],16
	ld8.fill r7=[r15],16

	mov ar.unat=r18		// restore caller's unat
	mov ar.rnat=r30		// must restore after bspstore but before rsc!
	mov ar.fpsr=r19		// restore fpsr
	mov ar.rsc=3		// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(load_switch_stack)

GLOBAL_ENTRY(prefetch_stack)
	add r14 = -IA64_SWITCH_STACK_SIZE, sp
	add r15 = IA64_TASK_THREAD_KSP_OFFSET, in0
	;;
	ld8 r16 = [r15]			// load next's stack pointer
	lfetch.fault.excl [r14], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault [r16], 128
	br.ret.sptk.many rp
END(prefetch_stack)

/*
 * Invoke a system call, but do some tracing before and after the call.
 * We MUST preserve the current register frame throughout this routine
 * because some system calls (such as ia64_execve) directly
 * manipulate ar.pfs.
 */
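/*
 * Rough control flow of the routine below (pseudo-C for orientation;
 * the strace_error path is omitted):
 *
 *	save f6-f11;
 *	syscall_trace_enter();			// may rewrite the args
 *	restore f6-f11;
 *	num = pt_regs->r15 - 1024;		// number may have changed
 *	r8 = (num < NR_syscalls) ? sys_call_table[num]() : sys_ni_syscall();
 *	syscall_trace_leave();
 */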
GLOBAL_ENTRY(ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * We need to preserve the scratch registers f6-f11 in case the system
	 * call is sigreturn.
	 */
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
	cmp.lt p6,p0=r8,r0			// check tracehook
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error		// syscall failed ->
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	ldf.fill f6=[r16],32
	ldf.fill f7=[r17],32
	;;
	ldf.fill f8=[r16],32
	ldf.fill f9=[r17],32
	;;
	ldf.fill f10=[r16]
	ldf.fill f11=[r17]
	// the syscall number may have changed, so re-load it and re-calculate the
	// syscall entry-point:
	adds r15=PT(R15)+16,sp			// r15 = &pt_regs.r15 (syscall #)
	;;
	ld8 r15=[r15]
	mov r3=NR_syscalls - 1
	;;
	adds r15=-1024,r15
	movl r16=sys_call_table
	;;
	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
	cmp.leu p6,p7=r15,r3
	;;
(p6)	ld8 r20=[r20]				// load address of syscall entry point
(p7)	movl r20=sys_ni_syscall
	;;
	mov b6=r20
	br.call.sptk.many rp=b6			// do the syscall
.strace_check_retval:
	cmp.lt p6,p0=r8,r0			// syscall failed?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error		// syscall failed ->
	;;					// avoid RAW on r10
.strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
.ret3:
(pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
(pUStk)	rsm psr.i				// disable interrupts
	br.cond.sptk ia64_work_pending_syscall_end

strace_error:
	ld8 r3=[r2]				// load pt_regs.r8
	sub r9=0,r8				// negate return value to get errno value
	;;
	cmp.ne p6,p0=r3,r0			// is pt_regs.r8!=0?
	adds r3=16,r2				// r3=&pt_regs.r10
	;;
(p6)	mov r10=-1
(p6)	mov r8=r9
	br.cond.sptk .strace_save_retval
END(ia64_trace_syscall)

/*
 * When traced and returning from sigreturn, we invoke syscall_trace but then
 * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
 */
GLOBAL_ENTRY(ia64_strace_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
}
.ret4:	br.cond.sptk ia64_leave_kernel
END(ia64_strace_leave_kernel)

ENTRY(call_payload)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(0)
	/* call the kernel_thread payload; fn is in r4, arg - in r5 */
	alloc loc1=ar.pfs,0,3,1,0
	mov loc0=rp
	mov loc2=gp
	mov out0=r5		// arg
	ld8 r14 = [r4], 8	// fn.address
	;;
	mov b6 = r14
	ld8 gp = [r4]		// fn.gp
	;;
	br.call.sptk.many rp=b6	// fn(arg)
.ret12:	mov gp=loc2
	mov rp=loc0
	mov ar.pfs=loc1
	/* ... and if it has returned, we are going to userland */
	cmp.ne pKStk,pUStk=r0,r0
	br.ret.sptk.many rp
END(call_payload)

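/*
 * call_payload runs the kernel_thread payload: roughly (pseudo-C)
 *
 *	(*fn.address)(arg);		// fn descriptor in r4, arg in r5
 *	pKStk = 0; pUStk = 1;		// if fn returns, head for userland
 */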
GLOBAL_ENTRY(ia64_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	/*
	 * We need to call schedule_tail() to complete the scheduling process.
	 * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
	 * address of the previously executing task.
	 */
	br.call.sptk.many rp=ia64_invoke_schedule_tail
}
.ret8:
(pKStk)	br.call.sptk.many rp=call_payload
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 r2=[r2]
	;;
	mov r8=0
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2
	;;
	cmp.ne p6,p0=r2,r0
(p6)	br.cond.spnt .strace_check_retval
	;;					// added stop bits to prevent r8 dependency
END(ia64_ret_from_clone)
	// fall through
GLOBAL_ENTRY(ia64_ret_from_syscall)
	PT_REGS_UNWIND_INFO(0)
	cmp.ge p6,p7=r8,r0			// syscall executed successfully?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	mov r10=r0				// clear error indication in r10
(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
END(ia64_ret_from_syscall)
	// fall through

/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 *	need to switch to bank 0 and doesn't restore the scratch registers.
 *	To avoid leaking kernel bits, the scratch registers are set to
 *	the following known-to-be-safe values:
 *
 *		  r1: restored (global pointer)
 *		  r2: cleared
 *		  r3: 1 (when returning to user-level)
 *	     r8-r11: restored (syscall return value(s))
 *		 r12: restored (user-level stack pointer)
 *		 r13: restored (user-level thread pointer)
 *		 r14: set to __kernel_syscall_via_epc
 *		 r15: restored (syscall #)
 *	    r16-r17: cleared
 *		 r18: user-level b6
 *		 r19: cleared
 *		 r20: user-level ar.fpsr
 *		 r21: user-level b0
 *		 r22: cleared
 *		 r23: user-level ar.bspstore
 *		 r24: user-level ar.rnat
 *		 r25: user-level ar.unat
 *		 r26: user-level ar.pfs
 *		 r27: user-level ar.rsc
 *		 r28: user-level ip
 *		 r29: user-level psr
 *		 r30: user-level cfm
 *		 r31: user-level pr
 *	     f6-f11: cleared
 *		  pr: restored (user-level pr)
 *		  b0: restored (user-level rp)
 *		  b6: restored
 *		  b7: set to __kernel_syscall_via_epc
 *	     ar.unat: restored (user-level ar.unat)
 *	      ar.pfs: restored (user-level ar.pfs)
 *	      ar.rsc: restored (user-level ar.rsc)
 *	     ar.rnat: restored (user-level ar.rnat)
 *	 ar.bspstore: restored (user-level ar.bspstore)
 *	     ar.fpsr: restored (user-level ar.fpsr)
 *	      ar.ccv: cleared
 *	      ar.csd: cleared
 *	      ar.ssd: cleared
 */
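/*
 * Very condensed sketch of the exit sequence below and in
 * ia64_leave_kernel (pseudo-C; accounting and preemption variants
 * ignored):
 *
 *	disable interrupts;
 *	if (returning to user-level && work pending)
 *		goto work_pending;
 *	reload pt_regs state; turn off psr.i/psr.ic;
 *	cover; loadrs;			// switch RBS back to the user backing store
 *	restore ar/br/pr state; rfi;	// resume at user cr.iip/cr.ipsr
 */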
GLOBAL_ENTRY(ia64_leave_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be checked for
	 * extra work.  We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0.  After extra work processing has been completed, execution
	 * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
	RSM_PSR_I(p0, r2, r18)			// disable interrupts
	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]			// r21 <- preempt_count
(pUStk)	mov r21=0			// r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
	RSM_PSR_I(pUStk, r2, r18)
	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
#endif
.global ia64_work_processed_syscall;
ia64_work_processed_syscall:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	adds r2=PT(LOADRS)+16,r12
	MOV_FROM_ITC(pUStk, p9, r22, r19)	// fetch time at leave
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
	adds r3=PT(AR_BSPSTORE)+16,r12		// deferred
	;;
#else
	adds r2=PT(LOADRS)+16,r12
	adds r3=PT(AR_BSPSTORE)+16,r12
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
	nop.i 0
	;;
#endif
	mov r16=ar.bsp				// M2  get existing backing store pointer
	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
(p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
	;;
	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
(p6)	cmp4.ne.unc p6,p0=r15, r0		// any special work pending?
(p6)	br.cond.spnt .work_pending_syscall
	;;
	// start restoring the state saved on the kernel stack (struct pt_regs):
	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
(pNonSys) break 0		//      bug check: we shouldn't be here if pNonSys is TRUE!
	;;
	invala			// M0|1 invalidate ALAT
	RSM_PSR_I_IC(r28, r29, r30)	// M2   turn off interrupts and interruption collection
	cmp.eq p9,p0=r0,r0	// A    set p9 to indicate that we should restore cr.ifs

	ld8 r29=[r2],16		// M0|1 load cr.ipsr
	ld8 r28=[r3],16		// M0|1 load cr.iip
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
	;;
	ld8 r30=[r2],16		// M0|1 load cr.ifs
	ld8 r25=[r3],16		// M0|1 load ar.unat
(pUStk) add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
#else
	mov r22=r0		// A    clear r22
	;;
	ld8 r30=[r2],16		// M0|1 load cr.ifs
	ld8 r25=[r3],16		// M0|1 load ar.unat
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
#endif
	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
	MOV_FROM_PSR(pKStk, r22, r21)	// M2   read PSR now that interrupts are disabled
	nop 0
	;;
	ld8 r21=[r2],PT(AR_RNAT)-PT(B0)	// M0|1 load b0
	ld8 r27=[r3],PT(PR)-PT(AR_RSC)	// M0|1 load ar.rsc
	mov f6=f0			// F    clear f6
	;;
	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// M0|1 load ar.rnat (may be garbage)
	ld8 r31=[r3],PT(R1)-PT(PR)		// M0|1 load predicates
	mov f7=f0				// F    clear f7
	;;
	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// M0|1 load ar.fpsr
	ld8.fill r1=[r3],16			// M0|1 load r1
(pUStk) mov r17=1				// A
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) st1 [r15]=r17				// M2|3
#else
(pUStk) st1 [r14]=r17				// M2|3
#endif
	ld8.fill r13=[r3],16			// M0|1
	mov f8=f0				// F    clear f8
	;;
	ld8.fill r12=[r2]			// M0|1 restore r12 (sp)
	ld8.fill r15=[r3]			// M0|1 restore r15
	mov b6=r18				// I0   restore b6

	LOAD_PHYS_STACK_REG_SIZE(r17)
	mov f9=f0				// F    clear f9
(pKStk) br.cond.dpnt.many skip_rbs_switch	// B

	srlz.d				// M0   ensure interruption collection is off (for cover)
	shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition
	COVER				// B    add current frame into dirty partition & set cr.ifs
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	mov r19=ar.bsp			// M2   get new backing store pointer
	st8 [r14]=r22			// M	save time at leave
	mov f10=f0			// F    clear f10

	mov r22=r0			// A	clear r22
	movl r14=__kernel_syscall_via_epc // X
	;;
#else
	mov r19=ar.bsp			// M2   get new backing store pointer
	mov f10=f0			// F    clear f10

	nop.m 0
	movl r14=__kernel_syscall_via_epc // X
	;;
#endif
	mov.m ar.csd=r0			// M2   clear ar.csd
	mov.m ar.ccv=r0			// M2   clear ar.ccv
	mov b7=r14			// I0   clear b7 (hint with __kernel_syscall_via_epc)

	mov.m ar.ssd=r0			// M2   clear ar.ssd
	mov f11=f0			// F    clear f11
	br.cond.sptk.many rbs_switch	// B
END(ia64_leave_syscall)

GLOBAL_ENTRY(ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be checked for
	 * extra work.  We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0.  After extra work processing has been completed, execution
	 * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
	RSM_PSR_I(p0, r17, r31)			// disable interrupts
	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]			// r21 <- preempt_count
(pUStk)	mov r21=0			// r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
#else
	RSM_PSR_I(pUStk, r17, r31)
	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
#endif
.work_processed_kernel:
	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r17]				// load current_thread_info()->flags
	adds r21=PT(PR)+16,r12
	;;

	lfetch [r21],PT(CR_IPSR)-PT(PR)
	adds r2=PT(B6)+16,r12
	adds r3=PT(R16)+16,r12
	;;
	lfetch [r21]
	ld8 r28=[r2],8		// load b6
	adds r29=PT(R24)+16,r12

	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
	adds r30=PT(AR_CCV)+16,r12
(p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
	;;
	ld8.fill r24=[r29]
	ld8 r15=[r30]		// load ar.ccv
(p6)	cmp4.ne.unc p6,p0=r19, r0		// any special work pending?
	;;
	ld8 r29=[r2],16		// load b7
	ld8 r30=[r3],16		// load ar.csd
(p6)	br.cond.spnt .work_pending
	;;
	ld8 r31=[r2],16		// load ar.ssd
	ld8.fill r8=[r3],16
	;;
	ld8.fill r9=[r2],16
	ld8.fill r10=[r3],PT(R17)-PT(R10)
	;;
	ld8.fill r11=[r2],PT(R18)-PT(R11)
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
	RSM_PSR_I_IC(r23, r22, r25)	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	;;
	ld8.fill r22=[r2],24
	ld8.fill r23=[r3],24
	mov b6=r28
	;;
	ld8.fill r25=[r2],16
	ld8.fill r26=[r3],16
	mov b7=r29
	;;
	ld8.fill r27=[r2],16
	ld8.fill r28=[r3],16
	;;
	ld8.fill r29=[r2],16
	ld8.fill r30=[r3],24
	;;
	ld8.fill r31=[r2],PT(F9)-PT(R31)
	adds r3=PT(F10)-PT(F6),r3
	;;
	ldf.fill f9=[r2],PT(F6)-PT(F9)
	ldf.fill f10=[r3],PT(F8)-PT(F10)
	;;
	ldf.fill f6=[r2],PT(F7)-PT(F6)
	;;
	ldf.fill f7=[r2],PT(F11)-PT(F7)
	ldf.fill f8=[r3],32
	;;
	srlz.d	// ensure that inter. collection is off (VHPT is don't care, since text is pinned)
	mov ar.ccv=r15
	;;
	ldf.fill f11=[r2]
	BSW_0(r2, r3, r15)	// switch back to bank 0 (no stop bit required beforehand...)
	;;
(pUStk)	mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	.pred.rel.mutex pUStk,pKStk
	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
	MOV_FROM_ITC(pUStk, p9, r22, r29)	// M  fetch time at leave
	nop.i 0
	;;
#else
	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
	nop.i 0
	nop.i 0
	;;
#endif
	ld8 r29=[r16],16	// load cr.ipsr
	ld8 r28=[r17],16	// load cr.iip
	;;
	ld8 r30=[r16],16	// load cr.ifs
	ld8 r25=[r17],16	// load ar.unat
	;;
	ld8 r26=[r16],16	// load ar.pfs
	ld8 r27=[r17],16	// load ar.rsc
	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
	;;
	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
	;;
	ld8 r31=[r16],16	// load predicates
	ld8 r21=[r17],16	// load b0
	;;
	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
#else
(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
#endif
	;;
	ld8 r20=[r16],16	// ar.fpsr
	ld8.fill r15=[r17],16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18	// deferred
#endif
	;;
	ld8.fill r14=[r16],16
	ld8.fill r2=[r17]
(pUStk)	mov r17=1
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
Hidetoshi Seto | b64f34c | 2008-01-29 14:27:30 +0900 | [diff] [blame] | 991 | // mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;; |
| 992 | // mib : mov add br -> mib : ld8 add br |
| 993 | // bbb_ : br nop cover;; mbb_ : mov br cover;; |
| 994 | // |
| 995 | // no one require bsp in r16 if (pKStk) branch is selected. |
(pUStk)	st8 [r3]=r22		// save time at leave
(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
	shr.u r18=r19,16	// get byte size of existing "dirty" partition
	;;
	ld8.fill r3=[r16]	// deferred
	LOAD_PHYS_STACK_REG_SIZE(r17)
(pKStk)	br.cond.dpnt skip_rbs_switch
	mov r16=ar.bsp		// get existing backing store pointer
#else
	ld8.fill r3=[r16]
(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
	shr.u r18=r19,16	// get byte size of existing "dirty" partition
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	LOAD_PHYS_STACK_REG_SIZE(r17)
(pKStk)	br.cond.dpnt skip_rbs_switch
#endif

	/*
	 * Restore user backing store.
	 *
	 * NOTE: alloc, loadrs, and cover can't be predicated.
	 */
(pNonSys) br.cond.dpnt dont_preserve_current_frame
	COVER				// add current frame into dirty partition and set cr.ifs
	;;
	mov r19=ar.bsp			// get new backing store pointer
rbs_switch:
	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
	cmp.ne p9,p0=r0,r0		// clear p9 to skip restore of cr.ifs
	;;
	sub r19=r19,r16			// calculate total byte size of dirty partition
	add r18=64,r18			// don't force in0-in7 into memory...
	;;
	shl r19=r19,16			// shift size of dirty partition into loadrs position
	;;
dont_preserve_current_frame:
	/*
	 * To prevent leaking bits between the kernel and user-space,
	 * we must clear the stacked registers in the "invalid" partition here.
	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
	 * 5 registers/cycle on McKinley).
	 */
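	/*
	 * Illustrative pseudo-C of the recursion below (approximate; the
	 * real code must use alloc/br.call so the CPU itself renames
	 * Nregs stacked registers per level and zeroes them as locals):
	 *
	 *	void rse_clear_invalid(long bytes_left, long depth)
	 *	{
	 *		if (bytes_left > Nregs*8)
	 *			rse_clear_invalid(bytes_left - Nregs*8, depth + 1);
	 *		// this level's Nregs-2 locals are set to 0
	 *		// levels with depth != 0 br.ret back to their caller
	 *	}
	 */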
#	define pRecurse	p6
#	define pReturn	p7
#ifdef CONFIG_ITANIUM
#	define Nregs	10
#else
#	define Nregs	14
#endif
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9		// RNaTslots <= floor(dirtySize / (64*8))
	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r17
	mov in1=0
	;;
	TEXT_ALIGN(32)
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
	// cycle 0
 { .mii
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
}{ .mfb
	add out1=1,in1			// increment recursion count
	nop.f 0
	nop.b 0				// can't do br.call here because of alloc (WAW on CFM)
	;;
}{ .mfi	// cycle 1
	mov loc1=0
	nop.f 0
	mov loc2=0
}{ .mib
	mov loc3=0
	mov loc4=0
(pRecurse) br.call.sptk.many b0=rse_clear_invalid

}{ .mfi	// cycle 2
	mov loc5=0
	nop.f 0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
}{ .mib
	mov loc6=0
	mov loc7=0
(pReturn) br.ret.sptk.many b0
}
#else /* !CONFIG_ITANIUM */
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1			// increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0
#endif /* !CONFIG_ITANIUM */
#	undef pRecurse
#	undef pReturn
	;;
	alloc r17=ar.pfs,0,0,0,0	// drop current register frame
	;;
	loadrs
	;;
skip_rbs_switch:
	mov ar.unat=r25		// M2
(pKStk)	extr.u r22=r22,21,1	// I0 extract current value of psr.pp from r22
(pLvSys)mov r19=r0		// A  clear r19 for leave_syscall, no-op otherwise
	;;
(pUStk)	mov ar.bspstore=r23	// M2
(pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
(pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
	;;
	MOV_TO_IPSR(p0, r29, r25)	// M2
	mov ar.pfs=r26		// I0
(pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise

	MOV_TO_IFS(p9, r30, r25)// M2
	mov b0=r21		// I0
(pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise

	mov ar.fpsr=r20		// M2
	MOV_TO_IIP(r28, r25)	// M2
	nop 0
	;;
(pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
	nop 0
(pLvSys)mov r2=r0

	mov ar.rsc=r27		// M2
	mov pr=r31,-1		// I0
	RFI			// B

	/*
	 * On entry:
	 *	r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
	 *	r31 = current->thread_info->flags
	 * On exit:
	 *	p6 = TRUE if work-pending-check needs to be redone
	 *
	 * Interrupts are disabled on entry, reenabled depending on the work
	 * to be done, and disabled again on exit.
	 */
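	/*
	 * In outline (pseudo-C; pLvSys selects the syscall vs. kernel
	 * return path afterwards):
	 *
	 *	if (flags & TIF_NEED_RESCHED) {
	 *		preempt_schedule_irq();
	 *		recheck = 1;
	 *	} else if (returning to user-level) {
	 *		notify_resume_user();
	 *		recheck = 0;
	 *	}
	 */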
| 1153 | .work_pending_syscall: |
| 1154 | add r2=-8,r2 |
| 1155 | add r3=-8,r3 |
| 1156 | ;; |
| 1157 | st8 [r2]=r8 |
| 1158 | st8 [r3]=r10 |
| 1159 | .work_pending: |
Hidetoshi Seto | 2e513fe | 2008-05-09 15:26:51 +0900 | [diff] [blame] | 1160 | tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed? |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1161 | (p6) br.cond.sptk.few .notify |
Thomas Gleixner | aa0d532 | 2013-09-17 18:53:08 +0000 | [diff] [blame] | 1162 | br.call.spnt.many rp=preempt_schedule_irq |
Hidetoshi Seto | 2e513fe | 2008-05-09 15:26:51 +0900 | [diff] [blame] | 1163 | .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check) |
Luis R. Rodriguez | e55645e | 2015-06-02 11:42:02 -0700 | [diff] [blame] | 1164 | (pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end |
Hidetoshi Seto | 2e513fe | 2008-05-09 15:26:51 +0900 | [diff] [blame] | 1165 | br.cond.sptk.many .work_processed_kernel |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1166 | |
| 1167 | .notify: |
| 1168 | (pUStk) br.call.spnt.many rp=notify_resume_user |
Hidetoshi Seto | 2e513fe | 2008-05-09 15:26:51 +0900 | [diff] [blame] | 1169 | .ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check) |
Luis R. Rodriguez | e55645e | 2015-06-02 11:42:02 -0700 | [diff] [blame] | 1170 | (pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end |
Hidetoshi Seto | 2e513fe | 2008-05-09 15:26:51 +0900 | [diff] [blame] | 1171 | br.cond.sptk.many .work_processed_kernel |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1172 | |
Luis R. Rodriguez | e55645e | 2015-06-02 11:42:02 -0700 | [diff] [blame] | 1173 | .global ia64_work_pending_syscall_end; |
| 1174 | ia64_work_pending_syscall_end: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1175 | adds r2=PT(R8)+16,r12 |
| 1176 | adds r3=PT(R10)+16,r12 |
| 1177 | ;; |
| 1178 | ld8 r8=[r2] |
| 1179 | ld8 r10=[r3] |
Luis R. Rodriguez | e55645e | 2015-06-02 11:42:02 -0700 | [diff] [blame] | 1180 | br.cond.sptk.many ia64_work_processed_syscall |
| 1181 | END(ia64_leave_kernel) |
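The .work_pending/.notify path above is a small decision loop: if TIF_NEED_RESCHED is set it calls preempt_schedule_irq() and forces a re-check of the work mask (p6 <- 1); otherwise, when returning to user level, it delivers pending notifications through notify_resume_user and leaves without re-checking (p6 <- 0). Below is a minimal, standalone C sketch of that control flow; the function and stub names are hypothetical and the TIF bit value is assumed purely for illustration.

    /* Standalone sketch of the .work_pending decision above; not kernel code. */
    #include <stdio.h>

    #define TIF_NEED_RESCHED_SKETCH 3       /* assumed bit position, illustration only */

    static void preempt_schedule_irq_stub(void) { puts("preempt_schedule_irq()"); }
    static void notify_resume_user_stub(void)   { puts("notify_resume_user()"); }

    /* Returns 1 when the work check must be redone (the role of predicate p6). */
    static int work_pending_sketch(unsigned long flags, int returning_to_user)
    {
            if (flags & (1UL << TIF_NEED_RESCHED_SKETCH)) {
                    preempt_schedule_irq_stub();    /* .ret9: p6 <- 1 (re-check) */
                    return 1;
            }
            if (returning_to_user)                  /* (pUStk) branch taken in .notify */
                    notify_resume_user_stub();
            return 0;                               /* .ret10: p6 <- 0 (don't re-check) */
    }

    int main(void)
    {
            printf("redo=%d\n", work_pending_sketch(1UL << TIF_NEED_RESCHED_SKETCH, 1));
            printf("redo=%d\n", work_pending_sketch(0, 1));
            return 0;
    }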
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1183 | ENTRY(handle_syscall_error) |
| 1184 | /* |
| 1185 | * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could |
| 1186 | * lead us to mistake a negative return value for a failed syscall. Those syscalls |
| 1187 | * must deposit a non-zero value in pt_regs.r8 to indicate an error. If |
| 1188 | * pt_regs.r8 is zero, we assume that the call completed successfully. |
| 1189 | */ |
| 1190 | PT_REGS_UNWIND_INFO(0) |
| 1191 | ld8 r3=[r2] // load pt_regs.r8 |
| 1192 | ;; |
| 1193 | cmp.eq p6,p7=r3,r0 // is pt_regs.r8==0? |
| 1194 | ;; |
| 1195 | (p7) mov r10=-1 |
| 1196 | (p7) sub r8=0,r8 // negate return value to get errno |
| 1197 | br.cond.sptk ia64_leave_syscall |
| 1198 | END(handle_syscall_error) |
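handle_syscall_error establishes the user-visible convention: on failure r10 is set to -1 and r8 holds the (now positive) errno, while on success r10 is 0 and r8 is the return value. The C sketch below shows how a user-level wrapper could consume that pair; the struct and helper are illustrative, not glibc's actual implementation.

    #include <errno.h>

    /* Illustrative mirror of the register pair the kernel hands back. */
    struct ia64_sysret_sketch {
            long r8;        /* return value, or positive errno on failure */
            long r10;       /* 0 on success, -1 on failure */
    };

    /* Hypothetical wrapper-side helper: fold (r8, r10) into the usual
     * "-1 and errno" C convention. */
    static long syscall_result_sketch(struct ia64_sysret_sketch ret)
    {
            if (ret.r10 == -1) {
                    errno = (int)ret.r8;    /* r8 was negated by handle_syscall_error */
                    return -1;
            }
            return ret.r8;
    }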
| 1199 | |
| 1200 | /* |
| 1201 | * Invoke schedule_tail(task) while preserving in0-in7, which may be needed |
| 1202 | * in case a system call gets restarted. |
| 1203 | */ |
| 1204 | GLOBAL_ENTRY(ia64_invoke_schedule_tail) |
| 1205 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) |
| 1206 | alloc loc1=ar.pfs,8,2,1,0 |
| 1207 | mov loc0=rp |
| 1208 | mov out0=r8 // Address of previous task |
| 1209 | ;; |
| 1210 | br.call.sptk.many rp=schedule_tail |
| 1211 | .ret11: mov ar.pfs=loc1 |
| 1212 | mov rp=loc0 |
| 1213 | br.ret.sptk.many rp |
| 1214 | END(ia64_invoke_schedule_tail) |
| 1215 | |
| 1216 | /* |
Hidetoshi Seto | 3633c73 | 2008-05-09 15:26:35 +0900 | [diff] [blame] | 1217 | * Set up the stack and call do_notify_resume_user(), keeping interrupts |
| 1218 | * disabled. |
| 1219 | * |
| 1220 | * Note that pSys and pNonSys need to be set up by the caller. |
| 1221 | * We declare 8 input registers so the system call args get preserved, |
| 1222 | * in case we need to restart a system call. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 | */ |
Isaku Yamahata | 4df8d22 | 2008-05-27 15:08:01 -0700 | [diff] [blame] | 1224 | GLOBAL_ENTRY(notify_resume_user) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1225 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) |
| 1226 | alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart! |
| 1227 | mov r9=ar.unat |
| 1228 | mov loc0=rp // save return address |
| 1229 | mov out0=0 // there is no "oldset" |
| 1230 | adds out1=8,sp // out1=&sigscratch->ar_pfs |
| 1231 | (pSys) mov out2=1 // out2==1 => we're in a syscall |
| 1232 | ;; |
| 1233 | (pNonSys) mov out2=0 // out2==0 => not a syscall |
| 1234 | .fframe 16 |
David Mosberger-Tang | bfd6859 | 2005-05-04 06:42:00 -0700 | [diff] [blame] | 1235 | .spillsp ar.unat, 16 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1236 | st8 [sp]=r9,-16 // allocate space for ar.unat and save it |
| 1237 | st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch |
| 1238 | .body |
| 1239 | br.call.sptk.many rp=do_notify_resume_user |
| 1240 | .ret15: .restore sp |
| 1241 | adds sp=16,sp // pop scratch stack space |
| 1242 | ;; |
| 1243 | ld8 r9=[sp] // load new unat from sigscratch->scratch_unat |
| 1244 | mov rp=loc0 |
| 1245 | ;; |
| 1246 | mov ar.unat=r9 |
| 1247 | mov ar.pfs=loc1 |
| 1248 | br.ret.sptk.many rp |
| 1249 | END(notify_resume_user) |
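The stub above marshals three arguments for the C side: out0 = 0 (no old signal set), out1 = the scratch area it just built on the memory stack (ar.unat at sp+0, ar.pfs at sp+8, the pre-existing pt_regs at sp+16), and out2 = 1 only on the syscall path. Here is a hedged C mirror of that scratch layout with hypothetical names; the kernel's own definition lives in the ia64 sigframe header and should be checked there.

    #include <stddef.h>

    /* Hypothetical mirror of the scratch area built by notify_resume_user;
     * offsets are read off the assembly above, assuming a 64-bit target. */
    struct sigscratch_sketch {
            unsigned long scratch_unat;     /* saved ar.unat   (sp + 0)  */
            unsigned long ar_pfs;           /* saved ar.pfs    (sp + 8)  */
            /* struct pt_regs pt;              pre-existing    (sp + 16) */
    };

    _Static_assert(offsetof(struct sigscratch_sketch, ar_pfs) == 8,
                   "the stub stores ar.pfs at sp+8");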
| 1250 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1251 | ENTRY(sys_rt_sigreturn) |
| 1252 | PT_REGS_UNWIND_INFO(0) |
| 1253 | /* |
| 1254 | * Allocate 8 input registers since ptrace() may clobber them |
| 1255 | */ |
| 1256 | alloc r2=ar.pfs,8,0,1,0 |
| 1257 | .prologue |
| 1258 | PT_REGS_SAVES(16) |
| 1259 | adds sp=-16,sp |
| 1260 | .body |
| 1261 | cmp.eq pNonSys,pSys=r0,r0 // sigreturn isn't a normal syscall... |
| 1262 | ;; |
| 1263 | /* |
| 1264 | * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined |
| 1265 | * syscall-entry path does not save them, we save them here instead. Note: we |
| 1266 | * don't need to save any other registers that are not saved by the streamlined |
| 1267 | * syscall path, because restore_sigcontext() restores them. |
| 1268 | */ |
| 1269 | adds r16=PT(F6)+32,sp |
| 1270 | adds r17=PT(F7)+32,sp |
| 1271 | ;; |
| 1272 | stf.spill [r16]=f6,32 |
| 1273 | stf.spill [r17]=f7,32 |
| 1274 | ;; |
| 1275 | stf.spill [r16]=f8,32 |
| 1276 | stf.spill [r17]=f9,32 |
| 1277 | ;; |
| 1278 | stf.spill [r16]=f10 |
| 1279 | stf.spill [r17]=f11 |
| 1280 | adds out0=16,sp // out0 = &sigscratch |
| 1281 | br.call.sptk.many rp=ia64_rt_sigreturn |
H. J. Lu | 763b391 | 2005-07-08 12:25:00 -0700 | [diff] [blame] | 1282 | .ret19: .restore sp,0 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | adds sp=16,sp |
| 1284 | ;; |
| 1285 | ld8 r9=[sp] // load new ar.unat |
Luis R. Rodriguez | e55645e | 2015-06-02 11:42:02 -0700 | [diff] [blame] | 1286 | mov.sptk b7=r8,ia64_leave_kernel |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1287 | ;; |
| 1288 | mov ar.unat=r9 |
| 1289 | br.many b7 |
| 1290 | END(sys_rt_sigreturn) |
| 1291 | |
| 1292 | GLOBAL_ENTRY(ia64_prepare_handle_unaligned) |
| 1293 | .prologue |
| 1294 | /* |
| 1295 | * r16 = fake ar.pfs, we simply need to make sure privilege is still 0 |
| 1296 | */ |
| 1297 | mov r16=r0 |
| 1298 | DO_SAVE_SWITCH_STACK |
| 1299 | br.call.sptk.many rp=ia64_handle_unaligned // stack frame setup in ivt |
| 1300 | .ret21: .body |
| 1301 | DO_LOAD_SWITCH_STACK |
| 1302 | br.cond.sptk.many rp // goes to ia64_leave_kernel |
| 1303 | END(ia64_prepare_handle_unaligned) |
| 1304 | |
| 1305 | // |
| 1306 | // unw_init_running(void (*callback)(info, arg), void *arg) |
| 1307 | // |
| 1308 | # define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15) |
| 1309 | |
| 1310 | GLOBAL_ENTRY(unw_init_running) |
| 1311 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) |
| 1312 | alloc loc1=ar.pfs,2,3,3,0 |
| 1313 | ;; |
| 1314 | ld8 loc2=[in0],8 |
| 1315 | mov loc0=rp |
| 1316 | mov r16=loc1 |
| 1317 | DO_SAVE_SWITCH_STACK |
| 1318 | .body |
| 1319 | |
| 1320 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) |
| 1321 | .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE |
| 1322 | SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE) |
| 1323 | adds sp=-EXTRA_FRAME_SIZE,sp |
| 1324 | .body |
| 1325 | ;; |
| 1326 | adds out0=16,sp // &info |
| 1327 | mov out1=r13 // current |
| 1328 | adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack |
| 1329 | br.call.sptk.many rp=unw_init_frame_info |
| 1330 | 1: adds out0=16,sp // &info |
| 1331 | mov b6=loc2 |
| 1332 | mov loc2=gp // save gp across indirect function call |
| 1333 | ;; |
| 1334 | ld8 gp=[in0] |
| 1335 | mov out1=in1 // arg |
| 1336 | br.call.sptk.many rp=b6 // invoke the callback function |
| 1337 | 1: mov gp=loc2 // restore gp |
| 1338 | |
| 1339 | // For now, we don't allow changing registers from within |
| 1340 | // unw_init_running; if we ever want to allow that, we'd |
| 1341 | // have to do a load_switch_stack here: |
| 1342 | .restore sp |
| 1343 | adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp |
| 1344 | |
| 1345 | mov ar.pfs=loc1 |
| 1346 | mov rp=loc0 |
| 1347 | br.ret.sptk.many rp |
| 1348 | END(unw_init_running) |
Al Viro | e007c53 | 2016-01-17 01:13:41 -0500 | [diff] [blame] | 1349 | EXPORT_SYMBOL(unw_init_running) |
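unw_init_running() saves a switch_stack, initializes an unw_frame_info for the current task, and then invokes the supplied callback with (&info, arg). The hedged usage sketch below follows the pattern of the kernel's own ia64 stack dumpers; the unw_unwind()/unw_get_ip() calls are assumed from the ia64 unwind API and should be verified against asm/unwind.h.

    #include <asm/unwind.h>
    #include <linux/printk.h>

    /* Callback: walk the frames that unw_init_running() prepared and print
     * the instruction pointer of each one. */
    static void dump_frames_sketch(struct unw_frame_info *info, void *arg)
    {
            unsigned long ip;

            do {
                    unw_get_ip(info, &ip);
                    if (!ip)
                            break;
                    printk("  [<%016lx>]\n", ip);
            } while (unw_unwind(info) >= 0);
    }

    /* Hypothetical caller somewhere in kernel code: */
    static void show_backtrace_sketch(void)
    {
            unw_init_running(dump_frames_sketch, NULL);
    }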
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 | |
Shaohua Li | d3e75ff | 2009-01-09 11:29:46 +0800 | [diff] [blame] | 1351 | #ifdef CONFIG_FUNCTION_TRACER |
Shaohua Li | a14a07b | 2009-01-09 11:29:49 +0800 | [diff] [blame] | 1352 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 1353 | GLOBAL_ENTRY(_mcount) |
| 1354 | br ftrace_stub |
| 1355 | END(_mcount) |
Al Viro | e007c53 | 2016-01-17 01:13:41 -0500 | [diff] [blame] | 1356 | EXPORT_SYMBOL(_mcount) |
Shaohua Li | a14a07b | 2009-01-09 11:29:49 +0800 | [diff] [blame] | 1357 | |
| 1358 | .here: |
| 1359 | br.ret.sptk.many b0 |
| 1360 | |
| 1361 | GLOBAL_ENTRY(ftrace_caller) |
| 1362 | alloc out0 = ar.pfs, 8, 0, 4, 0 |
| 1363 | mov out3 = r0 |
| 1364 | ;; |
| 1365 | mov out2 = b0 |
| 1366 | add r3 = 0x20, r3 |
| 1367 | mov out1 = r1; |
| 1368 | br.call.sptk.many b0 = ftrace_patch_gp |
| 1369 | // this might be called from a module, so we must patch gp |
| 1370 | ftrace_patch_gp: |
| 1371 | movl gp=__gp |
| 1372 | mov b0 = r3 |
| 1373 | ;; |
| 1374 | .global ftrace_call; |
| 1375 | ftrace_call: |
| 1376 | { |
| 1377 | .mlx |
| 1378 | nop.m 0x0 |
| 1379 | movl r3 = .here;; |
| 1380 | } |
| 1381 | alloc loc0 = ar.pfs, 4, 4, 2, 0 |
| 1382 | ;; |
| 1383 | mov loc1 = b0 |
| 1384 | mov out0 = b0 |
| 1385 | mov loc2 = r8 |
| 1386 | mov loc3 = r15 |
| 1387 | ;; |
| 1388 | adds out0 = -MCOUNT_INSN_SIZE, out0 |
| 1389 | mov out1 = in2 |
| 1390 | mov b6 = r3 |
| 1391 | |
| 1392 | br.call.sptk.many b0 = b6 |
| 1393 | ;; |
| 1394 | mov ar.pfs = loc0 |
| 1395 | mov b0 = loc1 |
| 1396 | mov r8 = loc2 |
| 1397 | mov r15 = loc3 |
| 1398 | br ftrace_stub |
| 1399 | ;; |
| 1400 | END(ftrace_caller) |
| 1401 | |
| 1402 | #else |
Shaohua Li | d3e75ff | 2009-01-09 11:29:46 +0800 | [diff] [blame] | 1403 | GLOBAL_ENTRY(_mcount) |
| 1404 | movl r2 = ftrace_stub |
| 1405 | movl r3 = ftrace_trace_function;; |
| 1406 | ld8 r3 = [r3];; |
| 1407 | ld8 r3 = [r3];; |
| 1408 | cmp.eq p7,p0 = r2, r3 |
| 1409 | (p7) br.sptk.many ftrace_stub |
| 1410 | ;; |
| 1411 | |
| 1412 | alloc loc0 = ar.pfs, 4, 4, 2, 0 |
| 1413 | ;; |
| 1414 | mov loc1 = b0 |
| 1415 | mov out0 = b0 |
| 1416 | mov loc2 = r8 |
| 1417 | mov loc3 = r15 |
| 1418 | ;; |
| 1419 | adds out0 = -MCOUNT_INSN_SIZE, out0 |
| 1420 | mov out1 = in2 |
| 1421 | mov b6 = r3 |
| 1422 | |
| 1423 | br.call.sptk.many b0 = b6 |
| 1424 | ;; |
| 1425 | mov ar.pfs = loc0 |
| 1426 | mov b0 = loc1 |
| 1427 | mov r8 = loc2 |
| 1428 | mov r15 = loc3 |
| 1429 | br ftrace_stub |
| 1430 | ;; |
| 1431 | END(_mcount) |
Shaohua Li | a14a07b | 2009-01-09 11:29:49 +0800 | [diff] [blame] | 1432 | #endif |
Shaohua Li | d3e75ff | 2009-01-09 11:29:46 +0800 | [diff] [blame] | 1433 | |
| 1434 | GLOBAL_ENTRY(ftrace_stub) |
| 1435 | mov r3 = b0 |
| 1436 | movl r2 = _mcount_ret_helper |
| 1437 | ;; |
| 1438 | mov b6 = r2 |
| 1439 | mov b7 = r3 |
| 1440 | br.ret.sptk.many b6 |
| 1441 | |
| 1442 | _mcount_ret_helper: |
| 1443 | mov b0 = r42 |
| 1444 | mov r1 = r41 |
| 1445 | mov ar.pfs = r40 |
| 1446 | br b7 |
| 1447 | END(ftrace_stub) |
| 1448 | |
| 1449 | #endif /* CONFIG_FUNCTION_TRACER */ |
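At the C level, the non-DYNAMIC_FTRACE _mcount above does little more than: load the current tracer, bail out fast if it is still ftrace_stub, and otherwise call it with (call-site address, parent address), where the call-site address is the return address minus MCOUNT_INSN_SIZE. A standalone sketch with local stand-ins follows; the real hand-off also preserves b0/r8/r15 and patches gp, which has no C equivalent.

    #define MCOUNT_INSN_SIZE_SKETCH 32      /* assumed ia64 value, see asm/ftrace.h */

    typedef void (*ftrace_func_sketch_t)(unsigned long self_ip, unsigned long parent_ip);

    static void ftrace_stub_sketch(unsigned long self_ip, unsigned long parent_ip) { }

    /* Stand-in for the kernel's ftrace_trace_function pointer. */
    static ftrace_func_sketch_t trace_function_sketch = ftrace_stub_sketch;

    static void mcount_sketch(unsigned long return_ip, unsigned long parent_ip)
    {
            ftrace_func_sketch_t func = trace_function_sketch;

            if (func == ftrace_stub_sketch)
                    return;                                 /* fast path: tracing off */

            /* "adds out0 = -MCOUNT_INSN_SIZE, out0": report the call site itself */
            func(return_ip - MCOUNT_INSN_SIZE_SKETCH, parent_ip);
    }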
| 1450 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 | .rodata |
| 1452 | .align 8 |
| 1453 | .globl sys_call_table |
| 1454 | sys_call_table: |
| 1455 | data8 sys_ni_syscall // This must be sys_ni_syscall! See ivt.S. |
| 1456 | data8 sys_exit // 1025 |
| 1457 | data8 sys_read |
| 1458 | data8 sys_write |
| 1459 | data8 sys_open |
| 1460 | data8 sys_close |
| 1461 | data8 sys_creat // 1030 |
| 1462 | data8 sys_link |
| 1463 | data8 sys_unlink |
| 1464 | data8 ia64_execve |
| 1465 | data8 sys_chdir |
| 1466 | data8 sys_fchdir // 1035 |
| 1467 | data8 sys_utimes |
| 1468 | data8 sys_mknod |
| 1469 | data8 sys_chmod |
| 1470 | data8 sys_chown |
| 1471 | data8 sys_lseek // 1040 |
| 1472 | data8 sys_getpid |
| 1473 | data8 sys_getppid |
| 1474 | data8 sys_mount |
| 1475 | data8 sys_umount |
| 1476 | data8 sys_setuid // 1045 |
| 1477 | data8 sys_getuid |
| 1478 | data8 sys_geteuid |
| 1479 | data8 sys_ptrace |
| 1480 | data8 sys_access |
| 1481 | data8 sys_sync // 1050 |
| 1482 | data8 sys_fsync |
| 1483 | data8 sys_fdatasync |
| 1484 | data8 sys_kill |
| 1485 | data8 sys_rename |
| 1486 | data8 sys_mkdir // 1055 |
| 1487 | data8 sys_rmdir |
| 1488 | data8 sys_dup |
Heiko Carstens | 1134723 | 2009-01-14 14:13:56 +0100 | [diff] [blame] | 1489 | data8 sys_ia64_pipe |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1490 | data8 sys_times |
| 1491 | data8 ia64_brk // 1060 |
| 1492 | data8 sys_setgid |
| 1493 | data8 sys_getgid |
| 1494 | data8 sys_getegid |
| 1495 | data8 sys_acct |
| 1496 | data8 sys_ioctl // 1065 |
| 1497 | data8 sys_fcntl |
| 1498 | data8 sys_umask |
| 1499 | data8 sys_chroot |
| 1500 | data8 sys_ustat |
| 1501 | data8 sys_dup2 // 1070 |
| 1502 | data8 sys_setreuid |
| 1503 | data8 sys_setregid |
| 1504 | data8 sys_getresuid |
| 1505 | data8 sys_setresuid |
| 1506 | data8 sys_getresgid // 1075 |
| 1507 | data8 sys_setresgid |
| 1508 | data8 sys_getgroups |
| 1509 | data8 sys_setgroups |
| 1510 | data8 sys_getpgid |
| 1511 | data8 sys_setpgid // 1080 |
| 1512 | data8 sys_setsid |
| 1513 | data8 sys_getsid |
| 1514 | data8 sys_sethostname |
| 1515 | data8 sys_setrlimit |
| 1516 | data8 sys_getrlimit // 1085 |
| 1517 | data8 sys_getrusage |
| 1518 | data8 sys_gettimeofday |
| 1519 | data8 sys_settimeofday |
| 1520 | data8 sys_select |
| 1521 | data8 sys_poll // 1090 |
| 1522 | data8 sys_symlink |
| 1523 | data8 sys_readlink |
| 1524 | data8 sys_uselib |
| 1525 | data8 sys_swapon |
| 1526 | data8 sys_swapoff // 1095 |
| 1527 | data8 sys_reboot |
| 1528 | data8 sys_truncate |
| 1529 | data8 sys_ftruncate |
| 1530 | data8 sys_fchmod |
| 1531 | data8 sys_fchown // 1100 |
| 1532 | data8 ia64_getpriority |
| 1533 | data8 sys_setpriority |
| 1534 | data8 sys_statfs |
| 1535 | data8 sys_fstatfs |
| 1536 | data8 sys_gettid // 1105 |
| 1537 | data8 sys_semget |
| 1538 | data8 sys_semop |
| 1539 | data8 sys_semctl |
| 1540 | data8 sys_msgget |
| 1541 | data8 sys_msgsnd // 1110 |
| 1542 | data8 sys_msgrcv |
| 1543 | data8 sys_msgctl |
| 1544 | data8 sys_shmget |
Stephen Rothwell | 7d87e14 | 2005-05-01 08:59:12 -0700 | [diff] [blame] | 1545 | data8 sys_shmat |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1546 | data8 sys_shmdt // 1115 |
| 1547 | data8 sys_shmctl |
| 1548 | data8 sys_syslog |
| 1549 | data8 sys_setitimer |
| 1550 | data8 sys_getitimer |
| 1551 | data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */ |
| 1552 | data8 sys_ni_syscall /* was: ia64_oldlstat */ |
| 1553 | data8 sys_ni_syscall /* was: ia64_oldfstat */ |
| 1554 | data8 sys_vhangup |
| 1555 | data8 sys_lchown |
| 1556 | data8 sys_remap_file_pages // 1125 |
| 1557 | data8 sys_wait4 |
| 1558 | data8 sys_sysinfo |
| 1559 | data8 sys_clone |
| 1560 | data8 sys_setdomainname |
| 1561 | data8 sys_newuname // 1130 |
| 1562 | data8 sys_adjtimex |
| 1563 | data8 sys_ni_syscall /* was: ia64_create_module */ |
| 1564 | data8 sys_init_module |
| 1565 | data8 sys_delete_module |
| 1566 | data8 sys_ni_syscall // 1135 /* was: sys_get_kernel_syms */ |
| 1567 | data8 sys_ni_syscall /* was: sys_query_module */ |
| 1568 | data8 sys_quotactl |
| 1569 | data8 sys_bdflush |
| 1570 | data8 sys_sysfs |
| 1571 | data8 sys_personality // 1140 |
| 1572 | data8 sys_ni_syscall // sys_afs_syscall |
| 1573 | data8 sys_setfsuid |
| 1574 | data8 sys_setfsgid |
| 1575 | data8 sys_getdents |
| 1576 | data8 sys_flock // 1145 |
| 1577 | data8 sys_readv |
| 1578 | data8 sys_writev |
| 1579 | data8 sys_pread64 |
| 1580 | data8 sys_pwrite64 |
| 1581 | data8 sys_sysctl // 1150 |
| 1582 | data8 sys_mmap |
| 1583 | data8 sys_munmap |
| 1584 | data8 sys_mlock |
| 1585 | data8 sys_mlockall |
| 1586 | data8 sys_mprotect // 1155 |
| 1587 | data8 ia64_mremap |
| 1588 | data8 sys_msync |
| 1589 | data8 sys_munlock |
| 1590 | data8 sys_munlockall |
| 1591 | data8 sys_sched_getparam // 1160 |
| 1592 | data8 sys_sched_setparam |
| 1593 | data8 sys_sched_getscheduler |
| 1594 | data8 sys_sched_setscheduler |
| 1595 | data8 sys_sched_yield |
| 1596 | data8 sys_sched_get_priority_max // 1165 |
| 1597 | data8 sys_sched_get_priority_min |
| 1598 | data8 sys_sched_rr_get_interval |
| 1599 | data8 sys_nanosleep |
NeilBrown | f5b9409 | 2011-08-26 18:03:11 -0400 | [diff] [blame] | 1600 | data8 sys_ni_syscall // old nfsservctl |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 | data8 sys_prctl // 1170 |
| 1602 | data8 sys_getpagesize |
| 1603 | data8 sys_mmap2 |
| 1604 | data8 sys_pciconfig_read |
| 1605 | data8 sys_pciconfig_write |
| 1606 | data8 sys_perfmonctl // 1175 |
| 1607 | data8 sys_sigaltstack |
| 1608 | data8 sys_rt_sigaction |
| 1609 | data8 sys_rt_sigpending |
| 1610 | data8 sys_rt_sigprocmask |
| 1611 | data8 sys_rt_sigqueueinfo // 1180 |
| 1612 | data8 sys_rt_sigreturn |
| 1613 | data8 sys_rt_sigsuspend |
| 1614 | data8 sys_rt_sigtimedwait |
| 1615 | data8 sys_getcwd |
| 1616 | data8 sys_capget // 1185 |
| 1617 | data8 sys_capset |
| 1618 | data8 sys_sendfile64 |
| 1619 | data8 sys_ni_syscall // sys_getpmsg (STREAMS) |
| 1620 | data8 sys_ni_syscall // sys_putpmsg (STREAMS) |
| 1621 | data8 sys_socket // 1190 |
| 1622 | data8 sys_bind |
| 1623 | data8 sys_connect |
| 1624 | data8 sys_listen |
| 1625 | data8 sys_accept |
| 1626 | data8 sys_getsockname // 1195 |
| 1627 | data8 sys_getpeername |
| 1628 | data8 sys_socketpair |
| 1629 | data8 sys_send |
| 1630 | data8 sys_sendto |
| 1631 | data8 sys_recv // 1200 |
| 1632 | data8 sys_recvfrom |
| 1633 | data8 sys_shutdown |
| 1634 | data8 sys_setsockopt |
| 1635 | data8 sys_getsockopt |
| 1636 | data8 sys_sendmsg // 1205 |
| 1637 | data8 sys_recvmsg |
| 1638 | data8 sys_pivot_root |
| 1639 | data8 sys_mincore |
| 1640 | data8 sys_madvise |
| 1641 | data8 sys_newstat // 1210 |
| 1642 | data8 sys_newlstat |
| 1643 | data8 sys_newfstat |
| 1644 | data8 sys_clone2 |
| 1645 | data8 sys_getdents64 |
| 1646 | data8 sys_getunwind // 1215 |
| 1647 | data8 sys_readahead |
| 1648 | data8 sys_setxattr |
| 1649 | data8 sys_lsetxattr |
| 1650 | data8 sys_fsetxattr |
| 1651 | data8 sys_getxattr // 1220 |
| 1652 | data8 sys_lgetxattr |
| 1653 | data8 sys_fgetxattr |
| 1654 | data8 sys_listxattr |
| 1655 | data8 sys_llistxattr |
| 1656 | data8 sys_flistxattr // 1225 |
| 1657 | data8 sys_removexattr |
| 1658 | data8 sys_lremovexattr |
| 1659 | data8 sys_fremovexattr |
| 1660 | data8 sys_tkill |
| 1661 | data8 sys_futex // 1230 |
| 1662 | data8 sys_sched_setaffinity |
| 1663 | data8 sys_sched_getaffinity |
| 1664 | data8 sys_set_tid_address |
| 1665 | data8 sys_fadvise64_64 |
| 1666 | data8 sys_tgkill // 1235 |
| 1667 | data8 sys_exit_group |
| 1668 | data8 sys_lookup_dcookie |
| 1669 | data8 sys_io_setup |
| 1670 | data8 sys_io_destroy |
| 1671 | data8 sys_io_getevents // 1240 |
| 1672 | data8 sys_io_submit |
| 1673 | data8 sys_io_cancel |
| 1674 | data8 sys_epoll_create |
| 1675 | data8 sys_epoll_ctl |
| 1676 | data8 sys_epoll_wait // 1245 |
| 1677 | data8 sys_restart_syscall |
| 1678 | data8 sys_semtimedop |
| 1679 | data8 sys_timer_create |
| 1680 | data8 sys_timer_settime |
| 1681 | data8 sys_timer_gettime // 1250 |
| 1682 | data8 sys_timer_getoverrun |
| 1683 | data8 sys_timer_delete |
| 1684 | data8 sys_clock_settime |
| 1685 | data8 sys_clock_gettime |
| 1686 | data8 sys_clock_getres // 1255 |
| 1687 | data8 sys_clock_nanosleep |
| 1688 | data8 sys_fstatfs64 |
| 1689 | data8 sys_statfs64 |
| 1690 | data8 sys_mbind |
| 1691 | data8 sys_get_mempolicy // 1260 |
| 1692 | data8 sys_set_mempolicy |
| 1693 | data8 sys_mq_open |
| 1694 | data8 sys_mq_unlink |
| 1695 | data8 sys_mq_timedsend |
| 1696 | data8 sys_mq_timedreceive // 1265 |
| 1697 | data8 sys_mq_notify |
| 1698 | data8 sys_mq_getsetattr |
Zou Nan hai | a7956113 | 2006-12-07 09:51:35 -0800 | [diff] [blame] | 1699 | data8 sys_kexec_load |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1700 | data8 sys_ni_syscall // reserved for vserver |
| 1701 | data8 sys_waitid // 1270 |
| 1702 | data8 sys_add_key |
| 1703 | data8 sys_request_key |
| 1704 | data8 sys_keyctl |
Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 1705 | data8 sys_ioprio_set |
| 1706 | data8 sys_ioprio_get // 1275 |
Christoph Lameter | 742755a | 2006-06-23 02:03:55 -0700 | [diff] [blame] | 1707 | data8 sys_move_pages |
Robert Love | d108919 | 2005-07-27 08:58:00 -0700 | [diff] [blame] | 1708 | data8 sys_inotify_init |
| 1709 | data8 sys_inotify_add_watch |
| 1710 | data8 sys_inotify_rm_watch |
Christoph Lameter | 3974388 | 2006-01-08 01:00:51 -0800 | [diff] [blame] | 1711 | data8 sys_migrate_pages // 1280 |
Chen, Kenneth W | 9ed2ad8 | 2006-01-31 14:26:25 -0800 | [diff] [blame] | 1712 | data8 sys_openat |
| 1713 | data8 sys_mkdirat |
| 1714 | data8 sys_mknodat |
| 1715 | data8 sys_fchownat |
| 1716 | data8 sys_futimesat // 1285 |
| 1717 | data8 sys_newfstatat |
| 1718 | data8 sys_unlinkat |
| 1719 | data8 sys_renameat |
| 1720 | data8 sys_linkat |
| 1721 | data8 sys_symlinkat // 1290 |
| 1722 | data8 sys_readlinkat |
| 1723 | data8 sys_fchmodat |
| 1724 | data8 sys_faccessat |
Alexey Kuznetsov | e180583 | 2007-05-08 15:57:59 -0700 | [diff] [blame] | 1725 | data8 sys_pselect6 |
Tony Luck | ad9e39c | 2008-02-06 13:57:46 -0800 | [diff] [blame] | 1726 | data8 sys_ppoll // 1295 |
Janak Desai | 9621a4e | 2006-02-08 15:43:38 -0800 | [diff] [blame] | 1727 | data8 sys_unshare |
Jens Axboe | 5274f05 | 2006-03-30 15:15:30 +0200 | [diff] [blame] | 1728 | data8 sys_splice |
Tony Luck | 5c55cd6 | 2006-09-26 14:04:42 -0700 | [diff] [blame] | 1729 | data8 sys_set_robust_list |
| 1730 | data8 sys_get_robust_list |
Tony Luck | d905b00 | 2006-04-04 14:08:11 -0700 | [diff] [blame] | 1731 | data8 sys_sync_file_range // 1300 |
Jens Axboe | 7052449 | 2006-04-11 15:51:17 +0200 | [diff] [blame] | 1732 | data8 sys_tee |
Jens Axboe | 912d35f | 2006-04-26 10:59:21 +0200 | [diff] [blame] | 1733 | data8 sys_vmsplice |
David Chinner | 3d7559e | 2007-07-16 15:33:40 +1000 | [diff] [blame] | 1734 | data8 sys_fallocate |
Fenghua Yu | 86afa9e | 2007-02-05 16:07:57 -0800 | [diff] [blame] | 1735 | data8 sys_getcpu |
Tony Luck | 472118e | 2007-05-10 09:44:42 -0700 | [diff] [blame] | 1736 | data8 sys_epoll_pwait // 1305 |
| 1737 | data8 sys_utimensat |
Tony Luck | ae67e49 | 2007-05-14 15:55:11 -0700 | [diff] [blame] | 1738 | data8 sys_signalfd |
Davide Libenzi | 4d672e7 | 2008-02-04 22:27:26 -0800 | [diff] [blame] | 1739 | data8 sys_ni_syscall |
Tony Luck | ae67e49 | 2007-05-14 15:55:11 -0700 | [diff] [blame] | 1740 | data8 sys_eventfd |
Tony Luck | ad9e39c | 2008-02-06 13:57:46 -0800 | [diff] [blame] | 1741 | data8 sys_timerfd_create // 1310 |
| 1742 | data8 sys_timerfd_settime |
| 1743 | data8 sys_timerfd_gettime |
Tony Luck | 3e4d0ca | 2008-07-25 10:10:28 -0700 | [diff] [blame] | 1744 | data8 sys_signalfd4 |
| 1745 | data8 sys_eventfd2 |
| 1746 | data8 sys_epoll_create1 // 1315 |
| 1747 | data8 sys_dup3 |
| 1748 | data8 sys_pipe2 |
| 1749 | data8 sys_inotify_init1 |
Tony Luck | 8851d37 | 2009-04-08 13:46:14 -0700 | [diff] [blame] | 1750 | data8 sys_preadv |
| 1751 | data8 sys_pwritev // 1320 |
Tony Luck | 97de6ad | 2009-06-15 16:11:43 -0700 | [diff] [blame] | 1752 | data8 sys_rt_tgsigqueueinfo |
Arnaldo Carvalho de Melo | a2e2725 | 2009-10-12 23:40:10 -0700 | [diff] [blame] | 1753 | data8 sys_recvmmsg |
Tony Luck | a78b2de | 2010-08-12 11:56:57 -0700 | [diff] [blame] | 1754 | data8 sys_fanotify_init |
| 1755 | data8 sys_fanotify_mark |
| 1756 | data8 sys_prlimit64 // 1325 |
Tony Luck | 9298168 | 2011-03-22 10:54:24 -0700 | [diff] [blame] | 1757 | data8 sys_name_to_handle_at |
| 1758 | data8 sys_open_by_handle_at |
| 1759 | data8 sys_clock_adjtime |
| 1760 | data8 sys_syncfs |
Eric W. Biederman | 7b21fdd | 2011-05-27 19:28:27 -0700 | [diff] [blame] | 1761 | data8 sys_setns // 1330 |
Tony Luck | 83caba8 | 2011-05-31 10:09:24 -0700 | [diff] [blame] | 1762 | data8 sys_sendmmsg |
Tony Luck | 5569459 | 2011-11-01 09:50:08 -0700 | [diff] [blame] | 1763 | data8 sys_process_vm_readv |
| 1764 | data8 sys_process_vm_writev |
Émeric Maschino | 65cc21b | 2012-01-09 12:55:10 -0800 | [diff] [blame] | 1765 | data8 sys_accept4 |
Luck, Tony | 062fe95a | 2013-01-03 10:33:48 -0800 | [diff] [blame] | 1766 | data8 sys_finit_module // 1335 |
Tony Luck | 7de8246 | 2014-01-28 09:38:37 -0800 | [diff] [blame] | 1767 | data8 sys_sched_setattr |
| 1768 | data8 sys_sched_getattr |
Miklos Szeredi | 3ca976a | 2014-05-20 10:59:38 +0200 | [diff] [blame] | 1769 | data8 sys_renameat2 |
Tony Luck | 5e467e2 | 2014-07-30 14:05:15 -0700 | [diff] [blame] | 1770 | data8 sys_getrandom |
Tony Luck | 703e6a6 | 2014-08-18 10:29:52 -0700 | [diff] [blame] | 1771 | data8 sys_memfd_create // 1340 |
Tony Luck | 5dab4b7 | 2014-10-09 13:26:58 -0700 | [diff] [blame] | 1772 | data8 sys_bpf |
Tony Luck | b739896 | 2015-01-05 11:25:19 -0800 | [diff] [blame] | 1773 | data8 sys_execveat |
Luck, Tony | 865ca08 | 2015-09-15 13:50:18 -0700 | [diff] [blame] | 1774 | data8 sys_userfaultfd |
| 1775 | data8 sys_membarrier |
Émeric MASCHINO | d305c47 | 2015-09-22 23:58:48 +0200 | [diff] [blame] | 1776 | data8 sys_kcmp // 1345 |
Tony Luck | 2780188 | 2015-12-14 10:30:02 -0800 | [diff] [blame] | 1777 | data8 sys_mlock2 |
Tony Luck | 884a12a | 2016-01-22 14:20:01 -0800 | [diff] [blame] | 1778 | data8 sys_copy_file_range |
Tony Luck | 2d5ae5c | 2016-03-25 14:37:32 -0700 | [diff] [blame] | 1779 | data8 sys_preadv2 |
| 1780 | data8 sys_pwritev2 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1781 | |
| 1782 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls |
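The .org directive above is a build-time guard: the assembler refuses to move the location counter backwards, so adding a table entry without also bumping NR_syscalls fails the build (a table that is too short is merely padded). A C analogue of the same idea, using hypothetical names, which additionally catches a table that is too short:

    typedef long (*syscall_fn_sketch_t)(void);

    #define NR_SYSCALLS_SKETCH 4            /* stand-in for NR_syscalls */

    long sys_ni_sketch(void);
    long sys_exit_sketch(void);
    long sys_read_sketch(void);
    long sys_write_sketch(void);

    static const syscall_fn_sketch_t sys_call_table_sketch[] = {
            sys_ni_sketch, sys_exit_sketch, sys_read_sketch, sys_write_sketch,
    };

    /* Like ".org sys_call_table + 8*NR_syscalls": break the build if the
     * entry count and NR_syscalls ever drift apart. */
    _Static_assert(sizeof(sys_call_table_sketch) ==
                   NR_SYSCALLS_SKETCH * sizeof(syscall_fn_sketch_t),
                   "sys_call_table out of sync with NR_syscalls");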