/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Event entry/exit for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#include <asm/asm-offsets.h>	/* assembly-safer versions of C defines */
#include <asm/mem-layout.h>	/* sigh, except for page_offset */
#include <asm/hexagon_vm.h>
#include <asm/thread_info.h>

/*
 * Entry into guest-mode Linux under the Hexagon Virtual Machine.
 * The stack pointer points at the event record - build pt_regs on
 * top of it, set up a plausible C stack frame, and dispatch to the
 * C handler.  On return, execute the vmrte virtual instruction with
 * SP back where we started.
 *
 * As of VM Spec 0.5, the HVM event record is fetched with a trap.
 */

/*
 * Save the full register state, while setting up the thread_info
 * struct pointer (derived from the kernel stack pointer) in the
 * THREADINFO_REG register, stashing the prior thread_info.regs
 * pointer in a callee-saved register (R24, which had better never
 * be assigned to THREADINFO_REG), and updating thread_info.regs to
 * point at the current stack frame, so as to support nested events
 * in kernel mode.
 *
 * As this is common code, we set the pt_regs system call number
 * to -1 for all events.  It will be replaced with the real system
 * call number in the case where we decode a system call (trap0(#1)).
 */

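/*
 * The thread_info derivation below is plain stack-pointer masking.
 * A rough C equivalent (a sketch, assuming the usual layout with
 * thread_info at the base of the kernel stack):
 *
 *	struct thread_info *ti = (struct thread_info *)
 *		((unsigned long)sp & ~(THREAD_SIZE - 1));
 *
 * which is what the neg/and sequence (V2/V3) or the single
 * and(R0, ##-_THREAD_SIZE) (V4+) computes, since -THREAD_SIZE equals
 * ~(THREAD_SIZE - 1) for a power-of-two THREAD_SIZE.
 */
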
#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define save_pt_regs()\
	memd(R0 + #_PT_R3130) = R31:30; \
	{ memw(R0 + #_PT_R2928) = R28; \
	  R31 = memw(R0 + #_PT_ER_VMPSP); }\
	{ memw(R0 + #(_PT_R2928 + 4)) = R31; \
	  R31 = ugp; } \
	{ memd(R0 + #_PT_R2726) = R27:26; \
	  R30 = gp; } \
	memd(R0 + #_PT_R2524) = R25:24; \
	memd(R0 + #_PT_R2322) = R23:22; \
	memd(R0 + #_PT_R2120) = R21:20; \
	memd(R0 + #_PT_R1918) = R19:18; \
	memd(R0 + #_PT_R1716) = R17:16; \
	memd(R0 + #_PT_R1514) = R15:14; \
	memd(R0 + #_PT_R1312) = R13:12; \
	{ memd(R0 + #_PT_R1110) = R11:10; \
	  R15 = lc0; } \
	{ memd(R0 + #_PT_R0908) = R9:8; \
	  R14 = sa0; } \
	{ memd(R0 + #_PT_R0706) = R7:6; \
	  R13 = lc1; } \
	{ memd(R0 + #_PT_R0504) = R5:4; \
	  R12 = sa1; } \
	{ memd(R0 + #_PT_GPUGP) = R31:30; \
	  R11 = m1; \
	  R2.H = #HI(_THREAD_SIZE); } \
	{ memd(R0 + #_PT_LC0SA0) = R15:14; \
	  R10 = m0; \
	  R2.L = #LO(_THREAD_SIZE); } \
	{ memd(R0 + #_PT_LC1SA1) = R13:12; \
	  R15 = p3:0; \
	  R2 = neg(R2); } \
	{ memd(R0 + #_PT_M1M0) = R11:10; \
	  R14 = usr; \
	  R2 = and(R0, R2); } \
	{ memd(R0 + #_PT_PREDSUSR) = R15:14; \
	  THREADINFO_REG = R2; } \
	{ R24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
	  memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
	  R2 = #-1; } \
	{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
	  R30 = #0; }
#else
/* V4+ */
/* the # ## # syntax inserts a literal ## */
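/*
 * That is: inside a macro body, cpp would treat a bare "##" as its
 * token-paste operator, so "# ## #" is used to paste two "#" tokens
 * into the literal "##" the assembler needs for a 32-bit extended
 * immediate (as in the and(R0, # ## #-_THREAD_SIZE) packet below).
 */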
#define save_pt_regs()\
	{ memd(R0 + #_PT_R3130) = R31:30; \
	  R30 = memw(R0 + #_PT_ER_VMPSP); }\
	{ memw(R0 + #_PT_R2928) = R28; \
	  memw(R0 + #(_PT_R2928 + 4)) = R30; }\
	{ R31:30 = C11:10; \
	  memd(R0 + #_PT_R2726) = R27:26; \
	  memd(R0 + #_PT_R2524) = R25:24; }\
	{ memd(R0 + #_PT_R2322) = R23:22; \
	  memd(R0 + #_PT_R2120) = R21:20; }\
	{ memd(R0 + #_PT_R1918) = R19:18; \
	  memd(R0 + #_PT_R1716) = R17:16; }\
	{ memd(R0 + #_PT_R1514) = R15:14; \
	  memd(R0 + #_PT_R1312) = R13:12; \
	  R17:16 = C13:12; }\
	{ memd(R0 + #_PT_R1110) = R11:10; \
	  memd(R0 + #_PT_R0908) = R9:8; \
	  R15:14 = C1:0; } \
	{ memd(R0 + #_PT_R0706) = R7:6; \
	  memd(R0 + #_PT_R0504) = R5:4; \
	  R13:12 = C3:2; } \
	{ memd(R0 + #_PT_GPUGP) = R31:30; \
	  memd(R0 + #_PT_LC0SA0) = R15:14; \
	  R11:10 = C7:6; }\
	{ THREADINFO_REG = and(R0, # ## #-_THREAD_SIZE); \
	  memd(R0 + #_PT_LC1SA1) = R13:12; \
	  R15 = p3:0; }\
	{ memd(R0 + #_PT_M1M0) = R11:10; \
	  memw(R0 + #_PT_PREDSUSR + 4) = R15; }\
	{ R24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
	  memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
	  R2 = #-1; } \
	{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
	  memd(R0 + #_PT_CS1CS0) = R17:16; \
	  R30 = #0; }
#endif

/*
 * Restore registers and thread_info.regs state.  THREADINFO_REG
 * is assumed to still be sane, and R24 to have been correctly
 * preserved.  Don't restore R29 (SP) until later - it remains the
 * base address for all of the pt_regs loads below, so it has to
 * stay valid until the frame is torn down.
 */

#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define restore_pt_regs() \
	{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
	  R15:14 = memd(R0 + #_PT_PREDSUSR); } \
	{ R11:10 = memd(R0 + #_PT_M1M0); \
	  p3:0 = R15; } \
	{ R13:12 = memd(R0 + #_PT_LC1SA1); \
	  usr = R14; } \
	{ R15:14 = memd(R0 + #_PT_LC0SA0); \
	  m1 = R11; } \
	{ R3:2 = memd(R0 + #_PT_R0302); \
	  m0 = R10; } \
	{ R5:4 = memd(R0 + #_PT_R0504); \
	  lc1 = R13; } \
	{ R7:6 = memd(R0 + #_PT_R0706); \
	  sa1 = R12; } \
	{ R9:8 = memd(R0 + #_PT_R0908); \
	  lc0 = R15; } \
	{ R11:10 = memd(R0 + #_PT_R1110); \
	  sa0 = R14; } \
	{ R13:12 = memd(R0 + #_PT_R1312); \
	  R15:14 = memd(R0 + #_PT_R1514); } \
	{ R17:16 = memd(R0 + #_PT_R1716); \
	  R19:18 = memd(R0 + #_PT_R1918); } \
	{ R21:20 = memd(R0 + #_PT_R2120); \
	  R23:22 = memd(R0 + #_PT_R2322); } \
	{ R25:24 = memd(R0 + #_PT_R2524); \
	  R27:26 = memd(R0 + #_PT_R2726); } \
	R31:30 = memd(R0 + #_PT_GPUGP); \
	{ R28 = memw(R0 + #_PT_R2928); \
	  ugp = R31; } \
	{ R31:30 = memd(R0 + #_PT_R3130); \
	  gp = R30; }
#else
/* V4+ */
#define restore_pt_regs() \
	{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
	  R15:14 = memd(R0 + #_PT_PREDSUSR); } \
	{ R11:10 = memd(R0 + #_PT_M1M0); \
	  R13:12 = memd(R0 + #_PT_LC1SA1); \
	  p3:0 = R15; } \
	{ R15:14 = memd(R0 + #_PT_LC0SA0); \
	  R3:2 = memd(R0 + #_PT_R0302); \
	  usr = R14; } \
	{ R5:4 = memd(R0 + #_PT_R0504); \
	  R7:6 = memd(R0 + #_PT_R0706); \
	  C7:6 = R11:10; }\
	{ R9:8 = memd(R0 + #_PT_R0908); \
	  R11:10 = memd(R0 + #_PT_R1110); \
	  C3:2 = R13:12; }\
	{ R13:12 = memd(R0 + #_PT_R1312); \
	  R15:14 = memd(R0 + #_PT_R1514); \
	  C1:0 = R15:14; }\
	{ R17:16 = memd(R0 + #_PT_R1716); \
	  R19:18 = memd(R0 + #_PT_R1918); } \
	{ R21:20 = memd(R0 + #_PT_R2120); \
	  R23:22 = memd(R0 + #_PT_R2322); } \
	{ R25:24 = memd(R0 + #_PT_R2524); \
	  R27:26 = memd(R0 + #_PT_R2726); } \
	R31:30 = memd(R0 + #_PT_CS1CS0); \
	{ C13:12 = R31:30; \
	  R31:30 = memd(R0 + #_PT_GPUGP); \
	  R28 = memw(R0 + #_PT_R2928); }\
	{ C11:10 = R31:30; \
	  R31:30 = memd(R0 + #_PT_R3130); }
#endif

/*
 * Clear off enough space for the rest of pt_regs; the evrec is part
 * of pt_regs in HVM mode.  Save R0/R1, and set the handler's address
 * in R1.  R0 is the address of pt_regs and is the parameter to
 * save_pt_regs.
 */

/*
 * Since the HVM no longer automagically pushes the EVREC onto the
 * stack, we subtract the entire frame size up front and then fill
 * it in ourselves.  R0, R1, R2, and R3 need to be saved off
 * immediately.
 */

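/*
 * Rough shape of the frame built here (a sketch; the authoritative
 * field offsets are the _PT_* constants from asm-offsets):
 *
 *	old R29 ->	+---------------------------+  higher addresses
 *			| pt_regs: GPRs, preds/usr, |
 *			| embedded HVM event record |
 *	new R29 ->	+---------------------------+  old R29 - _PT_REGS_SIZE
 *
 * R0 is then pointed at the base, so the C handler receives the
 * pt_regs address as its first argument.
 */
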
#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define vm_event_entry(CHandler) \
	{ \
		R29 = add(R29, #-(_PT_REGS_SIZE)); \
		memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
	} \
	{ \
		memd(R29 + #_PT_R0302) = R3:2; \
	} \
	trap1(#HVM_TRAP1_VMGETREGS); \
	{ \
		memd(R29 + #_PT_ER_VMEL) = R1:0; \
		R0 = R29; \
		R1.L = #LO(CHandler); \
	} \
	{ \
		memd(R29 + #_PT_ER_VMPSP) = R3:2; \
		R1.H = #HI(CHandler); \
		jump event_dispatch; \
	}
#else
/* V4+ */
/* turn on I$ prefetch early */
/* the # ## # syntax inserts a literal ## */
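/*
 * "I$ prefetch" refers to the setbit(R0, #16) on the saved USR value
 * below - assumed here to be the instruction-prefetch enable bit in
 * USR on V4+ parts; see the V4 PRM for the authoritative USR layout.
 */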
#define vm_event_entry(CHandler) \
	{ \
		R29 = add(R29, #-(_PT_REGS_SIZE)); \
		memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
		memd(R29 + #(_PT_R0302 + -_PT_REGS_SIZE)) = R3:2; \
		R0 = usr; \
	} \
	{ \
		memw(R29 + #_PT_PREDSUSR) = R0; \
		R0 = setbit(R0, #16); \
	} \
	usr = R0; \
	R1:0 = G1:0; \
	{ \
		memd(R29 + #_PT_ER_VMEL) = R1:0; \
		R1 = # ## #(CHandler); \
		R3:2 = G3:2; \
	} \
	{ \
		R0 = R29; \
		memd(R29 + #_PT_ER_VMPSP) = R3:2; \
		jump event_dispatch; \
	}
#endif

	.text
/*
 * Do bulk save/restore in one place.
 * Adds a jump to dispatch latency, but
 * saves hundreds of bytes.
 */

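/*
 * Contract on entry to event_dispatch: R0 holds the pt_regs pointer
 * and R1 holds the C handler's address, both set up by the
 * vm_event_entry() macros before they jump here.
 */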
event_dispatch:
	save_pt_regs()
	callr R1

/*
 * Coming back from the C-world, our thread info pointer
 * should be in the designated register (usually R19).
 *
 * If we were in kernel mode, we don't need to check the scheduler
 * or signals if CONFIG_PREEMPTION is not set.  If it is set, we have
 * to jump to a need_resched kind of block.
 * Note that CONFIG_PREEMPTION is not supported yet.
 */

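/*
 * Convention for the VMSETIE trap used throughout this path: R0
 * carries the requested interrupt-enable state (#VM_INT_DISABLE
 * here) and trap1(#HVM_TRAP1_VMSETIE) asks the hypervisor to
 * apply it.
 */
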
#ifdef CONFIG_PREEMPTION
	R0 = #VM_INT_DISABLE
	trap1(#HVM_TRAP1_VMSETIE)
#endif

	/* "Nested control path" -- if the previous mode was kernel */
	{
		R0 = memw(R29 + #_PT_ER_VMEST);
		R26.L = #LO(do_work_pending);
	}
	{
		P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
		if (!P0.new) jump:nt restore_all;
		R26.H = #HI(do_work_pending);
		R0 = #VM_INT_DISABLE;
	}

/*
 * This is also the return path from fork/system calls, which
 * normally come back from user mode.
 *
 * R26 needs to hold the address of do_work_pending, and R0 should
 * hold #VM_INT_DISABLE.
 */

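/*
 * The C side of this loop is expected to look like (a sketch of the
 * prototype; the real one lives in the hexagon process/signal code):
 *
 *	int do_work_pending(struct pt_regs *regs, u32 thread_info_flags);
 *
 * A nonzero return means there may be more work to do, so we loop
 * back with interrupts disabled again and re-check.
 */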
check_work_pending:
	/* Disable interrupts while checking TIF */
	trap1(#HVM_TRAP1_VMSETIE)
	{
		R0 = R29;  /* regs should still be at top of stack */
		R1 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
		callr R26;
	}

	{
		P0 = cmp.eq(R0, #0); if (!P0.new) jump:nt check_work_pending;
		R0 = #VM_INT_DISABLE;
	}

restore_all:
	/*
	 * Disable interrupts, if they weren't already, before reg restore.
	 * R0 gets preloaded with #VM_INT_DISABLE before we get here.
	 */
	trap1(#HVM_TRAP1_VMSETIE)

	/* do the setregs here for VM 0.5 */
	/* R29 here should already be pointing at pt_regs */
	{
		R1:0 = memd(R29 + #_PT_ER_VMEL);
		R3:2 = memd(R29 + #_PT_ER_VMPSP);
	}
#if CONFIG_HEXAGON_ARCH_VERSION < 4
	trap1(#HVM_TRAP1_VMSETREGS);
#else
	G1:0 = R1:0;
	G3:2 = R3:2;
#endif

	R0 = R29
	restore_pt_regs()
	{
		R1:0 = memd(R29 + #_PT_R0100);
		R29 = add(R29, #_PT_REGS_SIZE);
	}
	trap1(#HVM_TRAP1_VMRTE)
	/* Notreached */

	.globl _K_enter_genex
_K_enter_genex:
	vm_event_entry(do_genex)

	.globl _K_enter_interrupt
_K_enter_interrupt:
	vm_event_entry(arch_do_IRQ)

	.globl _K_enter_trap0
_K_enter_trap0:
	vm_event_entry(do_trap0)

	.globl _K_enter_machcheck
_K_enter_machcheck:
	vm_event_entry(do_machcheck)

	.globl _K_enter_debug
_K_enter_debug:
	vm_event_entry(do_debug_exception)

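/*
 * For kernel threads, copy_thread() is assumed to leave the thread
 * function in R24 and its argument in R25 (inferred from the R24/R25
 * usage below); user-space forks come back with R24 == 0 and fall
 * straight through to the work-pending check.
 */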
	.globl ret_from_fork
ret_from_fork:
	{
		call schedule_tail
		R26.H = #HI(do_work_pending);
	}
	{
		P0 = cmp.eq(R24, #0);
		R26.L = #LO(do_work_pending);
		R0 = #VM_INT_DISABLE;
	}
	if (P0) jump check_work_pending
	{
		R0 = R25;
		callr R24
	}
	{
		jump check_work_pending
		R0 = #VM_INT_DISABLE;
	}