/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_

#include "invoke_type.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
#include "dex/backend.h"
#include "dex/growable_array.h"
#include "dex/arena_allocator.h"
#include "driver/compiler_driver.h"
#include "safe_map.h"

namespace art {

// Set to 1 to measure cost of suspend check.
#define NO_SUSPEND 0

#define IS_BINARY_OP (1ULL << kIsBinaryOp)
#define IS_BRANCH (1ULL << kIsBranch)
#define IS_IT (1ULL << kIsIT)
#define IS_LOAD (1ULL << kMemLoad)
#define IS_QUAD_OP (1ULL << kIsQuadOp)
#define IS_QUIN_OP (1ULL << kIsQuinOp)
#define IS_SEXTUPLE_OP (1ULL << kIsSextupleOp)
#define IS_STORE (1ULL << kMemStore)
#define IS_TERTIARY_OP (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP (1ULL << kIsUnaryOp)
#define NEEDS_FIXUP (1ULL << kPCRelFixup)
#define NO_OPERAND (1ULL << kNoOperand)
#define REG_DEF0 (1ULL << kRegDef0)
#define REG_DEF1 (1ULL << kRegDef1)
#define REG_DEFA (1ULL << kRegDefA)
#define REG_DEFD (1ULL << kRegDefD)
#define REG_DEF_FPCS_LIST0 (1ULL << kRegDefFPCSList0)
#define REG_DEF_FPCS_LIST2 (1ULL << kRegDefFPCSList2)
#define REG_DEF_LIST0 (1ULL << kRegDefList0)
#define REG_DEF_LIST1 (1ULL << kRegDefList1)
#define REG_DEF_LR (1ULL << kRegDefLR)
#define REG_DEF_SP (1ULL << kRegDefSP)
#define REG_USE0 (1ULL << kRegUse0)
#define REG_USE1 (1ULL << kRegUse1)
#define REG_USE2 (1ULL << kRegUse2)
#define REG_USE3 (1ULL << kRegUse3)
#define REG_USE4 (1ULL << kRegUse4)
#define REG_USEA (1ULL << kRegUseA)
#define REG_USEC (1ULL << kRegUseC)
#define REG_USED (1ULL << kRegUseD)
#define REG_USE_FPCS_LIST0 (1ULL << kRegUseFPCSList0)
#define REG_USE_FPCS_LIST2 (1ULL << kRegUseFPCSList2)
#define REG_USE_LIST0 (1ULL << kRegUseList0)
#define REG_USE_LIST1 (1ULL << kRegUseList1)
#define REG_USE_LR (1ULL << kRegUseLR)
#define REG_USE_PC (1ULL << kRegUsePC)
#define REG_USE_SP (1ULL << kRegUseSP)
#define SETS_CCODES (1ULL << kSetsCCodes)
#define USES_CCODES (1ULL << kUsesCCodes)

// Common combo register usage patterns.
#define REG_DEF01 (REG_DEF0 | REG_DEF1)
#define REG_DEF01_USE2 (REG_DEF0 | REG_DEF1 | REG_USE2)
#define REG_DEF0_USE01 (REG_DEF0 | REG_USE01)
#define REG_DEF0_USE0 (REG_DEF0 | REG_USE0)
#define REG_DEF0_USE12 (REG_DEF0 | REG_USE12)
#define REG_DEF0_USE1 (REG_DEF0 | REG_USE1)
#define REG_DEF0_USE2 (REG_DEF0 | REG_USE2)
#define REG_DEFAD_USEAD (REG_DEFAD_USEA | REG_USED)
#define REG_DEFAD_USEA (REG_DEFA_USEA | REG_DEFD)
#define REG_DEFA_USEA (REG_DEFA | REG_USEA)
#define REG_USE012 (REG_USE01 | REG_USE2)
#define REG_USE014 (REG_USE01 | REG_USE4)
#define REG_USE01 (REG_USE0 | REG_USE1)
#define REG_USE02 (REG_USE0 | REG_USE2)
#define REG_USE12 (REG_USE1 | REG_USE2)
#define REG_USE23 (REG_USE2 | REG_USE3)
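// Reading example (illustrative only): REG_DEF0_USE12 describes a typical
// three-operand instruction that writes operand[0] and reads operands [1] and [2],
// while REG_DEF0_USE0 describes an in-place update of operand[0].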

struct BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct MIR;
struct RegLocation;
struct RegisterInfo;
class MIRGraph;
class Mir2Lir;

typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t direct_code,
                            uintptr_t direct_method, InvokeType type);

typedef std::vector<uint8_t> CodeBuffer;


struct LIR {
  int offset;               // Offset of this instruction.
  int dalvik_offset;        // Offset of Dalvik opcode.
  LIR* next;
  LIR* prev;
  LIR* target;
  int opcode;
  int operands[5];          // [0..4] = [dest, src1, src2, extra, extra2].
  struct {
    bool is_nop:1;          // LIR is optimized away.
    bool pcRelFixup:1;      // May need pc-relative fixup.
    unsigned int size:5;    // Note: size is in bytes.
    unsigned int unused:25;
  } flags;
  int alias_info;           // For Dalvik register & litpool disambiguation.
  uint64_t use_mask;        // Resource mask for use.
  uint64_t def_mask;        // Resource mask for def.
};
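// Note: use_mask and def_mask are built from the abstract resource bits defined
// below (the ENCODE_* defines) together with target register bits; see
// SetupResourceMasks() and SetupRegMask() for how they are populated.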

// Target-specific initialization.
Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);
Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                           ArenaAllocator* const arena);
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);

// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
#define PREV_LIR(lir) (lir->prev)

// Defines for alias_info (tracks Dalvik register references).
#define DECODE_ALIAS_INFO_REG(X) (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE_FLAG (0x80000000)
#define DECODE_ALIAS_INFO_WIDE(X) ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
#define ENCODE_ALIAS_INFO(REG, ISWIDE) (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
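// For example (illustrative only), a wide value starting at Dalvik vreg 4 is
// encoded as ENCODE_ALIAS_INFO(4, true) == 0x80000004, from which
// DECODE_ALIAS_INFO_REG() recovers 4 and DECODE_ALIAS_INFO_WIDE() yields 1.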

// Common resource macros.
#define ENCODE_CCODE (1ULL << kCCode)
#define ENCODE_FP_STATUS (1ULL << kFPStatus)

// Abstract memory locations.
#define ENCODE_DALVIK_REG (1ULL << kDalvikReg)
#define ENCODE_LITERAL (1ULL << kLiteral)
#define ENCODE_HEAP_REF (1ULL << kHeapRef)
#define ENCODE_MUST_NOT_ALIAS (1ULL << kMustNotAlias)

#define ENCODE_ALL (~0ULL)
#define ENCODE_MEM (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
                    ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
// TODO: replace these macros.
#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))
#define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0)

class Mir2Lir : public Backend {
  public:
    struct SwitchTable {
      int offset;
      const uint16_t* table;    // Original dex table.
      int vaddr;                // Dalvik offset of switch opcode.
      LIR* anchor;              // Reference instruction for relative offsets.
      LIR** targets;            // Array of case targets.
    };

    struct FillArrayData {
      int offset;
      const uint16_t* table;    // Original dex table.
      int size;
      int vaddr;                // Dalvik offset of FILL_ARRAY_DATA opcode.
    };

    /* Static register use counts */
    struct RefCounts {
      int count;
      int s_reg;
      bool double_start;        // True if this s_reg starts a double-precision value.
    };

    /*
     * Data structure tracking the mapping between a Dalvik register (pair) and a
     * native register (pair). The idea is to reuse the previously loaded value
     * if possible, otherwise to keep the value in a native register as long as
     * possible.
     */
    struct RegisterInfo {
      int reg;                  // Reg number.
      bool in_use;              // Has it been allocated?
      bool is_temp;             // Can allocate as temp?
      bool pair;                // Part of a register pair?
      int partner;              // If pair, other reg of pair.
      bool live;                // Is there an associated SSA name?
      bool dirty;               // If live, is it dirty?
      int s_reg;                // Name of live value.
      LIR *def_start;           // Starting inst in last def sequence.
      LIR *def_end;             // Ending inst in last def sequence.
    };
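
    // Illustrative reading of the fields (a sketch, not part of the interface):
    // after Dalvik v2 has been loaded into native reg r5, the entry for r5 would
    // typically have in_use == true, live == true, and s_reg naming v2's SSA value,
    // so a later use of that SSA name can reuse r5 instead of reloading from memory;
    // dirty marks a value that still needs to be written back to its home location.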

    struct RegisterPool {
      int num_core_regs;
      RegisterInfo *core_regs;
      int next_core_reg;
      int num_fp_regs;
      RegisterInfo *FPRegs;
      int next_fp_reg;
    };

    struct PromotionMap {
      RegLocationType core_location:3;
      uint8_t core_reg;
      RegLocationType fp_location:3;
      uint8_t FpReg;
      bool first_in_pair;
    };

    virtual ~Mir2Lir() {}

    int32_t s4FromSwitchData(const void* switch_data) {
      return *reinterpret_cast<const int32_t*>(switch_data);
    }

    RegisterClass oat_reg_class_by_size(OpSize size) {
      return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
              size == kSignedByte) ? kCoreReg : kAnyReg;
    }

    size_t CodeBufferSizeInBytes() {
      return code_buffer_.size() / sizeof(code_buffer_[0]);
    }

    // Shared by all targets - implemented in codegen_util.cc
    void AppendLIR(LIR* lir);
    void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
    void InsertLIRAfter(LIR* current_lir, LIR* new_lir);

    int ComputeFrameSize();
    virtual void Materialize();
    virtual CompiledMethod* GetCompiledMethod();
    void MarkSafepointPC(LIR* inst);
    bool FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put);
    void SetupResourceMasks(LIR* lir);
    void AssembleLIR();
    void SetMemRefType(LIR* lir, bool is_load, int mem_type);
    void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
    void SetupRegMask(uint64_t* mask, int reg);
    void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
    void DumpPromotionMap();
    void CodegenDump();
    LIR* RawLIR(int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
    LIR* NewLIR0(int opcode);
    LIR* NewLIR1(int opcode, int dest);
    LIR* NewLIR2(int opcode, int dest, int src1);
    LIR* NewLIR3(int opcode, int dest, int src1, int src2);
    LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
    LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
    LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
    LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
    LIR* AddWordData(LIR* *constant_list_p, int value);
    LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
    void ProcessSwitchTables();
    void DumpSparseSwitchTable(const uint16_t* table);
    void DumpPackedSwitchTable(const uint16_t* table);
    LIR* MarkBoundary(int offset, const char* inst_str);
    void NopLIR(LIR* lir);
    bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
    bool IsInexpensiveConstant(RegLocation rl_src);
    ConditionCode FlipComparisonOrder(ConditionCode before);
    void DumpMappingTable(const char* table_name, const std::string& descriptor,
                          const std::string& name, const std::string& signature,
                          const std::vector<uint32_t>& v);
    void InstallLiteralPools();
    void InstallSwitchTables();
    void InstallFillArrayData();
    bool VerifyCatchEntries();
    void CreateMappingTables();
    void CreateNativeGcMap();
    int AssignLiteralOffset(int offset);
    int AssignSwitchTablesOffset(int offset);
    int AssignFillArrayDataOffset(int offset);
    int AssignInsnOffsets();
    void AssignOffsets();
    LIR* InsertCaseLabel(int vaddr, int keyVal);
    void MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec);
    void MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec);

    // Shared by all targets - implemented in local_optimizations.cc
    void ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src);
    void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
    void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
    void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);
    void RemoveRedundantBranches();

    // Shared by all targets - implemented in ralloc_util.cc
    int GetSRegHi(int lowSreg);
    bool oat_live_out(int s_reg);
    int oatSSASrc(MIR* mir, int num);
    void SimpleRegAlloc();
    void ResetRegPool();
    void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num);
    void DumpRegPool(RegisterInfo* p, int num_regs);
    void DumpCoreRegPool();
    void DumpFpRegPool();
    /* Mark a temp register as dead. Does not affect allocation state. */
    void Clobber(int reg) {
      ClobberBody(GetRegInfo(reg));
    }
    void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg);
    void ClobberSReg(int s_reg);
    int SRegToPMap(int s_reg);
    void RecordCorePromotion(int reg, int s_reg);
    int AllocPreservedCoreReg(int s_reg);
    void RecordFpPromotion(int reg, int s_reg);
    int AllocPreservedSingle(int s_reg, bool even);
    int AllocPreservedDouble(int s_reg);
    int AllocPreservedFPReg(int s_reg, bool double_start);
    int AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
                      bool required);
    int AllocTempDouble();
    int AllocFreeTemp();
    int AllocTemp();
    int AllocTempFloat();
    RegisterInfo* AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg);
    RegisterInfo* AllocLive(int s_reg, int reg_class);
    void FreeTemp(int reg);
    RegisterInfo* IsLive(int reg);
    RegisterInfo* IsTemp(int reg);
    RegisterInfo* IsPromoted(int reg);
    bool IsDirty(int reg);
    void LockTemp(int reg);
    void ResetDef(int reg);
    void NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2);
    void MarkDef(RegLocation rl, LIR *start, LIR *finish);
    void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
    RegLocation WideToNarrow(RegLocation rl);
    void ResetDefLoc(RegLocation rl);
    void ResetDefLocWide(RegLocation rl);
    void ResetDefTracking();
    void ClobberAllRegs();
    void FlushAllRegsBody(RegisterInfo* info, int num_regs);
    void FlushAllRegs();
    bool RegClassMatches(int reg_class, int reg);
    void MarkLive(int reg, int s_reg);
    void MarkTemp(int reg);
    void UnmarkTemp(int reg);
    void MarkPair(int low_reg, int high_reg);
    void MarkClean(RegLocation loc);
    void MarkDirty(RegLocation loc);
    void MarkInUse(int reg);
    void CopyRegInfo(int new_reg, int old_reg);
    bool CheckCorePoolSanity();
    RegLocation UpdateLoc(RegLocation loc);
    RegLocation UpdateLocWide(RegLocation loc);
    RegLocation UpdateRawLoc(RegLocation loc);
    RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
    RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
    void CountRefs(RefCounts* core_counts, RefCounts* fp_counts);
    void DumpCounts(const RefCounts* arr, int size, const char* msg);
    void DoPromotion();
    int VRegOffset(int v_reg);
    int SRegOffset(int s_reg);
    RegLocation GetReturnWide(bool is_double);
    RegLocation GetReturn(bool is_float);

    // Shared by all targets - implemented in gen_common.cc.
    bool HandleEasyDivide(Instruction::Code dalvik_opcode,
                          RegLocation rl_src, RegLocation rl_dest, int lit);
    bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
    void HandleSuspendLaunchPads();
    void HandleIntrinsicLaunchPads();
    void HandleThrowLaunchPads();
    void GenBarrier();
    LIR* GenCheck(ConditionCode c_code, ThrowKind kind);
    LIR* GenImmedCheck(ConditionCode c_code, int reg, int imm_val,
                       ThrowKind kind);
    LIR* GenNullCheck(int s_reg, int m_reg, int opt_flags);
    LIR* GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
                        ThrowKind kind);
    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                             RegLocation rl_src2, LIR* taken, LIR* fall_through);
    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
                                 LIR* taken, LIR* fall_through);
    void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
    void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src);
    void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                     RegLocation rl_src);
    void GenFilledNewArray(CallInfo* info);
    void GenSput(uint32_t field_idx, RegLocation rl_src,
                 bool is_long_or_double, bool is_object);
    void GenSget(uint32_t field_idx, RegLocation rl_dest,
                 bool is_long_or_double, bool is_object);
    void GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
                 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
    void GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
                 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
    void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
    void GenConstString(uint32_t string_idx, RegLocation rl_dest);
    void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
    void GenThrow(RegLocation rl_src);
    void GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
                       RegLocation rl_src);
    void GenCheckCast(uint32_t insn_idx, uint32_t type_idx,
                      RegLocation rl_src);
    void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                      RegLocation rl_src1, RegLocation rl_src2);
    void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src1, RegLocation rl_shift);
    void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                       RegLocation rl_src1, RegLocation rl_src2);
    void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src, int lit);
    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src1, RegLocation rl_src2);
    void GenConversionCall(int func_offset, RegLocation rl_dest,
                           RegLocation rl_src);
    void GenSuspendTest(int opt_flags);
    void GenSuspendTestAndBranch(int opt_flags, LIR* target);

    // Shared by all targets - implemented in gen_invoke.cc.
    int CallHelperSetup(int helper_offset);
    LIR* CallHelper(int r_tgt, int helper_offset, bool safepoint_pc);
    void CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc);
    void CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc);
    void CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0,
                                      bool safepoint_pc);
    void CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
                                         RegLocation arg1, bool safepoint_pc);
    void CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0,
                                         int arg1, bool safepoint_pc);
    void CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperImmMethod(int helper_offset, int arg0,
                                    bool safepoint_pc);
    void CallRuntimeHelperRegLocationRegLocation(int helper_offset,
                                                 RegLocation arg0, RegLocation arg1,
                                                 bool safepoint_pc);
    void CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
                                    int arg2, bool safepoint_pc);
    void CallRuntimeHelperImmMethodRegLocation(int helper_offset, int arg0,
                                               RegLocation arg2, bool safepoint_pc);
    void CallRuntimeHelperImmMethodImm(int helper_offset, int arg0, int arg2,
                                       bool safepoint_pc);
    void CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
                                                    int arg0, RegLocation arg1, RegLocation arg2,
                                                    bool safepoint_pc);
    void GenInvoke(CallInfo* info);
    void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
    int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                             NextCallInsn next_call_insn,
                             const MethodReference& target_method,
                             uint32_t vtable_idx,
                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                             bool skip_this);
    int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                           NextCallInsn next_call_insn,
                           const MethodReference& target_method,
                           uint32_t vtable_idx,
                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                           bool skip_this);
    RegLocation InlineTarget(CallInfo* info);
    RegLocation InlineTargetWide(CallInfo* info);

    bool GenInlinedCharAt(CallInfo* info);
    bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
    bool GenInlinedAbsInt(CallInfo* info);
    bool GenInlinedAbsLong(CallInfo* info);
    bool GenInlinedFloatCvt(CallInfo* info);
    bool GenInlinedDoubleCvt(CallInfo* info);
    bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
    bool GenInlinedStringCompareTo(CallInfo* info);
    bool GenInlinedCurrentThread(CallInfo* info);
    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
    bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                             bool is_volatile, bool is_ordered);
    bool GenIntrinsic(CallInfo* info);
    int LoadArgRegs(CallInfo* info, int call_state,
                    NextCallInsn next_call_insn,
                    const MethodReference& target_method,
                    uint32_t vtable_idx,
                    uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                    bool skip_this);

    // Shared by all targets - implemented in gen_loadstore.cc.
    RegLocation LoadCurrMethod();
    void LoadCurrMethodDirect(int r_tgt);
    LIR* LoadConstant(int r_dest, int value);
    LIR* LoadWordDisp(int rBase, int displacement, int r_dest);
    RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
    RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
    void LoadValueDirect(RegLocation rl_src, int r_dest);
    void LoadValueDirectFixed(RegLocation rl_src, int r_dest);
    void LoadValueDirectWide(RegLocation rl_src, int reg_lo, int reg_hi);
    void LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, int reg_hi);
    LIR* StoreWordDisp(int rBase, int displacement, int r_src);
    void StoreValue(RegLocation rl_dest, RegLocation rl_src);
    void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);

    // Shared by all targets - implemented in mir_to_lir.cc.
    void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
    void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
    bool MethodBlockCodeGen(BasicBlock* bb);
    void SpecialMIR2LIR(SpecialCaseHandler special_case);
    void MethodMIR2LIR();

    // Required for target - codegen helpers.
    virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
    virtual int LoadHelper(int offset) = 0;
    virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0;
    virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                                  int s_reg) = 0;
    virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size) = 0;
    virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
                                     int r_dest, int r_dest_hi, OpSize size, int s_reg) = 0;
    virtual LIR* LoadConstantNoClobber(int r_dest, int value) = 0;
    virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) = 0;
    virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size) = 0;
    virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi) = 0;
    virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size) = 0;
    virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
                                      int r_src, int r_src_hi, OpSize size, int s_reg) = 0;
    virtual void MarkGCCard(int val_reg, int tgt_addr_reg) = 0;

    // Required for target - register utilities.
    virtual bool IsFpReg(int reg) = 0;
    virtual bool SameRegType(int reg1, int reg2) = 0;
    virtual int AllocTypedTemp(bool fp_hint, int reg_class) = 0;
    virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
    virtual int S2d(int low_reg, int high_reg) = 0;
    virtual int TargetReg(SpecialTargetRegister reg) = 0;
    virtual RegisterInfo* GetRegInfo(int reg) = 0;
    virtual RegLocation GetReturnAlt() = 0;
    virtual RegLocation GetReturnWideAlt() = 0;
    virtual RegLocation LocCReturn() = 0;
    virtual RegLocation LocCReturnDouble() = 0;
    virtual RegLocation LocCReturnFloat() = 0;
    virtual RegLocation LocCReturnWide() = 0;
    virtual uint32_t FpRegMask() = 0;
    virtual uint64_t GetRegMaskCommon(int reg) = 0;
    virtual void AdjustSpillMask() = 0;
    virtual void ClobberCalleeSave() = 0;
    virtual void FlushReg(int reg) = 0;
    virtual void FlushRegWide(int reg1, int reg2) = 0;
    virtual void FreeCallTemps() = 0;
    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) = 0;
    virtual void LockCallTemps() = 0;
    virtual void MarkPreservedSingle(int v_reg, int reg) = 0;
    virtual void CompilerInitializeRegAlloc() = 0;

    // Required for target - miscellaneous.
    virtual AssemblerStatus AssembleInstructions(uintptr_t start_addr) = 0;
    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix) = 0;
    virtual void SetupTargetResourceMasks(LIR* lir) = 0;
    virtual const char* GetTargetInstFmt(int opcode) = 0;
    virtual const char* GetTargetInstName(int opcode) = 0;
    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
    virtual uint64_t GetPCUseDefEncoding() = 0;
    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
    virtual int GetInsnSize(LIR* lir) = 0;
    virtual bool IsUnconditionalBranch(LIR* lir) = 0;

    // Required for target - Dalvik-level generators.
    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenArithOpDouble(Instruction::Code opcode,
                                  RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2) = 0;
    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src) = 0;
    virtual bool GenInlinedCas32(CallInfo* info, bool need_write_barrier) = 0;
    virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min) = 0;
    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) = 0;
    virtual void GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base,
                                int offset, ThrowKind kind) = 0;
    virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi,
                                  bool is_div) = 0;
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit,
                                     bool is_div) = 0;
    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenDivZeroCheck(int reg_lo, int reg_hi) = 0;
    virtual void GenEntrySequence(RegLocation* ArgLocs,
                                  RegLocation rl_method) = 0;
    virtual void GenExitSequence() = 0;
    virtual void GenFillArrayData(uint32_t table_offset,
                                  RegLocation rl_src) = 0;
    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                     bool is_double) = 0;
    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;
    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;
    virtual void GenMemBarrier(MemBarrierKind barrier_kind) = 0;
    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src) = 0;
    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src) = 0;
    virtual void GenMoveException(RegLocation rl_dest) = 0;
    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit, int first_bit,
                                               int second_bit) = 0;
    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                 RegLocation rl_src) = 0;
    virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                 RegLocation rl_src) = 0;
    virtual void GenSpecialCase(BasicBlock* bb, MIR* mir,
                                SpecialCaseHandler special_case) = 0;
    virtual void GenArrayObjPut(int opt_flags, RegLocation rl_array,
                                RegLocation rl_index, RegLocation rl_src, int scale) = 0;
    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale) = 0;
    virtual void GenShiftImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1,
                                   RegLocation rl_shift) = 0;

    // Required for target - single operation generators.
    virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
    virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2,
                             LIR* target) = 0;
    virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
                                LIR* target) = 0;
    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
    virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg,
                                LIR* target) = 0;
    virtual LIR* OpFpRegCopy(int r_dest, int r_src) = 0;
    virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
    virtual LIR* OpMem(OpKind op, int rBase, int disp) = 0;
    virtual LIR* OpPcRelLoad(int reg, LIR* target) = 0;
    virtual LIR* OpReg(OpKind op, int r_dest_src) = 0;
    virtual LIR* OpRegCopy(int r_dest, int r_src) = 0;
    virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src) = 0;
    virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value) = 0;
    virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset) = 0;
    virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2) = 0;
    virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) = 0;
    virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1,
                             int r_src2) = 0;
    virtual LIR* OpTestSuspend(LIR* target) = 0;
    virtual LIR* OpThreadMem(OpKind op, int thread_offset) = 0;
    virtual LIR* OpVldm(int rBase, int count) = 0;
    virtual LIR* OpVstm(int rBase, int count) = 0;
    virtual void OpLea(int rBase, int reg1, int reg2, int scale,
                       int offset) = 0;
    virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
                               int src_hi) = 0;
    virtual void OpTlsCmp(int offset, int val) = 0;
    virtual bool InexpensiveConstantInt(int32_t value) = 0;
    virtual bool InexpensiveConstantFloat(int32_t value) = 0;
    virtual bool InexpensiveConstantLong(int64_t value) = 0;
    virtual bool InexpensiveConstantDouble(int64_t value) = 0;

    // Temp workaround
    void Workaround7250540(RegLocation rl_dest, int value);

  protected:
    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);

    CompilationUnit* GetCompilationUnit() {
      return cu_;
    }

  private:
    void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                            RegLocation rl_src);
    void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                    bool type_known_abstract, bool use_declaring_class,
                                    bool can_assume_type_is_in_dex_cache,
                                    uint32_t type_idx, RegLocation rl_dest,
                                    RegLocation rl_src);

    void ClobberBody(RegisterInfo* p);
    void ResetDefBody(RegisterInfo* p) {
      p->def_start = NULL;
      p->def_end = NULL;
    }

  public:
    // TODO: add accessors for these.
    LIR* literal_list_;            // Constants.
    LIR* method_literal_list_;     // Method literals requiring patching.
    LIR* code_literal_list_;       // Code literals requiring patching.

  protected:
    CompilationUnit* const cu_;
    MIRGraph* const mir_graph_;
    GrowableArray<SwitchTable*> switch_tables_;
    GrowableArray<FillArrayData*> fill_array_data_;
    GrowableArray<LIR*> throw_launchpads_;
    GrowableArray<LIR*> suspend_launchpads_;
    GrowableArray<LIR*> intrinsic_launchpads_;
    SafeMap<unsigned int, LIR*> boundary_map_;  // Boundary lookup cache.
    /*
     * Holds the mapping from native PC to dex PC for safepoints where we may deoptimize.
     * The native PC is the return address of the safepointed operation; the dex PC is for
     * the instruction being executed at the safepoint.
     */
    std::vector<uint32_t> pc2dex_mapping_table_;
    /*
     * Holds the mapping from dex PC to native PC for catch entry points. The native PC and
     * dex PC immediately precede the instruction.
     */
    std::vector<uint32_t> dex2pc_mapping_table_;
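    // Layout note (an assumption about the encoding, see CreateMappingTables()):
    // entries in both tables are stored as interleaved (native PC, dex PC) pairs.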
    int data_offset_;              // Starting offset of literal pool.
    int total_size_;               // Header + code size.
    LIR* block_label_list_;
    PromotionMap* promotion_map_;
    /*
     * TODO: The code generation utilities don't have a built-in
     * mechanism to propagate the original Dalvik opcode address to the
     * associated generated instructions. For the trace compiler, this wasn't
     * necessary because the interpreter handled all throws and debugging
     * requests. For now we handle this by placing the Dalvik offset
     * in the CompilationUnit struct before codegen for each instruction.
     * The low-level LIR creation utilities will pull it from here. Rework this.
     */
    int current_dalvik_offset_;
    RegisterPool* reg_pool_;
    /*
     * Sanity checking for the register temp tracking. The same SSA
     * name should never be associated with more than one temp register
     * during the compilation of a single instruction.
     */
    int live_sreg_;
    CodeBuffer code_buffer_;
    std::vector<uint32_t> combined_mapping_table_;
    std::vector<uint32_t> core_vmap_table_;
    std::vector<uint32_t> fp_vmap_table_;
    std::vector<uint8_t> native_gc_map_;
    int num_core_spills_;
    int num_fp_spills_;
    int frame_size_;
    unsigned int core_spill_mask_;
    unsigned int fp_spill_mask_;
    LIR* first_lir_insn_;
    LIR* last_lir_insn_;
};  // class Mir2Lir

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_