/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the X86 ISA */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "gc/accounting/card_table.h"
#include "x86_lir.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs.
 */
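/*
 * Sparse switch data layout walked below (per the Dex "sparse-switch-payload"
 * format; see the Dex bytecode spec for the authoritative definition):
 *  ushort ident = 0x0200   magic value
 *  ushort size             number of entries in the table
 *  int keys[size]          case keys, sorted low-to-high
 *  int targets[size]       branch targets, relative to the switch opcode
 *
 * Total size is (size * 4) + 2 16-bit code units.
 */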
void X86Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
                                 RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  rl_src = LoadValue(rl_src, kCoreReg);
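  // Lower the sparse switch as a linear sequence of compare-and-branch pairs, one per case key.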
  for (int i = 0; i < entries; i++) {
    int key = keys[i];
    BasicBlock* case_block =
        mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
    OpCmpImmBranch(kCondEq, rl_src.reg, key, &block_label_list_[case_block->id]);
  }
}

/*
 * Code pattern will look something like:
 *
 * mov  r_val, ..
 * call 0
 * pop  r_start_of_method
 * sub  r_start_of_method, ..
 * mov  r_key_reg, r_val
 * sub  r_key_reg, low_key
 * cmp  r_key_reg, size-1  ; bound check
 * ja   done
 * mov  r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
 * add  r_start_of_method, r_disp
 * jmp  r_start_of_method
 * done:
 */
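/*
 * Packed switch data layout read below (per the Dex "packed-switch-payload"
 * format; see the Dex bytecode spec for the authoritative definition):
 *  ushort ident = 0x0100   magic value
 *  ushort size             number of entries in the table
 *  int first_key           lowest case key
 *  int targets[size]       branch targets, relative to the switch opcode
 *
 * Total size is (size * 2) + 4 16-bit code units.
 */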
void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
                                 RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
                                                      kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  // NewLIR0(kX86Bkpt);

  // Materialize a pointer to the switch table
  RegStorage start_of_method_reg;
  if (base_of_code_ != nullptr) {
    // We can use the saved value.
    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
    if (rl_method.wide) {
      rl_method = LoadValueWide(rl_method, kCoreReg);
    } else {
      rl_method = LoadValue(rl_method, kCoreReg);
    }
    start_of_method_reg = rl_method.reg;
    store_method_addr_used_ = true;
  } else {
    start_of_method_reg = AllocTempRef();
    NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
  }
  DCHECK_EQ(start_of_method_reg.Is64Bit(), cu_->target64);
  int low_key = s4FromSwitchData(&table[2]);
  RegStorage keyReg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    keyReg = rl_src.reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
  }
  // Bounds check - if < 0 or >= size, continue following switch
  OpRegImm(kOpCmp, keyReg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, NULL);

  // Load the displacement from the switch table
  RegStorage disp_reg = AllocTemp();
  NewLIR5(kX86PcRelLoadRA, disp_reg.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(),
          2, WrapPointer(tab_rec));
  // Add displacement to start of method
  OpRegReg(kOpAdd, start_of_method_reg, cu_->target64 ? As64BitReg(disp_reg) : disp_reg);
  // ..and go!
  LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg.GetReg());
  tab_rec->anchor = switch_branch;

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData* tab_rec =
      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
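  // Total payload size in bytes: element data plus the 8-byte header (ident, width, size).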
  tab_rec->size = (size * width) + 8;

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();  /* Everything to home location */
  RegStorage array_ptr = TargetRefReg(kArg0);
  RegStorage payload = TargetPtrReg(kArg1);
  RegStorage method_start = TargetPtrReg(kArg2);

  LoadValueDirectFixed(rl_src, array_ptr);
  // Materialize a pointer to the fill data image
  if (base_of_code_ != nullptr) {
    // We can use the saved value.
    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
    if (rl_method.wide) {
      LoadValueDirectWide(rl_method, method_start);
    } else {
      LoadValueDirect(rl_method, method_start);
    }
    store_method_addr_used_ = true;
  } else {
    NewLIR1(kX86StartOfMethod, method_start.GetReg());
  }
  NewLIR2(kX86PcRelAdr, payload.GetReg(), WrapPointer(tab_rec));
  OpRegReg(kOpAdd, payload, method_start);
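  // payload now holds the absolute runtime address of the fill-array-data table.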
  if (cu_->target64) {
    CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData), array_ptr,
                            payload, true);
  } else {
    CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData), array_ptr,
                            payload, true);
  }
}

void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
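  // Fetch the pending exception object from thread-local storage and clear it by storing null.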
  int ex_offset = cu_->target64 ?
      Thread::ExceptionOffset<8>().Int32Value() :
      Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
  NewLIR2(cu_->target64 ? kX86Mov64TI : kX86Mov32TI, ex_offset, 0);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
 */
void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
  DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
  DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
  RegStorage reg_card_base = AllocTempRef();
  RegStorage reg_card_no = AllocTempRef();
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
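  // Dirty the card for tgt_addr_reg: store the low byte of the card table base into
  // card_table_base[tgt_addr >> kCardShift].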
  int ct_offset = cu_->target64 ?
      Thread::CardTableOffset<8>().Int32Value() :
      Thread::CardTableOffset<4>().Int32Value();
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live.  Let the register
   * allocation mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.  This leaves the utility
   * code with no spare temps.
   */
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);

  /* Build frame, return address already on stack */
  stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP,
                              frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  const bool skip_overflow_check = mir_graph_->MethodIsLeaf() &&
      !IsLargeFrame(frame_size_, cu_->target64 ? kX86_64 : kX86);
  NewLIR0(kPseudoMethodEntry);
  /* Spill core callee saves */
  SpillCoreRegs();
  /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
  DCHECK_EQ(num_fp_spills_, 0);
  if (!skip_overflow_check) {
    class StackOverflowSlowPath : public LIRSlowPath {
     public:
      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
          : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
      }
      void Compile() OVERRIDE {
        m2l_->ResetRegPool();
        m2l_->ResetDefTracking();
        GenerateTargetLabel(kPseudoThrowTarget);
        m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
        m2l_->ClobberCallerSave();
        // The frame has already been popped above; the throw entrypoint does not return.
        if (cu_->target64) {
          m2l_->CallHelper(RegStorage::InvalidReg(), QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow),
                           false /* MarkSafepointPC */, false /* UseLink */);
        } else {
          m2l_->CallHelper(RegStorage::InvalidReg(), QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow),
                           false /* MarkSafepointPC */, false /* UseLink */);
        }
      }

     private:
      const size_t sp_displace_;
    };
    // TODO: for large frames we should do something like:
    // spill ebp
    // lea ebp, [esp + frame_size]
    // cmp ebp, fs:[stack_end_]
    // jcc stack_overflow_exception
    // mov esp, ebp
    // in case a signal comes in that's not using an alternate signal stack and the large frame may
    // have moved us outside of the reserved area at the end of the stack.
    // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
    if (cu_->target64) {
      OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
    } else {
      OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
    }
    LIR* branch = OpCondBranch(kCondUlt, nullptr);
    AddSlowPath(
        new(arena_)StackOverflowSlowPath(this, branch,
                                         frame_size_ -
                                         GetInstructionSetPointerSize(cu_->instruction_set)));
  }

  FlushIns(ArgLocs, rl_method);

  if (base_of_code_ != nullptr) {
    RegStorage method_start = TargetPtrReg(kArg0);
    // We have been asked to save the address of the method start for later use.
    setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
    int displacement = SRegOffset(base_of_code_->s_reg_low);
    // Native pointer - must be natural word size.
    setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, method_start,
                                             cu_->target64 ? k64 : k32, kNotVolatile);
  }

  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
}

void X86Mir2Lir::GenExitSequence() {
  /*
   * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_rX86_RET0);
  LockTemp(rs_rX86_RET1);

  NewLIR0(kPseudoMethodExit);
  UnSpillCoreRegs();
  /* Remove frame except for return address */
  stack_increment_ = OpRegImm(kOpAdd, rs_rX86_SP,
                              frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
  NewLIR0(kX86Ret);
}

void X86Mir2Lir::GenSpecialExitSequence() {
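  // Specially-compiled (frameless) methods have nothing to tear down; just return.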
  NewLIR0(kX86Ret);
}

}  // namespace art