Helpers and refactorings to prepare for interpreter optimizations (x64)
- Add data structure offsets that will be used in assembly code.
- Be explicit about stack overflows in the fault handlers.
- Move assembly helper code into asm_support so the interpreter can use it.
- Support putting literals in InterpreterCache.
- Fix artHandleFillArrayDataFromCode for x64.
Bug: 119800099
Test: test.py
Change-Id: I2729f87fe5d09c04ae2e7081636f0cd89ac14c21
diff --git a/libdexfile/dex/standard_dex_file.h b/libdexfile/dex/standard_dex_file.h
index db82a9b..3af36f6 100644
--- a/libdexfile/dex/standard_dex_file.h
+++ b/libdexfile/dex/standard_dex_file.h
@@ -35,6 +35,22 @@
struct CodeItem : public dex::CodeItem {
static constexpr size_t kAlignment = 4;
+ static constexpr size_t InsSizeOffset() {
+ return OFFSETOF_MEMBER(CodeItem, ins_size_);
+ }
+
+ static constexpr size_t OutsSizeOffset() {
+ return OFFSETOF_MEMBER(CodeItem, outs_size_);
+ }
+
+ static constexpr size_t RegistersSizeOffset() {
+ return OFFSETOF_MEMBER(CodeItem, registers_size_);
+ }
+
+ static constexpr size_t InsnsOffset() {
+ return OFFSETOF_MEMBER(CodeItem, insns_);
+ }
+
private:
CodeItem() = default;
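Since these accessors are constexpr, the cpp-define-generator (code_item.def, added below) can export them as assembly-time constants, and the layout they describe can be checked at compile time. A minimal self-contained sketch of that pattern, using an assumed field layout and a stand-in for ART's OFFSETOF_MEMBER macro:

```cpp
#include <cstddef>
#include <cstdint>

// Stand-in for ART's OFFSETOF_MEMBER macro (assumption: equivalent to offsetof).
#define OFFSETOF_MEMBER(type, field) offsetof(type, field)

// Assumed layout, mirroring the standard dex code_item format.
struct CodeItem {
  uint16_t registers_size_;
  uint16_t ins_size_;
  uint16_t outs_size_;
  uint16_t tries_size_;
  uint32_t debug_info_off_;
  uint32_t insns_size_in_code_units_;
  uint16_t insns_[1];

  static constexpr size_t InsnsOffset() {
    return OFFSETOF_MEMBER(CodeItem, insns_);
  }
};

// Because the accessor is constexpr, layout assumptions can be verified
// before the offsets are baked into interpreter assembly.
static_assert(CodeItem::InsnsOffset() == 16, "layout assumed by the interpreter");
```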
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index e186cd3..4e7d64c 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -45,9 +45,12 @@
return instr_size;
}
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
+ void* context,
ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ uintptr_t* out_return_pc,
+ uintptr_t* out_sp,
+ bool* out_is_stack_overflow) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
*out_sp = static_cast<uintptr_t>(sc->arm_sp);
@@ -63,9 +66,11 @@
reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kArm));
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<ArtMethod*>(sc->arm_r0);
+ *out_is_stack_overflow = true;
} else {
// The method is at the top of the stack.
*out_method = reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t*>(*out_sp)[0]);
+ *out_is_stack_overflow = false;
}
// Work out the return PC. This will be the address of the instruction
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 751c05b..c139e21 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -38,9 +38,12 @@
namespace art {
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
+ void* context,
ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ uintptr_t* out_return_pc,
+ uintptr_t* out_sp,
+ bool* out_is_stack_overflow) {
struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
*out_sp = static_cast<uintptr_t>(sc->sp);
@@ -56,9 +59,11 @@
reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kArm64));
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<ArtMethod*>(sc->regs[0]);
+ *out_is_stack_overflow = true;
} else {
// The method is at the top of the stack.
*out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+ *out_is_stack_overflow = false;
}
// Work out the return PC. This will be the address of the instruction
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 0354f0c..f55df92 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -37,9 +37,12 @@
namespace art {
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo,
+ void* context,
ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ uintptr_t* out_return_pc,
+ uintptr_t* out_sp,
+ bool* out_is_stack_overflow) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
*out_sp = static_cast<uintptr_t>(sc->sc_regs[mips::SP]);
@@ -55,9 +58,11 @@
reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips));
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips::A0]);
+ *out_is_stack_overflow = true;
} else {
// The method is at the top of the stack.
*out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+ *out_is_stack_overflow = false;
}
// Work out the return PC. This will be the address of the instruction
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 6255235..ff53fa6 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -38,9 +38,12 @@
namespace art {
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo,
+ void* context,
ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ uintptr_t* out_return_pc,
+ uintptr_t* out_sp,
+ bool* out_is_stack_overflow) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
*out_sp = static_cast<uintptr_t>(sc->sc_regs[mips64::SP]);
@@ -56,9 +59,11 @@
reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips64));
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips64::A0]);
+ *out_is_stack_overflow = true;
} else {
// The method is at the top of the stack.
*out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+ *out_is_stack_overflow = false;
}
// Work out the return PC. This will be the address of the instruction
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 26312fb..3a08ec5 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -279,7 +279,9 @@
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ uintptr_t* out_return_pc,
+ uintptr_t* out_sp,
+ bool* out_is_stack_overflow) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
*out_sp = static_cast<uintptr_t>(uc->CTX_ESP);
VLOG(signals) << "sp: " << std::hex << *out_sp;
@@ -298,9 +300,11 @@
#endif
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<ArtMethod*>(uc->CTX_METHOD);
+ *out_is_stack_overflow = true;
} else {
// The method is at the top of the stack.
*out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+ *out_is_stack_overflow = false;
}
uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index b6a64b6..596e468 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -189,4 +189,152 @@
#endif // USE_HEAP_POISONING
END_MACRO
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly)
+ */
+MACRO0(SETUP_SAVE_REFS_ONLY_FRAME)
+#if defined(__APPLE__)
+ int3
+ int3
+#else
+ // R10 := Runtime::Current()
+ movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
+ movq (%r10), %r10
+ // Save callee and GPR args, mixed together to agree with core spills bitmap.
+ PUSH r15 // Callee save.
+ PUSH r14 // Callee save.
+ PUSH r13 // Callee save.
+ PUSH r12 // Callee save.
+ PUSH rbp // Callee save.
+ PUSH rbx // Callee save.
+ // Create space for FPR args, plus space for ArtMethod*.
+ subq LITERAL(8 + 4 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(8 + 4 * 8)
+ // Save FPRs.
+ movq %xmm12, 8(%rsp)
+ movq %xmm13, 16(%rsp)
+ movq %xmm14, 24(%rsp)
+ movq %xmm15, 32(%rsp)
+ // R10 := ArtMethod* for refs only callee save frame method.
+ movq RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET(%r10), %r10
+ // Store ArtMethod* to bottom of stack.
+ movq %r10, 0(%rsp)
+ // Store rsp as the top quick frame.
+ movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 6 * 8 + 4 * 8 + 8 + 8)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(X86_64) size not as expected."
+#endif
+#endif // __APPLE__
+END_MACRO
+
+MACRO0(RESTORE_SAVE_REFS_ONLY_FRAME)
+ movq 8(%rsp), %xmm12
+ movq 16(%rsp), %xmm13
+ movq 24(%rsp), %xmm14
+ movq 32(%rsp), %xmm15
+ addq LITERAL(8 + 4*8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-8 - 4*8)
+ // TODO: optimize by not restoring callee-saves restored by the ABI
+ POP rbx
+ POP rbp
+ POP r12
+ POP r13
+ POP r14
+ POP r15
+END_MACRO
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
+ */
+MACRO0(SETUP_SAVE_ALL_CALLEE_SAVES_FRAME)
+#if defined(__APPLE__)
+ int3
+ int3
+#else
+ // R10 := Runtime::Current()
+ movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
+ movq (%r10), %r10
+ // Save callee save registers to agree with core spills bitmap.
+ PUSH r15 // Callee save.
+ PUSH r14 // Callee save.
+ PUSH r13 // Callee save.
+ PUSH r12 // Callee save.
+ PUSH rbp // Callee save.
+ PUSH rbx // Callee save.
+ // Create space for FPR args, plus space for ArtMethod*.
+ subq MACRO_LITERAL(4 * 8 + 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(4 * 8 + 8)
+ // Save FPRs.
+ movq %xmm12, 8(%rsp)
+ movq %xmm13, 16(%rsp)
+ movq %xmm14, 24(%rsp)
+ movq %xmm15, 32(%rsp)
+ // R10 := ArtMethod* for save all callee save frame method.
+ movq RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET(%r10), %r10
+ // Store ArtMethod* to bottom of stack.
+ movq %r10, 0(%rsp)
+ // Store rsp as the top quick frame.
+ movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 6 * 8 + 4 * 8 + 8 + 8)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(X86_64) size not as expected."
+#endif
+#endif // __APPLE__
+END_MACRO
+
+MACRO0(SETUP_FP_CALLEE_SAVE_FRAME)
+ // Create space for ART FP callee-saved registers
+ subq MACRO_LITERAL(4 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(4 * 8)
+ movq %xmm12, 0(%rsp)
+ movq %xmm13, 8(%rsp)
+ movq %xmm14, 16(%rsp)
+ movq %xmm15, 24(%rsp)
+END_MACRO
+
+MACRO0(RESTORE_FP_CALLEE_SAVE_FRAME)
+ // Restore ART FP callee-saved registers
+ movq 0(%rsp), %xmm12
+ movq 8(%rsp), %xmm13
+ movq 16(%rsp), %xmm14
+ movq 24(%rsp), %xmm15
+ addq MACRO_LITERAL(4 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(- 4 * 8)
+END_MACRO
+
+ /*
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_ when the runtime method frame is ready.
+ */
+MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
+ // (Thread*) setup
+ movq %gs:THREAD_SELF_OFFSET, %rdi
+ call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*)
+ UNREACHABLE
+END_MACRO
+ /*
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_.
+ */
+MACRO0(DELIVER_PENDING_EXCEPTION)
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save callee saves for throw
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+END_MACRO
+
+MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
+ movq %gs:THREAD_EXCEPTION_OFFSET, %rcx // get exception field
+ testq %rcx, %rcx // rcx == 0 ?
+ jnz 1f // if rcx != 0 goto 1
+ ret // return
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
+
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
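The "ugly compile-time check" in each macro above is just spill-slot arithmetic; a hedged C++ restatement of that sum (the 96-byte total is implied by the check, not quoted from a header):

```cpp
#include <cstddef>

// Restating the x86-64 kSaveRefsOnly / kSaveAllCalleeSaves frame layout.
constexpr size_t kGprSpills = 6 * 8;      // PUSH rbx/rbp/r12-r15 by the macro.
constexpr size_t kFprSpills = 4 * 8;      // xmm12-xmm15 stored below the GPRs.
constexpr size_t kMethodSlot = 8;         // ArtMethod* at the bottom of the frame.
constexpr size_t kReturnAddress = 8;      // Pushed implicitly by the caller's call.

static_assert(kGprSpills + kFprSpills + kMethodSlot + kReturnAddress ==
                  6 * 8 + 4 * 8 + 8 + 8,
              "same sum as the FRAME_SIZE_SAVE_REFS_ONLY check (96 bytes)");
```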
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 3b30c37..f9b6d2e 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -26,127 +26,8 @@
#endif
END_MACRO
-MACRO0(SETUP_FP_CALLEE_SAVE_FRAME)
- // Create space for ART FP callee-saved registers
- subq MACRO_LITERAL(4 * 8), %rsp
- CFI_ADJUST_CFA_OFFSET(4 * 8)
- movq %xmm12, 0(%rsp)
- movq %xmm13, 8(%rsp)
- movq %xmm14, 16(%rsp)
- movq %xmm15, 24(%rsp)
-END_MACRO
-
-MACRO0(RESTORE_FP_CALLEE_SAVE_FRAME)
- // Restore ART FP callee-saved registers
- movq 0(%rsp), %xmm12
- movq 8(%rsp), %xmm13
- movq 16(%rsp), %xmm14
- movq 24(%rsp), %xmm15
- addq MACRO_LITERAL(4 * 8), %rsp
- CFI_ADJUST_CFA_OFFSET(- 4 * 8)
-END_MACRO
-
// For x86, the CFA is esp+4, the address above the pushed return address on the stack.
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
- */
-MACRO0(SETUP_SAVE_ALL_CALLEE_SAVES_FRAME)
-#if defined(__APPLE__)
- int3
- int3
-#else
- // R10 := Runtime::Current()
- movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
- movq (%r10), %r10
- // Save callee save registers to agree with core spills bitmap.
- PUSH r15 // Callee save.
- PUSH r14 // Callee save.
- PUSH r13 // Callee save.
- PUSH r12 // Callee save.
- PUSH rbp // Callee save.
- PUSH rbx // Callee save.
- // Create space for FPR args, plus space for ArtMethod*.
- subq MACRO_LITERAL(4 * 8 + 8), %rsp
- CFI_ADJUST_CFA_OFFSET(4 * 8 + 8)
- // Save FPRs.
- movq %xmm12, 8(%rsp)
- movq %xmm13, 16(%rsp)
- movq %xmm14, 24(%rsp)
- movq %xmm15, 32(%rsp)
- // R10 := ArtMethod* for save all callee save frame method.
- movq RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET(%r10), %r10
- // Store ArtMethod* to bottom of stack.
- movq %r10, 0(%rsp)
- // Store rsp as the top quick frame.
- movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
-
- // Ugly compile-time check, but we only have the preprocessor.
- // Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 6 * 8 + 4 * 8 + 8 + 8)
-#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(X86_64) size not as expected."
-#endif
-#endif // __APPLE__
-END_MACRO
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly)
- */
-MACRO0(SETUP_SAVE_REFS_ONLY_FRAME)
-#if defined(__APPLE__)
- int3
- int3
-#else
- // R10 := Runtime::Current()
- movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
- movq (%r10), %r10
- // Save callee and GPR args, mixed together to agree with core spills bitmap.
- PUSH r15 // Callee save.
- PUSH r14 // Callee save.
- PUSH r13 // Callee save.
- PUSH r12 // Callee save.
- PUSH rbp // Callee save.
- PUSH rbx // Callee save.
- // Create space for FPR args, plus space for ArtMethod*.
- subq LITERAL(8 + 4 * 8), %rsp
- CFI_ADJUST_CFA_OFFSET(8 + 4 * 8)
- // Save FPRs.
- movq %xmm12, 8(%rsp)
- movq %xmm13, 16(%rsp)
- movq %xmm14, 24(%rsp)
- movq %xmm15, 32(%rsp)
- // R10 := ArtMethod* for refs only callee save frame method.
- movq RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET(%r10), %r10
- // Store ArtMethod* to bottom of stack.
- movq %r10, 0(%rsp)
- // Store rsp as the stop quick frame.
- movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
-
- // Ugly compile-time check, but we only have the preprocessor.
- // Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_REFS_ONLY != 6 * 8 + 4 * 8 + 8 + 8)
-#error "FRAME_SIZE_SAVE_REFS_ONLY(X86_64) size not as expected."
-#endif
-#endif // __APPLE__
-END_MACRO
-
-MACRO0(RESTORE_SAVE_REFS_ONLY_FRAME)
- movq 8(%rsp), %xmm12
- movq 16(%rsp), %xmm13
- movq 24(%rsp), %xmm14
- movq 32(%rsp), %xmm15
- addq LITERAL(8 + 4*8), %rsp
- CFI_ADJUST_CFA_OFFSET(-8 - 4*8)
- // TODO: optimize by not restoring callee-saves restored by the ABI
- POP rbx
- POP rbp
- POP r12
- POP r13
- POP r14
- POP r15
-END_MACRO
/*
* Macro that sets up the callee save frame to conform with
@@ -408,26 +289,6 @@
RESTORE_SAVE_EVERYTHING_FRAME_GPRS_EXCEPT_RAX
END_MACRO
- /*
- * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
- * exception is Thread::Current()->exception_ when the runtime method frame is ready.
- */
-MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
- // (Thread*) setup
- movq %gs:THREAD_SELF_OFFSET, %rdi
- call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*)
- UNREACHABLE
-END_MACRO
-
- /*
- * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
- * exception is Thread::Current()->exception_.
- */
-MACRO0(DELIVER_PENDING_EXCEPTION)
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save callee saves for throw
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END_MACRO
-
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
@@ -993,15 +854,6 @@
DELIVER_PENDING_EXCEPTION
END_MACRO
-MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
- movq %gs:THREAD_EXCEPTION_OFFSET, %rcx // get exception field
- testq %rcx, %rcx // rcx == 0 ?
- jnz 1f // if rcx != 0 goto 1
- ret // return
-1: // deliver exception on current thread
- DELIVER_PENDING_EXCEPTION
-END_MACRO
-
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
diff --git a/runtime/art_field.h b/runtime/art_field.h
index bc2c399..c149003 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -96,10 +96,14 @@
return MemberOffset(offset_);
}
- static MemberOffset OffsetOffset() {
+ static constexpr MemberOffset OffsetOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_));
}
+ static constexpr MemberOffset DeclaringClassOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(ArtField, declaring_class_));
+ }
+
MemberOffset GetOffsetDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);
void SetOffset(MemberOffset num_bytes) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 7b435d5..ff61065 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -411,6 +411,10 @@
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, method_index_));
}
+ static constexpr MemberOffset ImtIndexOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(ArtMethod, imt_index_));
+ }
+
uint32_t GetCodeItemOffset() const {
return dex_code_item_offset_;
}
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.h b/runtime/entrypoints/quick/quick_entrypoints_enum.h
index 1cf7f8d..6240a7b 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_enum.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.h
@@ -36,7 +36,7 @@
// Translate a QuickEntrypointEnum value to the corresponding ThreadOffset.
template <PointerSize pointer_size>
-static ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
+static constexpr ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
switch (trampoline)
{ // NOLINT(whitespace/braces)
#define ENTRYPOINT_ENUM(name, rettype, ...) case kQuick ## name : \
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index d22f180..5b7fe0c 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -24,13 +24,11 @@
/*
* Handle fill array data by copying appropriate part of dex file into array.
*/
-extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array,
- ArtMethod* method, Thread* self)
+extern "C" int artHandleFillArrayDataFromCode(const Instruction::ArrayDataPayload* payload,
+ mirror::Array* array,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- const uint16_t* const insns = method->DexInstructions().Insns();
- const Instruction::ArrayDataPayload* payload =
- reinterpret_cast<const Instruction::ArrayDataPayload*>(insns + payload_offset);
bool success = FillArrayData(array, payload);
return success ? 0 : -1;
}
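With the payload pointer now a parameter, the address arithmetic the deleted lines performed has to happen in the caller (on x64, the invoking assembly stub). A hedged sketch of that caller-side computation, reusing the removed lines; `method`, `payload_offset`, `array`, and `self` are assumed to be in scope:

```cpp
// Hypothetical caller-side resolution of the payload address, moved out of
// artHandleFillArrayDataFromCode: the payload lives in the method's insns
// array at a 16-bit code-unit offset.
const uint16_t* const insns = method->DexInstructions().Insns();
const Instruction::ArrayDataPayload* payload =
    reinterpret_cast<const Instruction::ArrayDataPayload*>(insns + payload_offset);
int result = artHandleFillArrayDataFromCode(payload, array, self);  // 0 on success
```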
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 3c65500..5c9d226 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -354,7 +354,7 @@
return stack_map.GetDexPc();
}
} else {
- return current_code->ToDexPc(*caller_sp, outer_pc);
+ return current_code->ToDexPc(caller_sp, outer_pc);
}
}
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 5c2830d..cae7deb 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -291,10 +291,11 @@
ArtMethod* method_obj = nullptr;
uintptr_t return_pc = 0;
uintptr_t sp = 0;
+ bool is_stack_overflow = false;
// Get the architecture specific method address and return address. These
// are in architecture specific files in arch/<arch>/fault_handler_<arch>.
- GetMethodAndReturnPcAndSp(siginfo, context, &method_obj, &return_pc, &sp);
+ GetMethodAndReturnPcAndSp(siginfo, context, &method_obj, &return_pc, &sp, &is_stack_overflow);
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
@@ -336,7 +337,15 @@
reinterpret_cast<uintptr_t>(method_header->GetEntryPoint());
VLOG(signals) << "pc offset: " << std::hex << sought_offset;
}
- uint32_t dexpc = method_header->ToDexPc(method_obj, return_pc, false);
+ uint32_t dexpc = dex::kDexNoIndex;
+ if (is_stack_overflow) {
+ // If it's an implicit stack overflow check, the frame is not set up, so we
+ // just infer the dex PC as zero.
+ dexpc = 0;
+ } else {
+ CHECK_EQ(*reinterpret_cast<ArtMethod**>(sp), method_obj);
+ dexpc = method_header->ToDexPc(reinterpret_cast<ArtMethod**>(sp), return_pc, false);
+ }
VLOG(signals) << "dexpc: " << dexpc;
return !check_dex_pc || dexpc != dex::kDexNoIndex;
}
@@ -380,9 +389,11 @@
ArtMethod* method = nullptr;
uintptr_t return_pc = 0;
uintptr_t sp = 0;
+ bool is_stack_overflow = false;
Thread* self = Thread::Current();
- manager_->GetMethodAndReturnPcAndSp(siginfo, context, &method, &return_pc, &sp);
+ manager_->GetMethodAndReturnPcAndSp(
+ siginfo, context, &method, &return_pc, &sp, &is_stack_overflow);
// Inside of generated code, sp[0] is the method, so sp is the frame.
self->SetTopOfStack(reinterpret_cast<ArtMethod**>(sp));
self->DumpJavaStack(LOG_STREAM(ERROR));
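The new out parameter is derived from the same comparison each arch handler already performs; a hedged distillation of that convention as a standalone function (the reserved size is whatever GetStackOverflowReservedBytes returns for the ISA):

```cpp
#include <cstdint>

// Each fault handler treats the fault as an implicit stack-overflow check when
// the faulting address is exactly sp minus the reserved overflow region.
bool IsImplicitStackOverflowFault(uintptr_t fault_addr,
                                  uintptr_t sp,
                                  uintptr_t reserved_bytes) {
  return fault_addr == sp - reserved_bytes;
}
```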
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index f6cf2d7..8b89c22 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -55,8 +55,12 @@
// The IsInGeneratedCode() function checks that the mutator lock is held before it
// calls GetMethodAndReturnPCAndSP().
// TODO: think about adding lock assertions and fake lock and unlock functions.
- void GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp)
+ void GetMethodAndReturnPcAndSp(siginfo_t* siginfo,
+ void* context,
+ ArtMethod** out_method,
+ uintptr_t* out_return_pc,
+ uintptr_t* out_sp,
+ bool* out_is_stack_overflow)
NO_THREAD_SAFETY_ANALYSIS;
bool IsInGeneratedCode(siginfo_t* siginfo, void *context, bool check_dex_pc)
NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index ca48955..011d947 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -365,9 +365,9 @@
std::string thread_name;
GetThread()->GetThreadName(thread_name);
uint32_t dex_pc = dex::kDexNoIndex;
- if (last_return_pc_ != 0 &&
- GetCurrentOatQuickMethodHeader() != nullptr) {
- dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(m, last_return_pc_);
+ if (last_return_pc_ != 0 && GetCurrentOatQuickMethodHeader() != nullptr) {
+ dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(
+ GetCurrentQuickFrame(), last_return_pc_);
}
LOG(FATAL) << "While walking " << thread_name << " found unexpected non-runtime method"
<< " without instrumentation exit return or interpreter frame."
@@ -400,9 +400,8 @@
SetReturnPc(instrumentation_exit_pc_);
}
uint32_t dex_pc = dex::kDexNoIndex;
- if (last_return_pc_ != 0 &&
- GetCurrentOatQuickMethodHeader() != nullptr) {
- dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(m, last_return_pc_);
+ if (last_return_pc_ != 0 && GetCurrentOatQuickMethodHeader() != nullptr) {
+ dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(GetCurrentQuickFrame(), last_return_pc_);
}
dex_pcs_.push_back(dex_pc);
last_return_pc_ = return_pc;
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
index 003ea6c..0ada562 100644
--- a/runtime/interpreter/interpreter_cache.h
+++ b/runtime/interpreter/interpreter_cache.h
@@ -45,10 +45,10 @@
// Aligned to 16 bytes to make it easier to get the address of the cache
// from assembly (it ensures that the offset is a valid immediate value).
class ALIGNED(16) InterpreterCache {
+ public:
// Aligned since we load the whole entry in a single assembly instruction.
typedef std::pair<const void*, size_t> Entry ALIGNED(2 * sizeof(size_t));
- public:
// 2x size increase/decrease corresponds to ~0.5% interpreter performance change.
// Value of 256 has around 75% cache hit rate.
static constexpr size_t kSize = 256;
@@ -77,6 +77,10 @@
data_[IndexOf(key)] = Entry{key, value};
}
+ std::array<Entry, kSize>& GetArray() {
+ return data_;
+ }
+
private:
bool IsCalledFromOwningThread();
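For orientation, a hedged miniature of the cache this exposes: a direct-mapped, power-of-two table that assembly can index with a mask (exported as THREAD_INTERPRETER_CACHE_SIZE_MASK below). The hash is an assumption for illustration; ART's real IndexOf may differ:

```cpp
#include <cstddef>
#include <cstdint>
#include <utility>

using Entry = std::pair<const void*, size_t>;  // {dex instruction ptr, cached value}
constexpr size_t kSize = 256;                  // same constant as the header above

// Assumed hash: shift off alignment bits, then mask into the power-of-two
// table. Masking (rather than dividing) is what keeps the assembly fast path
// down to a couple of instructions.
size_t IndexOf(const void* key) {
  return (reinterpret_cast<uintptr_t>(key) >> 2) & (kSize - 1);
}
```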
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index db5cbce..fc0cf24 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -991,10 +991,6 @@
return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
}
-inline MemberOffset Class::EmbeddedVTableOffset(PointerSize pointer_size) {
- return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + static_cast<size_t>(pointer_size));
-}
-
inline void Class::CheckPointerSize(PointerSize pointer_size) {
DCHECK_EQ(pointer_size, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index a8b8235..ecbae71 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -794,6 +794,11 @@
static_cast<size_t>(pointer_size)));
}
+ static constexpr MemberOffset EmbeddedVTableOffset(PointerSize pointer_size) {
+ return MemberOffset(
+ ImtPtrOffset(pointer_size).Uint32Value() + static_cast<size_t>(pointer_size));
+ }
+
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool ShouldHaveImt() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1357,7 +1362,6 @@
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
- static MemberOffset EmbeddedVTableOffset(PointerSize pointer_size);
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 7f47398..7a6ebf8 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -24,9 +24,10 @@
namespace art {
-uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
+uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod** frame,
const uintptr_t pc,
bool abort_on_failure) const {
+ ArtMethod* method = *frame;
const void* entry_point = GetEntryPoint();
uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
if (method->IsNative()) {
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 9d0883b..0d08149 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -83,7 +83,7 @@
// ART compiled methods are prefixed with a header, but we can also easily
// accidentally use a function pointer to one of the stubs/trampolines.
// We prefix those with 0xFF in the assembly so that we can do DCHECKs.
- CHECK_NE(code_size_, 0xFFFFFFFF) << code_;
+ CHECK_NE(code_size_, 0xFFFFFFFF) << code_size_;
return code_size_ & kCodeSizeMask;
}
@@ -148,7 +148,9 @@
bool is_for_catch_handler,
bool abort_on_failure = true) const;
- uint32_t ToDexPc(ArtMethod* method, const uintptr_t pc, bool abort_on_failure = true) const;
+ uint32_t ToDexPc(ArtMethod** frame,
+ const uintptr_t pc,
+ bool abort_on_failure = true) const;
void SetHasShouldDeoptimizeFlag() {
DCHECK_EQ(code_size_ & kShouldDeoptimizeMask, 0u);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 2a07051..f3557a3 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -128,7 +128,7 @@
return stack_map->GetDexPc();
} else {
return cur_oat_quick_method_header_->ToDexPc(
- GetMethod(), cur_quick_frame_pc_, abort_on_failure);
+ GetCurrentQuickFrame(), cur_quick_frame_pc_, abort_on_failure);
}
} else {
return 0;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6f0776b..c78d4ec 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -4037,6 +4037,20 @@
for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
}
+ for (InterpreterCache::Entry& entry : GetInterpreterCache()->GetArray()) {
+ const Instruction* inst = reinterpret_cast<const Instruction*>(entry.first);
+ if (inst != nullptr &&
+ (inst->Opcode() == Instruction::NEW_INSTANCE ||
+ inst->Opcode() == Instruction::CHECK_CAST ||
+ inst->Opcode() == Instruction::INSTANCE_OF ||
+ inst->Opcode() == Instruction::NEW_ARRAY ||
+ inst->Opcode() == Instruction::CONST_CLASS ||
+ inst->Opcode() == Instruction::CONST_STRING ||
+ inst->Opcode() == Instruction::CONST_STRING_JUMBO)) {
+ visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&entry.second),
+ RootInfo(kRootThreadObject, thread_id));
+ }
+ }
}
void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
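The opcode filter above exists because, for exactly these instructions, the interpreter caches a mirror::Object pointer in entry.second (a resolved class or string) that the GC must treat as a root; for other opcodes the cached value is a plain integer, such as a field offset, and must not be visited. A hedged sketch of the distinction (helper name is hypothetical; Instruction::Opcode() is the real accessor):

```cpp
// Hypothetical illustration of why only some cache entries are GC roots.
bool EntryHoldsObjectReference(const Instruction* inst) {
  switch (inst->Opcode()) {
    case Instruction::NEW_INSTANCE:        // cached value: resolved Class*
    case Instruction::CHECK_CAST:
    case Instruction::INSTANCE_OF:
    case Instruction::NEW_ARRAY:
    case Instruction::CONST_CLASS:
    case Instruction::CONST_STRING:        // cached value: interned String*
    case Instruction::CONST_STRING_JUMBO:
      return true;
    default:
      return false;  // e.g. IGET caches a raw field offset, not a reference
  }
}
```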
diff --git a/runtime/thread.h b/runtime/thread.h
index 29375e5..d6faa95 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -718,7 +718,14 @@
}
public:
- static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
+ template<PointerSize pointer_size>
+ static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
+ size_t quick_entrypoint_offset) {
+ return ThreadOffsetFromTlsPtr<pointer_size>(
+ OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
+ }
+
+ static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
PointerSize pointer_size) {
if (pointer_size == PointerSize::k32) {
return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
@@ -730,12 +737,6 @@
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
- return ThreadOffsetFromTlsPtr<pointer_size>(
- OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
- }
-
- template<PointerSize pointer_size>
static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
@@ -743,7 +744,7 @@
// Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
template <PointerSize pointer_size>
- static int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
+ static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
// The entry point list defines 30 ReadBarrierMarkRegX entry points.
DCHECK_LT(reg, 30u);
// The ReadBarrierMarkRegX entry points are ordered by increasing
diff --git a/tools/cpp-define-generator/art_field.def b/tools/cpp-define-generator/art_field.def
new file mode 100644
index 0000000..a15076f
--- /dev/null
+++ b/tools/cpp-define-generator/art_field.def
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "art_field.h"
+#endif
+
+ASM_DEFINE(ART_FIELD_OFFSET_OFFSET,
+ art::ArtField::OffsetOffset().Int32Value())
+ASM_DEFINE(ART_FIELD_DECLARING_CLASS_OFFSET,
+ art::ArtField::DeclaringClassOffset().Int32Value())
diff --git a/tools/cpp-define-generator/art_method.def b/tools/cpp-define-generator/art_method.def
index 21859dc..75fbab0 100644
--- a/tools/cpp-define-generator/art_method.def
+++ b/tools/cpp-define-generator/art_method.def
@@ -20,6 +20,8 @@
ASM_DEFINE(ART_METHOD_ACCESS_FLAGS_OFFSET,
art::ArtMethod::AccessFlagsOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_IS_STATIC_FLAG,
+ art::kAccStatic)
ASM_DEFINE(ART_METHOD_DECLARING_CLASS_OFFSET,
art::ArtMethod::DeclaringClassOffset().Int32Value())
ASM_DEFINE(ART_METHOD_JNI_OFFSET_32,
@@ -30,3 +32,9 @@
art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())
ASM_DEFINE(ART_METHOD_QUICK_CODE_OFFSET_64,
art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())
+ASM_DEFINE(ART_METHOD_METHOD_INDEX_OFFSET,
+ art::ArtMethod::MethodIndexOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_IMT_INDEX_OFFSET,
+ art::ArtMethod::ImtIndexOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_HOTNESS_COUNT_OFFSET,
+ art::ArtMethod::HotnessCountOffset().Int32Value())
diff --git a/tools/cpp-define-generator/asm_defines.def b/tools/cpp-define-generator/asm_defines.def
index 9aad8a4..9747844 100644
--- a/tools/cpp-define-generator/asm_defines.def
+++ b/tools/cpp-define-generator/asm_defines.def
@@ -19,7 +19,9 @@
#endif
#include "globals.def"
+#include "art_field.def"
#include "art_method.def"
+#include "code_item.def"
#include "lockword.def"
#include "mirror_array.def"
#include "mirror_class.def"
diff --git a/tools/cpp-define-generator/code_item.def b/tools/cpp-define-generator/code_item.def
new file mode 100644
index 0000000..01b0e85
--- /dev/null
+++ b/tools/cpp-define-generator/code_item.def
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "dex/standard_dex_file.h"
+#endif
+
+ASM_DEFINE(CODE_ITEM_REGISTERS_SIZE_OFFSET,
+ art::StandardDexFile::CodeItem::RegistersSizeOffset())
+ASM_DEFINE(CODE_ITEM_INS_SIZE_OFFSET,
+ art::StandardDexFile::CodeItem::InsSizeOffset())
+ASM_DEFINE(CODE_ITEM_OUTS_SIZE_OFFSET,
+ art::StandardDexFile::CodeItem::OutsSizeOffset())
+ASM_DEFINE(CODE_ITEM_INSNS_OFFSET,
+ art::StandardDexFile::CodeItem::InsnsOffset())
diff --git a/tools/cpp-define-generator/globals.def b/tools/cpp-define-generator/globals.def
index 09d33ce..ca0c8ba 100644
--- a/tools/cpp-define-generator/globals.def
+++ b/tools/cpp-define-generator/globals.def
@@ -72,3 +72,5 @@
sizeof(art::StackReference<art::mirror::Object>))
ASM_DEFINE(STD_MEMORY_ORDER_RELAXED,
std::memory_order_relaxed)
+ASM_DEFINE(STACK_OVERFLOW_RESERVED_BYTES,
+ GetStackOverflowReservedBytes(art::kRuntimeISA))
diff --git a/tools/cpp-define-generator/mirror_class.def b/tools/cpp-define-generator/mirror_class.def
index c15ae92..6df6c41 100644
--- a/tools/cpp-define-generator/mirror_class.def
+++ b/tools/cpp-define-generator/mirror_class.def
@@ -36,3 +36,11 @@
art::mirror::Class::StatusOffset().Int32Value())
ASM_DEFINE(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT,
art::mirror::Class::kPrimitiveTypeSizeShiftShift)
+ASM_DEFINE(MIRROR_CLASS_VTABLE_OFFSET_32,
+ art::mirror::Class::EmbeddedVTableOffset(art::PointerSize::k32).Int32Value())
+ASM_DEFINE(MIRROR_CLASS_VTABLE_OFFSET_64,
+ art::mirror::Class::EmbeddedVTableOffset(art::PointerSize::k64).Int32Value())
+ASM_DEFINE(MIRROR_CLASS_IMT_PTR_OFFSET_32,
+ art::mirror::Class::ImtPtrOffset(art::PointerSize::k32).Int32Value())
+ASM_DEFINE(MIRROR_CLASS_IMT_PTR_OFFSET_64,
+ art::mirror::Class::ImtPtrOffset(art::PointerSize::k64).Int32Value())
diff --git a/tools/cpp-define-generator/mirror_object.def b/tools/cpp-define-generator/mirror_object.def
index facb037..7d7028b 100644
--- a/tools/cpp-define-generator/mirror_object.def
+++ b/tools/cpp-define-generator/mirror_object.def
@@ -24,3 +24,10 @@
sizeof(art::mirror::Object))
ASM_DEFINE(MIRROR_OBJECT_LOCK_WORD_OFFSET,
art::mirror::Object::MonitorOffset().Int32Value())
+ASM_DEFINE(GRAY_BYTE_OFFSET,
+ art::mirror::Object::MonitorOffset().Int32Value() +
+ art::LockWord::kReadBarrierStateShift / art::kBitsPerByte)
+ASM_DEFINE(GRAY_BIT_POSITION,
+ art::LockWord::kReadBarrierStateShift % art::kBitsPerByte)
+ASM_DEFINE(READ_BARRIER_TEST_VALUE,
+ static_cast<int8_t>(1 << (art::LockWord::kReadBarrierStateShift % art::kBitsPerByte)))
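A worked restatement of the gray-bit arithmetic these defines encode; the shift value below is an illustrative assumption (ART's actual LockWord::kReadBarrierStateShift may differ):

```cpp
#include <cstdint>

constexpr uint32_t kReadBarrierStateShift = 28;  // illustrative assumption
constexpr uint32_t kBitsPerByte = 8;

// GRAY_BYTE_OFFSET: which byte of the 32-bit lock word holds the state bit
// (assuming little-endian byte addressing, as on x86-64).
constexpr uint32_t kGrayByteInLockWord = kReadBarrierStateShift / kBitsPerByte;  // 3
// GRAY_BIT_POSITION: the bit's position within that byte.
constexpr uint32_t kGrayBitInByte = kReadBarrierStateShift % kBitsPerByte;       // 4
// READ_BARRIER_TEST_VALUE: a single-byte mask assembly can `testb` against.
constexpr int8_t kTestValue = static_cast<int8_t>(1u << kGrayBitInByte);         // 0x10

static_assert(kGrayByteInLockWord == 3 && kGrayBitInByte == 4, "worked example");
```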
diff --git a/tools/cpp-define-generator/thread.def b/tools/cpp-define-generator/thread.def
index 8c91dc8..72cd2a9 100644
--- a/tools/cpp-define-generator/thread.def
+++ b/tools/cpp-define-generator/thread.def
@@ -15,6 +15,7 @@
*/
#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "thread.h"
#endif
@@ -36,6 +37,8 @@
art::Thread::InterpreterCacheOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_INTERPRETER_CACHE_SIZE_LOG2,
art::Thread::InterpreterCacheSizeLog2())
+ASM_DEFINE(THREAD_INTERPRETER_CACHE_SIZE_MASK,
+ (sizeof(art::InterpreterCache::Entry) * (art::InterpreterCache::kSize - 1)))
ASM_DEFINE(THREAD_IS_GC_MARKING_OFFSET,
art::Thread::IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
@@ -60,3 +63,11 @@
art::Thread::UseMterpOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_TOP_QUICK_FRAME_OFFSET,
art::Thread::TopOfManagedStackOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_ALLOC_OBJECT_ENTRYPOINT_OFFSET,
+ art::GetThreadOffset<art::kRuntimePointerSize>(art::kQuickAllocObjectInitialized)
+ .Int32Value())
+ASM_DEFINE(THREAD_ALLOC_ARRAY_ENTRYPOINT_OFFSET,
+ art::GetThreadOffset<art::kRuntimePointerSize>(art::kQuickAllocArrayResolved)
+ .Int32Value())
+ASM_DEFINE(THREAD_READ_BARRIER_MARK_REG00_OFFSET,
+ art::Thread::ReadBarrierMarkEntryPointsOffset<art::kRuntimePointerSize>(0))
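These last defines rely on GetThreadOffset and ReadBarrierMarkEntryPointsOffset being constexpr (changed earlier in this CL): the whole offset computation must fold to an integer literal the generator can emit. A hedged miniature of that folding, with a hypothetical TLS layout:

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical miniature of Thread's TLS layout: a table of entrypoint
// pointers embedded at a fixed offset inside the thread object.
struct QuickEntryPoints {
  void* pAllocObjectInitialized;
  void* pAllocArrayResolved;
};
struct Thread {
  uint8_t preceding_tls_fields[128];  // stand-in for the real TLS state
  QuickEntryPoints quick_entrypoints;
};

// Because every step is constexpr, the result is a compile-time constant that
// ASM_DEFINE can emit for use as an addressing-mode displacement in the
// x86-64 interpreter's %gs-relative loads.
constexpr size_t kAllocObjectEntrypointOffset =
    offsetof(Thread, quick_entrypoints) +
    offsetof(QuickEntryPoints, pAllocObjectInitialized);
static_assert(kAllocObjectEntrypointOffset == 128, "folded at compile time");
```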