ART: Convert pointer size to enum
Move away from size_t to a dedicated enum class (PointerSize).
Bug: 30373134
Bug: 30419309
Test: m test-art-host
Change-Id: Id453c330f1065012e7d4f9fc24ac477cc9bb9269
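
Not shown in this diff is the new base/enums.h header that every file
below starts including. A minimal sketch of the definition it is assumed
to provide, for reference (names taken from the call sites in the diff):

    #include <cstddef>

    // Dedicated type for pointer sizes. The enumerator values equal the
    // byte sizes, so static_cast<size_t>(...) recovers the old numbers.
    enum class PointerSize : size_t {
      k32 = 4,
      k64 = 8
    };

    // Pointer size of the runtime this code is built for, replacing the
    // old sizeof(void*) idiom (see e.g. exception_test.cc below).
    static constexpr PointerSize kRuntimePointerSize =
        sizeof(void*) == 8U ? PointerSize::k64 : PointerSize::k32;

Because the enum does not implicitly convert, call sites that still need
a raw byte count (offset arithmetic, RoundUp, frame sizes) convert
explicitly with static_cast<size_t>(pointer_size), making each narrowing
visible in the diff below.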
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index bf29e1c..06a39b2 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -19,6 +19,7 @@
#include "arch/instruction_set_features.h"
#include "art_field-inl.h"
#include "art_method.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex/quick_compiler_callbacks.h"
@@ -115,7 +116,7 @@
Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
- size_t pointer_size = class_linker_->GetImagePointerSize();
+ PointerSize pointer_size = class_linker_->GetImagePointerSize();
for (auto& m : klass->GetMethods(pointer_size)) {
MakeExecutable(&m);
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 951b075..8d53dbf 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -843,13 +843,14 @@
}
}
-uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index, size_t pointer_size) {
+uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index,
+ PointerSize pointer_size) {
ReaderMutexLock mu(Thread::Current(), lock_);
auto it = inline_methods_.find(method_index);
if (it != inline_methods_.end() && (it->second.opcode == kInlineStringInit)) {
uint32_t string_init_base_offset = Thread::QuickEntryPointOffsetWithSize(
OFFSETOF_MEMBER(QuickEntryPoints, pNewEmptyString), pointer_size);
- return string_init_base_offset + it->second.d.data * pointer_size;
+ return string_init_base_offset + it->second.d.data * static_cast<size_t>(pointer_size);
}
return 0;
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 50dc032..dbdfa24 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -18,6 +18,8 @@
#define ART_COMPILER_DEX_QUICK_DEX_FILE_METHOD_INLINER_H_
#include <stdint.h>
+
+#include "base/enums.h"
#include "base/mutex.h"
#include "base/macros.h"
#include "safe_map.h"
@@ -82,7 +84,7 @@
/**
* Gets the thread pointer entrypoint offset for a string init method index and pointer size.
*/
- uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size)
+ uint32_t GetOffsetForStringInit(uint32_t method_index, PointerSize pointer_size)
REQUIRES(!lock_);
/**
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index bace014..4bcd59a 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -21,6 +21,7 @@
#include <vector>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "dex_file.h"
@@ -169,7 +170,7 @@
continue;
}
auto* cl = Runtime::Current()->GetClassLinker();
- size_t pointer_size = cl->GetImagePointerSize();
+ PointerSize pointer_size = cl->GetImagePointerSize();
ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
is_range ? inst->VRegB_3rc() : inst->VRegB_35c(), pointer_size);
if (abstract_method == nullptr) {
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 94f5acc..3a260f5 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -21,6 +21,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "dex_compilation_unit.h"
#include "mirror/class_loader.h"
@@ -336,7 +337,7 @@
methods_declaring_class->IsFinal());
// For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
// the super class.
- const size_t pointer_size = InstructionSetPointerSize(GetInstructionSet());
+ const PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
// TODO We should be able to sharpen if we are going into the boot image as well.
bool can_sharpen_super_based_on_type = same_dex_file &&
(*invoke_type == kSuper) &&
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 4c0095d..8286033 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -27,6 +27,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_vector.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
@@ -434,10 +435,10 @@
#define CREATE_TRAMPOLINE(type, abi, offset) \
if (Is64BitInstructionSet(instruction_set_)) { \
return CreateTrampoline64(instruction_set_, abi, \
- type ## _ENTRYPOINT_OFFSET(8, offset)); \
+ type ## _ENTRYPOINT_OFFSET(PointerSize::k64, offset)); \
} else { \
return CreateTrampoline32(instruction_set_, abi, \
- type ## _ENTRYPOINT_OFFSET(4, offset)); \
+ type ## _ENTRYPOINT_OFFSET(PointerSize::k32, offset)); \
}
std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateJniDlsymLookup() const {
@@ -1015,7 +1016,7 @@
}
private:
- void ResolveExceptionsForMethod(ArtMethod* method_handle, size_t pointer_size)
+ void ResolveExceptionsForMethod(ArtMethod* method_handle, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
if (code_item == nullptr) {
@@ -1147,7 +1148,7 @@
// Make a copy of the handle so that we don't clobber it doing Assign.
MutableHandle<mirror::Class> klass(hs.NewHandle(c.Get()));
std::string temp;
- const size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
while (!klass->IsObjectClass()) {
const char* descriptor = klass->GetDescriptor(&temp);
std::pair<std::unordered_set<std::string>::iterator, bool> result =
@@ -2885,7 +2886,7 @@
bool CompilerDriver::IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset) {
DexFileMethodInliner* inliner = GetMethodInlinerMap()->GetMethodInliner(dex_file);
- size_t pointer_size = InstructionSetPointerSize(GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
*offset = inliner->GetOffsetForStringInit(method_index, pointer_size);
return inliner->IsStringInitMethodIndex(method_index);
}
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 38ac052..e223534 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -17,6 +17,7 @@
#include <memory>
#include "base/arena_allocator.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "common_runtime_test.h"
#include "dex_file.h"
@@ -100,11 +101,11 @@
CHECK_ALIGNED(stack_maps_offset, 2);
}
- method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*));
+ method_f_ = my_klass_->FindVirtualMethod("f", "()I", kRuntimePointerSize);
ASSERT_TRUE(method_f_ != nullptr);
method_f_->SetEntryPointFromQuickCompiledCode(code_ptr);
- method_g_ = my_klass_->FindVirtualMethod("g", "(I)V", sizeof(void*));
+ method_g_ = my_klass_->FindVirtualMethod("g", "(I)V", kRuntimePointerSize);
ASSERT_TRUE(method_g_ != nullptr);
method_g_->SetEntryPointFromQuickCompiledCode(code_ptr);
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 063eb11..7a34683 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1416,7 +1416,7 @@
}
case kBinImTable:
case kBinIMTConflictTable: {
- bin_offset = RoundUp(bin_offset, target_ptr_size_);
+ bin_offset = RoundUp(bin_offset, static_cast<size_t>(target_ptr_size_));
break;
}
default: {
@@ -1573,7 +1573,7 @@
boot_image_end - boot_image_begin,
boot_oat_begin,
boot_oat_end - boot_oat_begin,
- target_ptr_size_,
+ static_cast<uint32_t>(target_ptr_size_),
compile_pic_,
/*is_pic*/compile_app_image_,
image_storage_mode_,
@@ -2029,7 +2029,7 @@
if (orig_strings != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::StringsOffset(),
NativeLocationInImage(orig_strings),
- /*pointer size*/8u);
+ PointerSize::k64);
orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache),
ImageAddressVisitor(this));
}
@@ -2037,7 +2037,7 @@
if (orig_types != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedTypesOffset(),
NativeLocationInImage(orig_types),
- /*pointer size*/8u);
+ PointerSize::k64);
orig_dex_cache->FixupResolvedTypes(NativeCopyLocation(orig_types, orig_dex_cache),
ImageAddressVisitor(this));
}
@@ -2045,7 +2045,7 @@
if (orig_methods != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedMethodsOffset(),
NativeLocationInImage(orig_methods),
- /*pointer size*/8u);
+ PointerSize::k64);
ArtMethod** copy_methods = NativeCopyLocation(orig_methods, orig_dex_cache);
for (size_t i = 0, num = orig_dex_cache->NumResolvedMethods(); i != num; ++i) {
ArtMethod* orig = mirror::DexCache::GetElementPtrSize(orig_methods, i, target_ptr_size_);
@@ -2058,7 +2058,7 @@
if (orig_fields != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedFieldsOffset(),
NativeLocationInImage(orig_fields),
- /*pointer size*/8u);
+ PointerSize::k64);
ArtField** copy_fields = NativeCopyLocation(orig_fields, orig_dex_cache);
for (size_t i = 0, num = orig_dex_cache->NumResolvedFields(); i != num; ++i) {
ArtField* orig = mirror::DexCache::GetElementPtrSize(orig_fields, i, target_ptr_size_);
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 1efdc22..626a975 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -28,6 +28,7 @@
#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
+#include "base/enums.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "driver/compiler_driver.h"
@@ -524,7 +525,7 @@
const bool compile_app_image_;
// Size of pointers on the target architecture.
- size_t target_ptr_size_;
+ PointerSize target_ptr_size_;
// Image data indexed by the oat file index.
dchecked_vector<ImageInfo> image_infos_;
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 29411f0..0d16260 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -22,6 +22,8 @@
namespace art {
namespace arm {
+static_assert(kArmPointerSize == PointerSize::k32, "Unexpected ARM pointer size");
+
// Used by hard float.
static const Register kHFCoreArgumentRegisters[] = {
R0, R1, R2, R3
@@ -255,7 +257,7 @@
ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kArmPointerSize) {
// Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
// or jclass for static methods and the JNIEnv. We start at the aligned register r2.
size_t padding = 0;
@@ -287,9 +289,10 @@
size_t ArmJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
- size_t frame_data_size = kArmPointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
+ size_t frame_data_size = static_cast<size_t>(kArmPointerSize)
+ + (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kArmPointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
@@ -343,7 +346,8 @@
FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
CHECK_GE(itr_slots_, 4u);
- size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kFramePointerSize);
+ size_t offset =
+ displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kFramePointerSize);
CHECK_LT(offset, OutArgSize());
return FrameOffset(offset);
}
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index 157880b..7c717cc 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
#define ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace arm {
-constexpr size_t kFramePointerSize = 4;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32) {}
~ArmManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index ab56c1c..afa707d 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -22,6 +22,8 @@
namespace art {
namespace arm64 {
+static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");
+
static const XRegister kXArgumentRegisters[] = {
X0, X1, X2, X3, X4, X5, X6, X7
};
@@ -211,7 +213,7 @@
// JNI calling convention
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kArm64PointerSize) {
}
uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
@@ -231,7 +233,7 @@
size_t frame_data_size = kFramePointerSize +
CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 337e881..90b12e5 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
#define ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace arm64 {
-constexpr size_t kFramePointerSize = 8;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~Arm64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index e21f554..c7ed9c9 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -299,7 +299,7 @@
size_t JniCallingConvention::CurrentParamSize() {
if (itr_args_ <= kObjectOrClass) {
- return frame_pointer_size_; // JNIEnv or jobject/jclass
+ return static_cast<size_t>(frame_pointer_size_); // JNIEnv or jobject/jclass
} else {
int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
return ParamSize(arg_pos);
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index e8f738d..995fa51 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#include "base/arena_object.h"
+#include "base/enums.h"
#include "handle_scope.h"
#include "primitive.h"
#include "thread.h"
@@ -70,8 +71,10 @@
virtual ~CallingConvention() {}
protected:
- CallingConvention(bool is_static, bool is_synchronized, const char* shorty,
- size_t frame_pointer_size)
+ CallingConvention(bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ PointerSize frame_pointer_size)
: itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0),
itr_float_and_doubles_(0), displacement_(0),
frame_pointer_size_(frame_pointer_size),
@@ -198,7 +201,7 @@
// Space for frames below this on the stack.
FrameOffset displacement_;
// The size of a pointer.
- const size_t frame_pointer_size_;
+ const PointerSize frame_pointer_size_;
// The size of a reference entry within the handle scope.
const size_t handle_scope_pointer_size_;
@@ -255,7 +258,7 @@
ManagedRuntimeCallingConvention(bool is_static,
bool is_synchronized,
const char* shorty,
- size_t frame_pointer_size)
+ PointerSize frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
};
@@ -328,7 +331,7 @@
// Position of handle scope and interior fields
FrameOffset HandleScopeOffset() const {
- return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_);
+ return FrameOffset(this->displacement_.Int32Value() + static_cast<size_t>(frame_pointer_size_));
// above Method reference
}
@@ -356,8 +359,10 @@
kObjectOrClass = 1
};
- JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty,
- size_t frame_pointer_size)
+ JniCallingConvention(bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ PointerSize frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
// Number of stack slots for outgoing arguments, above which the handle scope is
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 4311a34..277b794 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -23,6 +23,7 @@
#include "art_method.h"
#include "base/arena_allocator.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "calling_convention.h"
@@ -125,16 +126,16 @@
if (is_64_bit_target) {
__ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<8>(),
+ Thread::TopHandleScopeOffset<PointerSize::k64>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(),
+ __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<PointerSize::k64>(),
main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
} else {
__ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<4>(),
+ Thread::TopHandleScopeOffset<PointerSize::k32>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(),
+ __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<PointerSize::k32>(),
main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
}
@@ -188,9 +189,9 @@
// 4. Write out the end of the quick frames.
if (is_64_bit_target) {
- __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<8>());
+ __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<PointerSize::k64>());
} else {
- __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<4>());
+ __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<PointerSize::k32>());
}
// 5. Move frame down to allow space for out going args.
@@ -201,8 +202,10 @@
// Call the read barrier for the declaring class loaded from the method for a static call.
// Note that we always have outgoing param space available for at least two params.
if (kUseReadBarrier && is_static) {
- ThreadOffset<4> read_barrier32 = QUICK_ENTRYPOINT_OFFSET(4, pReadBarrierJni);
- ThreadOffset<8> read_barrier64 = QUICK_ENTRYPOINT_OFFSET(8, pReadBarrierJni);
+ ThreadOffset32 read_barrier32 =
+ QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pReadBarrierJni);
+ ThreadOffset64 read_barrier64 =
+ QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pReadBarrierJni);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
main_jni_conv->Next(); // Skip JNIEnv.
FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
@@ -245,10 +248,14 @@
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
// arguments.
- ThreadOffset<4> jni_start32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStart);
- ThreadOffset<8> jni_start64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStart);
+ ThreadOffset32 jni_start32 =
+ is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStart);
+ ThreadOffset64 jni_start64 =
+ is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
FrameOffset locked_object_handle_scope_offset(0);
if (is_synchronized) {
@@ -346,17 +353,17 @@
ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
if (is_64_bit_target) {
- __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>());
+ __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>());
} else {
- __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>());
+ __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>());
}
} else {
FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
if (is_64_bit_target) {
- __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(),
+ __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>(),
main_jni_conv->InterproceduralScratchRegister());
} else {
- __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>(),
+ __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>(),
main_jni_conv->InterproceduralScratchRegister());
}
}
@@ -387,7 +394,8 @@
main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
return_save_location.Uint32Value() % 8 != 0) {
// Ensure doubles are 8-byte aligned for MIPS
- return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
+ return_save_location = FrameOffset(return_save_location.Uint32Value()
+ + static_cast<size_t>(kMipsPointerSize));
}
CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size);
__ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
@@ -406,21 +414,27 @@
}
// thread.
end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
- ThreadOffset<4> jni_end32(-1);
- ThreadOffset<8> jni_end64(-1);
+ ThreadOffset32 jni_end32(-1);
+ ThreadOffset64 jni_end64(-1);
if (reference_return) {
// Pass result.
- jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReference);
- jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReference);
+ jni_end32 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32,
+ pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndWithReference);
+ jni_end64 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64,
+ pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndWithReference);
SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
end_jni_conv->Next();
} else {
- jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEnd);
- jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEnd);
+ jni_end32 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEnd);
+ jni_end64 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEnd);
}
// Pass saved local reference state.
if (end_jni_conv->IsCurrentParamOnStack()) {
@@ -458,9 +472,11 @@
__ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
end_jni_conv->InterproceduralScratchRegister());
if (is_64_bit_target) {
- __ CallFromThread64(ThreadOffset<8>(jni_end64), end_jni_conv->InterproceduralScratchRegister());
+ __ CallFromThread64(ThreadOffset64(jni_end64),
+ end_jni_conv->InterproceduralScratchRegister());
} else {
- __ CallFromThread32(ThreadOffset<4>(jni_end32), end_jni_conv->InterproceduralScratchRegister());
+ __ CallFromThread32(ThreadOffset32(jni_end32),
+ end_jni_conv->InterproceduralScratchRegister());
}
}
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 3d4d140..f5ab5f7 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -172,7 +172,7 @@
MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kMipsPointerSize) {
// Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
// or jclass for static methods and the JNIEnv. We start at the aligned register A2.
size_t padding = 0;
@@ -203,10 +203,10 @@
size_t MipsJniCallingConvention::FrameSize() {
// ArtMethod*, RA and callee save area size, local reference segment state
- size_t frame_data_size = kMipsPointerSize +
+ size_t frame_data_size = static_cast<size_t>(kMipsPointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kMipsPointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index 5c128b0..e95a738 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -17,17 +17,23 @@
#ifndef ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
#define ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace mips {
constexpr size_t kFramePointerSize = 4;
+static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k32),
+ "Invalid frame pointer size");
class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32) {}
~MipsManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index f2e1da8..8341e8e 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -152,7 +152,7 @@
Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kMips64PointerSize) {
}
uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
@@ -172,7 +172,7 @@
size_t frame_data_size = kFramePointerSize +
(CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
index 99ea3cd..a5fd111 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -17,17 +17,23 @@
#ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
#define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace mips64 {
constexpr size_t kFramePointerSize = 8;
+static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k64),
+ "Invalid frame pointer size");
class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 22c7cd0..1d06f26 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -23,6 +23,8 @@
namespace art {
namespace x86 {
+static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size");
+
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
// Core registers.
X86ManagedRegister::FromCpuRegister(EBP),
@@ -190,7 +192,7 @@
X86JniCallingConvention::X86JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kX86PointerSize) {
}
uint32_t X86JniCallingConvention::CoreSpillMask() const {
@@ -203,10 +205,10 @@
size_t X86JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = kX86PointerSize +
+ size_t frame_data_size = static_cast<size_t>(kX86PointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kX86PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index 9d678b7..ff92fc9 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
#define ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace x86 {
-constexpr size_t kFramePointerSize = 4;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize),
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32),
gpr_arg_count_(0) {}
~X86ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index cc4d232..cbf10bd 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -24,6 +24,10 @@
namespace art {
namespace x86_64 {
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
+
+static_assert(kX86_64PointerSize == PointerSize::k64, "Unexpected x86_64 pointer size");
+
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
// Core registers.
X86_64ManagedRegister::FromCpuRegister(RBX),
@@ -136,7 +140,7 @@
FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
return FrameOffset(displacement_.Int32Value() + // displacement
- kX86_64PointerSize + // Method ref
+ static_cast<size_t>(kX86_64PointerSize) + // Method ref
itr_slots_ * sizeof(uint32_t)); // offset into in args
}
@@ -163,7 +167,7 @@
X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kX86_64PointerSize) {
}
uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
@@ -176,10 +180,10 @@
size_t X86_64JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = kX86_64PointerSize +
+ size_t frame_data_size = static_cast<size_t>(kX86_64PointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index e2d3d48..b98f505 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -17,17 +17,19 @@
#ifndef ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
#define ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace x86_64 {
-constexpr size_t kFramePointerSize = 8;
-
class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 0762eec..ce044e8 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -16,6 +16,7 @@
#include "arch/instruction_set_features.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_compiler_test.h"
@@ -444,7 +445,8 @@
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(20U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(162 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(162 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+ sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index b32199f..f20c715 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -23,6 +23,7 @@
#include "art_method-inl.h"
#include "base/allocator.h"
#include "base/bit_vector.h"
+#include "base/enums.h"
#include "base/file_magic.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
@@ -506,7 +507,7 @@
if (!HasBootImage()) {
// Allocate space for app dex cache arrays in the .bss section.
size_t bss_start = RoundUp(size_, kPageSize);
- size_t pointer_size = GetInstructionSetPointerSize(instruction_set);
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
bss_size_ = 0u;
for (const DexFile* dex_file : *dex_files_) {
dex_cache_arrays_offsets_.Put(dex_file, bss_start + bss_size_);
@@ -941,7 +942,7 @@
}
protected:
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
};
class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
@@ -1149,7 +1150,8 @@
if (UNLIKELY(target_offset == 0)) {
ArtMethod* target = GetTargetMethod(patch);
DCHECK(target != nullptr);
- size_t size = GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet());
+ PointerSize size =
+ GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet());
const void* oat_code_offset = target->GetEntryPointFromQuickCompiledCodePtrSize(size);
if (oat_code_offset != 0) {
DCHECK(!writer_->HasBootImage());
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 3269dc6..4a4b98c 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -137,7 +137,7 @@
size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
auto pointer_size = InstructionSetPointerSize(GetInstructionSet());
- return pointer_size * index;
+ return static_cast<size_t>(pointer_size) * index;
}
uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 62dd1cc..ad02ecf 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -22,6 +22,7 @@
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/bit_field.h"
+#include "base/enums.h"
#include "compiled_method.h"
#include "driver/compiler_options.h"
#include "globals.h"
@@ -191,7 +192,7 @@
size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
// Note that this follows the current calling convention.
return GetFrameSize()
- + InstructionSetPointerSize(GetInstructionSet()) // Art method
+ + static_cast<size_t>(InstructionSetPointerSize(GetInstructionSet())) // Art method
+ parameter->GetIndex() * kVRegSize;
}
@@ -357,14 +358,14 @@
static uint32_t GetArrayDataOffset(HArrayGet* array_get);
// Return the entry point offset for ReadBarrierMarkRegX, where X is `reg`.
- template <size_t pointer_size>
+ template <PointerSize pointer_size>
static int32_t GetReadBarrierMarkEntryPointsOffset(size_t reg) {
// The entry point list defines 30 ReadBarrierMarkRegX entry points.
DCHECK_LT(reg, 30u);
// The ReadBarrierMarkRegX entry points are ordered by increasing
// register number in Thread::tls_Ptr_.quick_entrypoints.
return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
- + pointer_size * reg;
+ + static_cast<size_t>(pointer_size) * reg;
}
void EmitParallelMoves(Location from1,
@@ -700,7 +701,7 @@
size_t number_of_registers,
const F* fpu_registers,
size_t number_of_fpu_registers,
- size_t pointer_size)
+ PointerSize pointer_size)
: registers_(registers),
number_of_registers_(number_of_registers),
fpu_registers_(fpu_registers),
@@ -723,7 +724,7 @@
size_t GetStackOffsetOf(size_t index) const {
// We still reserve the space for parameters passed by registers.
// Add space for the method pointer.
- return pointer_size_ + index * kVRegSize;
+ return static_cast<size_t>(pointer_size_) + index * kVRegSize;
}
private:
@@ -731,7 +732,7 @@
const size_t number_of_registers_;
const F* fpu_registers_;
const size_t number_of_fpu_registers_;
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 124a61f..c18b793 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -61,7 +61,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, x).Int32Value()
class NullCheckSlowPathARM : public SlowPathCode {
public:
@@ -459,7 +459,7 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmWordSize>(reg);
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(reg);
// This runtime call does not require a stack map.
arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ b(GetExitLabel());
@@ -966,7 +966,7 @@
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpops(start_register, POPCOUNT(fpu_spill_mask_));
- __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
+ __ cfi().AdjustCFAOffset(-static_cast<int>(kArmPointerSize) * POPCOUNT(fpu_spill_mask_));
__ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
}
// Pop LR into PC to return.
@@ -1218,7 +1218,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kArmWordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -1939,7 +1939,7 @@
// temp = temp->GetImtEntryAt(method_offset);
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
uint32_t entry_point =
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value();
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value();
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
// LR();
@@ -3530,7 +3530,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize);
__ LoadFromOffset(kLoadWord, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
__ LoadFromOffset(kLoadWord, LR, temp, code_offset.Int32Value());
__ blx(LR);
@@ -4945,7 +4945,7 @@
if (can_be_null) {
__ CompareAndBranchIfZero(value, &is_null);
}
- __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
+ __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmPointerSize>().Int32Value());
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
__ strb(card, Address(card, temp));
if (can_be_null) {
@@ -4996,7 +4996,7 @@
}
__ LoadFromOffset(
- kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
+ kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmPointerSize>().Int32Value());
if (successor == nullptr) {
__ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -5577,7 +5577,7 @@
}
static int32_t GetExceptionTlsOffset() {
- return Thread::ExceptionOffset<kArmWordSize>().Int32Value();
+ return Thread::ExceptionOffset<kArmPointerSize>().Int32Value();
}
void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
@@ -6332,7 +6332,7 @@
// IP = Thread::Current()->GetIsGcMarking()
__ LoadFromOffset(
- kLoadWord, IP, TR, Thread::IsGcMarkingOffset<kArmWordSize>().Int32Value());
+ kLoadWord, IP, TR, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value());
__ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
} else {
@@ -6691,7 +6691,7 @@
// LR = callee_method->entry_point_from_quick_compiled_code_
__ LoadFromOffset(
kLoadWord, LR, callee_method.AsRegister<Register>(),
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value());
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
// LR()
__ blx(LR);
break;
@@ -6725,7 +6725,7 @@
__ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetMethodAt(method_offset);
uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmWordSize).Int32Value();
+ kArmPointerSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index a07a233..f9fcabd 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_H_
+#include "base/enums.h"
#include "code_generator.h"
#include "driver/compiler_options.h"
#include "nodes.h"
@@ -31,7 +32,7 @@
class CodeGeneratorARM;
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kArmWordSize = kArmPointerSize;
+static constexpr size_t kArmWordSize = static_cast<size_t>(kArmPointerSize);
static constexpr size_t kArmBitsPerWord = kArmWordSize * kBitsPerByte;
static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index efeef7b..115cee6 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -133,7 +133,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, x).Int32Value()
// Calculate memory accessing operand for save/restore live registers.
static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
@@ -625,7 +625,7 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64WordSize>(obj_.reg());
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(obj_.reg());
// This runtime call does not require a stack map.
arm64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ B(GetExitLabel());
@@ -1105,7 +1105,7 @@
if (value_can_be_null) {
__ Cbz(value, &done);
}
- __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
+ __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64PointerSize>().Int32Value()));
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
__ Strb(card, MemOperand(card, temp.X()));
if (value_can_be_null) {
@@ -1479,7 +1479,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kArm64WordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -1562,7 +1562,7 @@
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
Register temp = temps.AcquireW();
- __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
+ __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64PointerSize>().SizeValue()));
if (successor == nullptr) {
__ Cbnz(temp, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -3526,7 +3526,7 @@
Register temp = XRegisterFrom(locations->GetTemp(0));
Location receiver = locations->InAt(0);
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
// The register ip1 is required to be used for the hidden argument in
// art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
@@ -3678,7 +3678,7 @@
// /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
__ Ldr(reg.X(),
MemOperand(method_reg.X(),
- ArtMethod::DexCacheResolvedMethodsOffset(kArm64WordSize).Int32Value()));
+ ArtMethod::DexCacheResolvedMethodsOffset(kArm64PointerSize).Int32Value()));
// temp = temp[index_in_cache];
// Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
uint32_t index_in_cache = invoke->GetDexMethodIndex();
@@ -3710,7 +3710,7 @@
// LR = callee_method->entry_point_from_quick_compiled_code_;
__ Ldr(lr, MemOperand(
XRegisterFrom(callee_method),
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize).Int32Value()));
// lr()
__ Blr(lr);
break;
@@ -3730,7 +3730,7 @@
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
BlockPoolsScope block_pools(GetVIXLAssembler());
@@ -4127,7 +4127,7 @@
}
static MemOperand GetExceptionTlsAddress() {
- return MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
+ return MemOperand(tr, Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
}
void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
@@ -4440,7 +4440,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Location temp = instruction->GetLocations()->GetTemp(0);
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
__ Ldr(XRegisterFrom(temp), MemOperand(tr, QUICK_ENTRY_POINT(pNewEmptyString)));
__ Ldr(lr, MemOperand(XRegisterFrom(temp), code_offset.Int32Value()));
__ Blr(lr);
@@ -5096,7 +5096,7 @@
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireW();
// temp = Thread::Current()->GetIsGcMarking()
- __ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64WordSize>().Int32Value()));
+ __ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
__ Cbnz(temp, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
} else {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 03f5a33..240936c 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -40,7 +40,7 @@
class CodeGeneratorARM64;
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kArm64WordSize = kArm64PointerSize;
+static constexpr size_t kArm64WordSize = static_cast<size_t>(kArm64PointerSize);
static const vixl::aarch64::Register kParameterCoreRegisters[] = {
vixl::aarch64::x1,
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 334d30d..8dd82ef 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -147,7 +147,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
public:
@@ -505,7 +505,7 @@
#undef __
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
// Ensure that we fix up branches.
@@ -1147,7 +1147,7 @@
__ LoadFromOffset(kLoadWord,
card,
TR,
- Thread::CardTableOffset<kMipsWordSize>().Int32Value());
+ Thread::CardTableOffset<kMipsPointerSize>().Int32Value());
__ Srl(temp, object, gc::accounting::CardTable::kCardShift);
__ Addu(temp, card, temp);
__ Sb(card, temp, 0);
@@ -1239,7 +1239,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kMipsWordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path,
@@ -1290,7 +1290,7 @@
__ LoadFromOffset(kLoadUnsignedHalfword,
TMP,
TR,
- Thread::ThreadFlagsOffset<kMipsWordSize>().Int32Value());
+ Thread::ThreadFlagsOffset<kMipsPointerSize>().Int32Value());
if (successor == nullptr) {
__ Bnez(TMP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -3949,7 +3949,7 @@
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
Location receiver = invoke->GetLocations()->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
// Set the hidden argument.
__ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
@@ -4287,7 +4287,7 @@
T9,
callee_method.AsRegister<Register>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kMipsWordSize).Int32Value());
+ kMipsPointerSize).Int32Value());
// T9()
__ Jalr(T9);
__ Nop();
@@ -4320,7 +4320,7 @@
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
// temp = object->GetClass();
DCHECK(receiver.IsRegister());
@@ -4520,7 +4520,7 @@
}
static int32_t GetExceptionTlsOffset() {
- return Thread::ExceptionOffset<kMipsWordSize>().Int32Value();
+ return Thread::ExceptionOffset<kMipsPointerSize>().Int32Value();
}
void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
@@ -4883,7 +4883,7 @@
// Move an uint16_t value to a register.
__ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
codegen_->InvokeRuntime(
- GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ GetThreadOffset<kMipsPointerSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
instruction->GetDexPc(),
nullptr,
@@ -4909,7 +4909,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
__ LoadFromOffset(kLoadWord, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
__ LoadFromOffset(kLoadWord, T9, temp, code_offset.Int32Value());
__ Jalr(T9);
@@ -4917,7 +4917,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(
- GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ GetThreadOffset<kMipsPointerSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
instruction->GetDexPc(),
nullptr,
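For reference while reading the hunks below: a minimal sketch of the PointerSize type they migrate to, assuming base/enums.h keys each enumerator to its byte width.

// Assumed shape of base/enums.h (sketch, not the verbatim header).
// Being an enum class, values no longer convert implicitly to size_t,
// so arithmetic on the byte width needs an explicit static_cast.
enum class PointerSize : size_t {
  k32 = 4,
  k64 = 8,
};

// Presumably the width of the compiling runtime's own pointers.
static constexpr PointerSize kRuntimePointerSize =
    sizeof(void*) == 8U ? PointerSize::k64 : PointerSize::k32;

Carrying the width in the enumerator value keeps the eventual cast a no-op while turning accidental integer arithmetic on pointer sizes into a compile error.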
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 29b8c20..3472830 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -104,7 +104,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
@@ -431,7 +431,7 @@
#undef __
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<Mips64Assembler*>(GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
// Ensure that we fix up branches.
@@ -888,7 +888,7 @@
__ LoadFromOffset(kLoadDoubleword,
card,
TR,
- Thread::CardTableOffset<kMips64DoublewordSize>().Int32Value());
+ Thread::CardTableOffset<kMips64PointerSize>().Int32Value());
__ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
__ Daddu(temp, card, temp);
__ Sb(card, temp, 0);
@@ -964,7 +964,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kMips64DoublewordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kMips64PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -1004,7 +1004,7 @@
__ LoadFromOffset(kLoadUnsignedHalfword,
TMP,
TR,
- Thread::ThreadFlagsOffset<kMips64DoublewordSize>().Int32Value());
+ Thread::ThreadFlagsOffset<kMips64PointerSize>().Int32Value());
if (successor == nullptr) {
__ Bnezc(TMP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -2934,7 +2934,7 @@
GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
Location receiver = invoke->GetLocations()->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
// Set the hidden argument.
__ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
@@ -3115,7 +3115,7 @@
T9,
callee_method.AsRegister<GpuRegister>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kMips64DoublewordSize).Int32Value());
+ kMips64PointerSize).Int32Value());
// T9()
__ Jalr(T9);
__ Nop();
@@ -3153,7 +3153,7 @@
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
// temp = object->GetClass();
__ LoadFromOffset(kLoadUnsignedWord, temp, receiver, class_offset);
@@ -3231,7 +3231,7 @@
}
static int32_t GetExceptionTlsOffset() {
- return Thread::ExceptionOffset<kMips64DoublewordSize>().Int32Value();
+ return Thread::ExceptionOffset<kMips64PointerSize>().Int32Value();
}
void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
@@ -3456,7 +3456,7 @@
// String is allocated through StringFactory. Call NewEmptyString entry point.
GpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
MemberOffset code_offset =
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
__ LoadFromOffset(kLoadDoubleword, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
__ LoadFromOffset(kLoadDoubleword, T9, temp, code_offset.Int32Value());
__ Jalr(T9);
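The constants swapped in above (kMipsPointerSize, kMips64PointerSize, and the ARM/x86 equivalents elsewhere in this change) are assumed to be per-architecture PointerSize values along these lines, replacing the old word-size constants such as kMipsWordSize and kMips64DoublewordSize:

// Hypothetical per-architecture constants (sketch; assumed to sit near
// the InstructionSet definitions).
constexpr PointerSize kArmPointerSize    = PointerSize::k32;
constexpr PointerSize kArm64PointerSize  = PointerSize::k64;
constexpr PointerSize kMipsPointerSize   = PointerSize::k32;
constexpr PointerSize kMips64PointerSize = PointerSize::k64;
constexpr PointerSize kX86PointerSize    = PointerSize::k32;
constexpr PointerSize kX86_64PointerSize = PointerSize::k64;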
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 528e94f..a2fa245 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -49,7 +49,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<X86Assembler*>(codegen->GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86WordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, x).Int32Value()
class NullCheckSlowPathX86 : public SlowPathCode {
public:
@@ -492,7 +492,7 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86WordSize>(reg);
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(reg);
// This runtime call does not require a stack map.
x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ jmp(GetExitLabel());
@@ -803,7 +803,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kX86WordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kX86PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -2094,7 +2094,7 @@
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -4034,7 +4034,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize);
__ fs()->movl(temp, Address::Absolute(QUICK_ENTRY_POINT(pNewEmptyString)));
__ call(Address(temp, code_offset.Int32Value()));
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
@@ -4451,7 +4451,7 @@
// (callee_method + offset_of_quick_compiled_code)()
__ call(Address(callee_method.AsRegister<Register>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86WordSize).Int32Value()));
+ kX86PointerSize).Int32Value()));
break;
}
@@ -4485,7 +4485,7 @@
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(
- temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize).Int32Value()));
}
void CodeGeneratorX86::RecordSimplePatch() {
@@ -4589,7 +4589,7 @@
__ testl(value, value);
__ j(kEqual, &is_null);
}
- __ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86WordSize>().Int32Value()));
+ __ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86PointerSize>().Int32Value()));
__ movl(temp, object);
__ shrl(temp, Immediate(gc::accounting::CardTable::kCardShift));
__ movb(Address(temp, card, TIMES_1, 0),
@@ -5681,7 +5681,7 @@
DCHECK_EQ(slow_path->GetSuccessor(), successor);
}
- __ fs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86WordSize>().Int32Value()),
+ __ fs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86PointerSize>().Int32Value()),
Immediate(0));
if (successor == nullptr) {
__ j(kNotEqual, slow_path->GetEntryLabel());
@@ -6277,7 +6277,7 @@
}
static Address GetExceptionTlsAddress() {
- return Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
+ return Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>().Int32Value());
}
void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
@@ -6994,7 +6994,7 @@
new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(instruction, root);
codegen_->AddSlowPath(slow_path);
- __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86WordSize>().Int32Value()),
+ __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86PointerSize>().Int32Value()),
Immediate(0));
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
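The GetThreadOffset<kX86PointerSize>(entrypoint) calls above now take the enum as their template argument; the helper is assumed to be declared roughly as:

// Assumed declaration (sketch): maps a quick entrypoint to its
// Thread-relative offset for the given pointer width.
template <PointerSize pointer_size>
ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum entrypoint);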
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 39ea7d5..f306b33 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
#include "arch/x86/instruction_set_features_x86.h"
+#include "base/enums.h"
#include "code_generator.h"
#include "driver/compiler_options.h"
#include "nodes.h"
@@ -28,7 +29,7 @@
namespace x86 {
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kX86WordSize = kX86PointerSize;
+static constexpr size_t kX86WordSize = static_cast<size_t>(kX86PointerSize);
class CodeGeneratorX86;
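The kX86WordSize hunk just above is the recurring pattern for code that still needs a raw byte count: enum class values do not convert implicitly, so the conversion is spelled out. Under the definitions sketched earlier this is a compile-time identity:

// Sanity check (sketch): the explicit cast recovers exactly the value
// the old size_t constant carried.
static_assert(static_cast<size_t>(kX86PointerSize) == 4u,
              "x86 pointers are 4 bytes wide");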
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0f0129b..5d5fa85 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -53,7 +53,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, x).Int32Value()
class NullCheckSlowPathX86_64 : public SlowPathCode {
public:
@@ -513,7 +513,7 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64WordSize>(reg);
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(reg);
// This runtime call does not require a stack map.
x86_64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ jmp(GetExitLabel());
@@ -883,7 +883,7 @@
// (callee_method + offset_of_quick_compiled_code)()
__ call(Address(callee_method.AsRegister<CpuRegister>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64WordSize).SizeValue()));
+ kX86_64PointerSize).SizeValue()));
break;
}
@@ -918,7 +918,7 @@
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64WordSize).SizeValue()));
+ kX86_64PointerSize).SizeValue()));
}
void CodeGeneratorX86_64::RecordSimplePatch() {
@@ -1031,7 +1031,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kX86_64WordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kX86_64PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -2323,8 +2323,8 @@
// temp = temp->GetImtEntryAt(method_offset);
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64WordSize).SizeValue()));
+ __ call(Address(
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64PointerSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -3962,7 +3962,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
CpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64WordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64PointerSize);
__ gs()->movq(temp, Address::Absolute(QUICK_ENTRY_POINT(pNewEmptyString), /* no_rip */ true));
__ call(Address(temp, code_offset.SizeValue()));
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
@@ -5118,7 +5118,7 @@
__ testl(value, value);
__ j(kEqual, &is_null);
}
- __ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64WordSize>().Int32Value(),
+ __ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true));
__ movq(temp, object);
__ shrq(temp, Immediate(gc::accounting::CardTable::kCardShift));
@@ -5170,7 +5170,7 @@
DCHECK_EQ(slow_path->GetSuccessor(), successor);
}
- __ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64WordSize>().Int32Value(),
+ __ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true),
Immediate(0));
if (successor == nullptr) {
@@ -5687,7 +5687,7 @@
}
static Address GetExceptionTlsAddress() {
- return Address::Absolute(Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(),
+ return Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true);
}
@@ -6445,7 +6445,7 @@
new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(instruction, root);
codegen_->AddSlowPath(slow_path);
- __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64WordSize>().Int32Value(),
+ __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true),
Immediate(0));
__ j(kNotEqual, slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index fbb78bc..4e0e34c 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -28,7 +28,7 @@
namespace x86_64 {
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kX86_64WordSize = kX86_64PointerSize;
+static constexpr size_t kX86_64WordSize = static_cast<size_t>(kX86_64PointerSize);
// Some x86_64 instructions require a register to be available as temp.
static constexpr Register TMP = R11;
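The inliner hunks below rely on ClassLinker::GetImagePointerSize() now returning the enum rather than size_t — presumably along these lines:

// Assumed new shape in class_linker.h (sketch); the member itself is
// presumably stored as a PointerSize now, so no runtime validation of a
// raw byte count is needed at the call sites.
PointerSize GetImagePointerSize() const {
  return image_pointer_size_;
}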
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index a592162..31cf29a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -17,6 +17,7 @@
#include "inliner.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "builder.h"
#include "class_linker.h"
#include "constant_folding.h"
@@ -151,7 +152,7 @@
}
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- size_t pointer_size = cl->GetImagePointerSize();
+ PointerSize pointer_size = cl->GetImagePointerSize();
if (invoke->IsInvokeInterface()) {
resolved_method = info.GetTypeHandle()->FindVirtualMethodForInterface(
resolved_method, pointer_size);
@@ -243,7 +244,7 @@
~ScopedProfilingInfoInlineUse() {
if (profiling_info_ != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
DCHECK_EQ(profiling_info_, method_->GetProfilingInfo(pointer_size));
Runtime::Current()->GetJit()->GetCodeCache()->DoneCompilerUse(method_, self_);
}
@@ -390,7 +391,7 @@
}
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
if (invoke_instruction->IsInvokeInterface()) {
resolved_method = ic.GetMonomorphicType()->FindVirtualMethodForInterface(
resolved_method, pointer_size);
@@ -482,7 +483,7 @@
}
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
bool all_targets_inlined = true;
@@ -644,7 +645,7 @@
return false;
}
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
DCHECK(resolved_method != nullptr);
ArtMethod* actual_method = nullptr;
@@ -1004,7 +1005,7 @@
invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
// Check whether the field is final. If it is, we need to add a barrier.
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
if (resolved_field->IsFinal()) {
@@ -1030,7 +1031,7 @@
uint32_t field_index,
HInstruction* obj)
SHARED_REQUIRES(Locks::mutator_lock_) {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet(
@@ -1058,7 +1059,7 @@
HInstruction* obj,
HInstruction* value)
SHARED_REQUIRES(Locks::mutator_lock_) {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet(
@@ -1397,7 +1398,7 @@
}
}
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
// Iterate over the list of parameter types and test whether any of the
// actual inputs has a more specific reference type than the type declared in
@@ -1454,7 +1455,7 @@
// TODO: we could be more precise by merging the phi inputs but that requires
// some functionality from the reference type propagation.
DCHECK(return_replacement->IsPhi());
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* cls = resolved_method->GetReturnType(false /* resolve */, pointer_size);
return_replacement->SetReferenceTypeInfo(GetClassRTI(cls));
}
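Several hunks above also call InstructionSetPointerSize(), which after this change presumably maps each InstructionSet straight to the enum:

// Hypothetical sketch of the helper; previously it returned a size_t
// byte count.
static inline PointerSize InstructionSetPointerSize(InstructionSet isa) {
  switch (isa) {
    case kArm:
    case kThumb2:
    case kMips:
    case kX86:
      return PointerSize::k32;
    case kArm64:
    case kMips64:
    case kX86_64:
      return PointerSize::k64;
    default:
      LOG(FATAL) << "Unknown instruction set " << isa;
      UNREACHABLE();
  }
}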
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 5ab9389..be061f5 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1201,7 +1201,7 @@
}
__ LoadFromOffset(kLoadWord, LR, TR,
- QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pIndexOf).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pIndexOf).Int32Value());
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ blx(LR);
@@ -1270,8 +1270,10 @@
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), EQ);
- __ LoadFromOffset(
- kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromBytes).Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ LR,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromBytes).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ blx(LR);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1298,8 +1300,10 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ LoadFromOffset(
- kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromChars).Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ LR,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromChars).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ blx(LR);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1325,7 +1329,7 @@
__ b(slow_path->GetEntryLabel(), EQ);
__ LoadFromOffset(kLoadWord,
- LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromString).Int32Value());
+ LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromString).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ blx(LR);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1718,7 +1722,7 @@
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(0)));
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(1)));
- __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmWordSize>(entry).Int32Value());
+ __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmPointerSize>(entry).Int32Value());
// Native code uses the soft float ABI.
__ vmovrrd(calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1),
@@ -1744,7 +1748,7 @@
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(2)));
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(3)));
- __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmWordSize>(entry).Int32Value());
+ __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmPointerSize>(entry).Int32Value());
// Native code uses the soft float ABI.
__ vmovrrd(calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1),
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 987d3f8..06d1148 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -771,7 +771,7 @@
void IntrinsicCodeGeneratorARM64::VisitThreadCurrentThread(HInvoke* invoke) {
codegen_->Load(Primitive::kPrimNot, WRegisterFrom(invoke->GetLocations()->Out()),
- MemOperand(tr, Thread::PeerOffset<8>().Int32Value()));
+ MemOperand(tr, Thread::PeerOffset<kArm64PointerSize>().Int32Value()));
}
static void GenUnsafeGet(HInvoke* invoke,
@@ -1398,7 +1398,7 @@
__ Mov(tmp_reg, 0);
}
- __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pIndexOf).Int32Value()));
+ __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pIndexOf).Int32Value()));
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ Blr(lr);
@@ -1468,7 +1468,8 @@
__ B(eq, slow_path->GetEntryLabel());
__ Ldr(lr,
- MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromBytes).Int32Value()));
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pAllocStringFromBytes).Int32Value()));
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ Blr(lr);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1496,7 +1497,8 @@
//
// all include a null check on `data` before calling that method.
__ Ldr(lr,
- MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromChars).Int32Value()));
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pAllocStringFromChars).Int32Value()));
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ Blr(lr);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1522,7 +1524,8 @@
__ B(eq, slow_path->GetEntryLabel());
__ Ldr(lr,
- MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromString).Int32Value()));
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pAllocStringFromString).Int32Value()));
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ Blr(lr);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1563,7 +1566,8 @@
MacroAssembler* masm,
CodeGeneratorARM64* codegen,
QuickEntrypointEnum entry) {
- __ Ldr(lr, MemOperand(tr, GetThreadOffset<kArm64WordSize>(entry).Int32Value()));
+ __ Ldr(lr, MemOperand(tr,
+ GetThreadOffset<kArm64PointerSize>(entry).Int32Value()));
__ Blr(lr);
codegen->RecordPcInfo(invoke, invoke->GetDexPc());
}
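Every QUICK_ENTRYPOINT_OFFSET call site in these intrinsics now passes the PointerSize constant as its first argument; the macro is assumed to forward it as a template argument, roughly:

// Assumed definition (sketch): ptr_size is now a PointerSize value
// threaded through to Thread's templated offset helper.
#define QUICK_ENTRYPOINT_OFFSET(ptr_size, x) \
    Thread::QuickEntryPointOffset<ptr_size>(OFFSETOF_MEMBER(QuickEntryPoints, x))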
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 0bfa025..9449f79 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1899,8 +1899,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize,
- pStringCompareTo).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pStringCompareTo).Int32Value());
__ Jalr(T9);
__ Nop();
__ Bind(slow_path->GetExitLabel());
@@ -2059,7 +2058,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pIndexOf).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pIndexOf).Int32Value());
__ Jalr(T9);
__ Nop();
@@ -2145,7 +2144,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromBytes).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromBytes).Int32Value());
__ Jalr(T9);
__ Nop();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -2178,7 +2177,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromChars).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromChars).Int32Value());
__ Jalr(T9);
__ Nop();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -2207,7 +2206,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromString).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromString).Int32Value());
__ Jalr(T9);
__ Nop();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index dfaa84e..8d4d3e5 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1543,7 +1543,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pStringCompareTo).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pStringCompareTo).Int32Value());
__ Jalr(T9);
__ Nop();
__ Bind(slow_path->GetExitLabel());
@@ -1694,7 +1694,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pIndexOf).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pIndexOf).Int32Value());
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ Jalr(T9);
__ Nop();
@@ -1771,7 +1771,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
pAllocStringFromBytes).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ Jalr(T9);
@@ -1805,7 +1805,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
pAllocStringFromChars).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ Jalr(T9);
@@ -1836,7 +1836,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
pAllocStringFromString).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ Jalr(T9);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 6c81421..65f4def 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -857,7 +857,7 @@
}
// Now do the actual call.
- __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(entry)));
+ __ fs()->call(Address::Absolute(GetThreadOffset<kX86PointerSize>(entry)));
// Extract the return value from the FP stack.
__ fstpl(Address(ESP, 0));
@@ -1237,7 +1237,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pStringCompareTo)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pStringCompareTo)));
__ Bind(slow_path->GetExitLabel());
}
@@ -1510,7 +1510,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromBytes)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromBytes)));
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
@@ -1536,7 +1536,7 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromChars)));
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1560,7 +1560,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromString)));
+ __ fs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromString)));
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
@@ -1801,7 +1802,7 @@
void IntrinsicCodeGeneratorX86::VisitThreadCurrentThread(HInvoke* invoke) {
Register out = invoke->GetLocations()->Out().AsRegister<Register>();
- GetAssembler()->fs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86WordSize>()));
+ GetAssembler()->fs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86PointerSize>()));
}
static void GenUnsafeGet(HInvoke* invoke,
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 28f1f4f..7e0d729 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -720,7 +720,7 @@
DCHECK(invoke->IsInvokeStaticOrDirect());
X86_64Assembler* assembler = codegen->GetAssembler();
- __ gs()->call(Address::Absolute(GetThreadOffset<kX86_64WordSize>(entry), true));
+ __ gs()->call(Address::Absolute(GetThreadOffset<kX86_64PointerSize>(entry), true));
codegen->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1324,7 +1324,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pStringCompareTo),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pStringCompareTo),
/* no_rip */ true));
__ Bind(slow_path->GetExitLabel());
}
@@ -1597,7 +1597,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromBytes),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
+ pAllocStringFromBytes),
/* no_rip */ true));
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1624,7 +1625,8 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromChars),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
+ pAllocStringFromChars),
/* no_rip */ true));
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1649,7 +1651,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromString),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
+ pAllocStringFromString),
/* no_rip */ true));
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1875,7 +1878,7 @@
void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
- GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64WordSize>(),
+ GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64PointerSize>(),
/* no_rip */ true));
}
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 965d5ee..e96ab19 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -16,6 +16,7 @@
#include "reference_type_propagation.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -775,7 +776,7 @@
ClassLinker* cl = Runtime::Current()->GetClassLinker();
mirror::DexCache* dex_cache =
FindDexCacheWithHint(soa.Self(), instr->GetDexFile(), hint_dex_cache_);
- size_t pointer_size = cl->GetImagePointerSize();
+ PointerSize pointer_size = cl->GetImagePointerSize();
ArtMethod* method = dex_cache->GetResolvedMethod(instr->GetDexMethodIndex(), pointer_size);
mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false, pointer_size);
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index a9151ba..768ed2d 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -20,6 +20,7 @@
#include <sstream>
#include "base/bit_vector-inl.h"
+#include "base/enums.h"
#include "code_generator.h"
#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"
@@ -77,8 +78,8 @@
// Always reserve for the current method and the graph's max out registers.
// TODO: compute it instead.
// ArtMethod* takes 2 vregs for 64 bits.
- reserved_out_slots_ = InstructionSetPointerSize(codegen->GetInstructionSet()) / kVRegSize +
- codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
+ size_t ptr_size = static_cast<size_t>(InstructionSetPointerSize(codegen->GetInstructionSet()));
+ reserved_out_slots_ = ptr_size / kVRegSize + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
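A quick worked check of the reserved-slot computation above, assuming kVRegSize is 4 bytes (one dex register slot):

// 64-bit: 8 / 4 == 2 vreg slots for ArtMethod*, matching the comment
// "ArtMethod* takes 2 vregs for 64 bits"; 32-bit: 4 / 4 == 1 slot.
static_assert(static_cast<size_t>(PointerSize::k64) / 4u == 2u, "two slots on 64-bit");
static_assert(static_cast<size_t>(PointerSize::k32) / 4u == 1u, "one slot on 32-bit");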
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 97f34e6..b73f738 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -17,6 +17,7 @@
#include "sharpening.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "code_generator.h"
#include "driver/dex_compilation_unit.h"
@@ -259,7 +260,7 @@
load_class->SetLoadKindWithAddress(load_kind, address);
break;
case HLoadClass::LoadKind::kDexCachePcRelative: {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
DexCacheArraysLayout layout(pointer_size, &dex_file);
size_t element_index = layout.TypeOffset(type_index);
load_class->SetLoadKindWithDexCacheReference(load_kind, dex_file, element_index);
@@ -358,7 +359,7 @@
load_string->SetLoadKindWithAddress(load_kind, address);
break;
case HLoadString::LoadKind::kDexCachePcRelative: {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
DexCacheArraysLayout layout(pointer_size, &dex_file);
size_t element_index = layout.StringOffset(string_index);
load_string->SetLoadKindWithDexCacheReference(load_kind, dex_file, element_index);
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 1ee1c4d..304e56b 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -50,7 +50,7 @@
#ifdef ART_ENABLE_CODEGEN_arm
namespace arm {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<4> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
Thumb2Assembler assembler(arena);
switch (abi) {
@@ -80,7 +80,7 @@
#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<8> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
Arm64Assembler assembler(arena);
switch (abi) {
@@ -119,7 +119,7 @@
#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<4> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
MipsAssembler assembler(arena);
switch (abi) {
@@ -151,7 +151,7 @@
#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<8> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
Mips64Assembler assembler(arena);
switch (abi) {
@@ -183,7 +183,7 @@
#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
- ThreadOffset<4> offset) {
+ ThreadOffset32 offset) {
X86Assembler assembler(arena);
// All x86 trampolines call via the Thread* held in fs.
@@ -204,7 +204,7 @@
#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
- ThreadOffset<8> offset) {
+ ThreadOffset64 offset) {
x86_64::X86_64Assembler assembler(arena);
// All x86 trampolines call via the Thread* held in gs.
@@ -224,7 +224,7 @@
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<8> offset) {
+ ThreadOffset64 offset) {
ArenaPool pool;
ArenaAllocator arena(&pool);
switch (isa) {
@@ -250,7 +250,7 @@
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<4> offset) {
+ ThreadOffset32 offset) {
ArenaPool pool;
ArenaAllocator arena(&pool);
switch (isa) {
diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h
index 8f823f1..1a10e4c 100644
--- a/compiler/trampolines/trampoline_compiler.h
+++ b/compiler/trampolines/trampoline_compiler.h
@@ -27,10 +27,10 @@
// Create code that will invoke the function held in thread local storage.
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<4> entry_point_offset);
+ ThreadOffset32 entry_point_offset);
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<8> entry_point_offset);
+ ThreadOffset64 entry_point_offset);
} // namespace art
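The ThreadOffset<4> and ThreadOffset<8> spellings that the trampoline and assembler files used are replaced by width-named aliases, presumably declared in offsets.h as:

// Assumed declarations (sketch): the template parameter becomes the
// enum, with aliases for the two concrete widths.
template <PointerSize pointer_size>
class ThreadOffset : public Offset {
  // ... unchanged offset machinery ...
};

using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
using ThreadOffset64 = ThreadOffset<PointerSize::k64>;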
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index a7f4547..1796b39 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -384,7 +384,7 @@
return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
-constexpr size_t kFramePointerSize = kArmPointerSize;
+constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
void ArmAssembler::BuildFrame(size_t frame_size,
ManagedRegister method_reg,
@@ -568,8 +568,9 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void ArmAssembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister mscratch) {
+void ArmAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
+ uint32_t imm,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadImmediate(scratch.AsCoreRegister(), imm);
@@ -600,19 +601,19 @@
return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
}
-void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
+void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset32 src, size_t size) {
return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
}
-void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
+void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset32 offs) {
ArmManagedRegister dst = m_dst.AsArm();
CHECK(dst.IsCoreRegister()) << dst;
LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
}
void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<4> thr_offs,
- ManagedRegister mscratch) {
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -621,9 +622,9 @@
SP, fr_offs.Int32Value());
}
-void ArmAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void ArmAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -632,9 +633,9 @@
TR, thr_offs.Int32Value());
}
-void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
@@ -642,7 +643,7 @@
TR, thr_offs.Int32Value());
}
-void ArmAssembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void ArmAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
}
@@ -831,7 +832,8 @@
// TODO: place reference map on call
}
-void ArmAssembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
+void ArmAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
@@ -848,8 +850,10 @@
ArmManagedRegister scratch = mscratch.AsArm();
ArmExceptionSlowPath* slow = new (GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
buffer_.EnqueueSlowPath(slow);
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- TR, Thread::ExceptionOffset<4>().Int32Value());
+ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ TR,
+ Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
cmp(scratch.AsCoreRegister(), ShifterOperand(0));
b(slow->Entry(), NE);
}
@@ -865,7 +869,10 @@
// Don't care about preserving R0 as this call won't return.
__ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
// Set up call to Thread::Current()->pDeliverException.
- __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ R12,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value());
__ blx(R12);
#undef __
}
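One incidental cleanup in the CallFromThread32 hunk above: unused parameters gain real names tagged ATTRIBUTE_UNUSED instead of commented-out names, keeping the signatures greppable. ATTRIBUTE_UNUSED is assumed to be ART's wrapper for the GCC/Clang attribute:

// Assumed definition (sketch): silences unused-parameter warnings while
// keeping the parameter name visible in the signature.
#define ATTRIBUTE_UNUSED __attribute__((__unused__))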
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 9cf72a2..2b7414d 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -904,13 +904,13 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
@@ -918,7 +918,7 @@
// Load routines
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
+ void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -927,15 +927,15 @@
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
+ void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+ void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -990,7 +990,7 @@
// Call to address held at [base+offset]
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 9f2027f..d82caf5 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -164,24 +164,25 @@
offs.Int32Value());
}
-void Arm64Assembler::StoreImmediateToThread64(ThreadOffset<8> offs, uint32_t imm,
- ManagedRegister m_scratch) {
+void Arm64Assembler::StoreImmediateToThread64(ThreadOffset64 offs,
+ uint32_t imm,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadImmediate(scratch.AsXRegister(), imm);
StoreToOffset(scratch.AsXRegister(), TR, offs.Int32Value());
}
-void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
+void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}
-void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
+void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset64 tr_offs) {
UseScratchRegisterScope temps(vixl_masm_);
Register temp = temps.AcquireX();
___ Mov(temp, reg_x(SP));
@@ -285,7 +286,7 @@
return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}
-void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
+void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset64 src, size_t size) {
return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
}
@@ -318,7 +319,7 @@
___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}
-void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
+void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset64 offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
CHECK(dst.IsXRegister()) << dst;
LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
@@ -355,17 +356,17 @@
}
void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset<8> tr_offs,
- ManagedRegister m_scratch) {
+ ThreadOffset64 tr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
}
-void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset<8> tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
+void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
@@ -542,7 +543,8 @@
___ Blr(reg_x(scratch.AsXRegister()));
}
-void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*scratch*/) {
+void Arm64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
@@ -612,7 +614,9 @@
CHECK_ALIGNED(stack_adjust, kStackAlignment);
Arm64ManagedRegister scratch = m_scratch.AsArm64();
exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
- LoadFromOffset(scratch.AsXRegister(), TR, Thread::ExceptionOffset<8>().Int32Value());
+ LoadFromOffset(scratch.AsXRegister(),
+ TR,
+ Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
}
@@ -629,7 +633,9 @@
// Pass exception object as argument.
// Don't care about preserving X0 as this won't return.
___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
- ___ Ldr(temp, MEM_OP(reg_x(TR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));
+ ___ Ldr(temp,
+ MEM_OP(reg_x(TR),
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
___ Blr(temp);
// Call should never return.
@@ -720,7 +726,7 @@
// Increase frame to required size.
DCHECK_ALIGNED(frame_size, kStackAlignment);
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
IncreaseFrameSize(frame_size);
// Save callee-saves.
@@ -734,7 +740,7 @@
StoreToOffset(X0, SP, 0);
// Write out entry spills
- int32_t offset = frame_size + kArm64PointerSize;
+ int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
for (size_t i = 0; i < entry_spills.size(); ++i) {
Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
if (reg.IsNoRegister()) {
@@ -776,7 +782,7 @@
// For now we only check that the size of the frame is large enough to hold spills and method
// reference.
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
DCHECK_ALIGNED(frame_size, kStackAlignment);
DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index a481544..24b7982 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -126,28 +126,28 @@
void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
// Load routines.
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
+ void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
bool unpoison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
+ void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+ void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
@@ -200,7 +200,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
// Jump to address (not setting link register)
void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index e6c3a18..0a1b733 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -162,90 +162,94 @@
}
}
-void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest ATTRIBUTE_UNUSED,
+void Assembler::StoreImmediateToThread32(ThreadOffset32 dest ATTRIBUTE_UNUSED,
uint32_t imm ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest ATTRIBUTE_UNUSED,
+void Assembler::StoreImmediateToThread64(ThreadOffset64 dest ATTRIBUTE_UNUSED,
uint32_t imm ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackOffsetToThread32(
+ ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackOffsetToThread64(
+ ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackPointerToThread32(
+ ThreadOffset32 thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackPointerToThread64(
+ ThreadOffset64 thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<4> src ATTRIBUTE_UNUSED,
+ ThreadOffset32 src ATTRIBUTE_UNUSED,
size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<8> src ATTRIBUTE_UNUSED,
+ ThreadOffset64 src ATTRIBUTE_UNUSED,
size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<4> offs ATTRIBUTE_UNUSED) {
+ ThreadOffset32 offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<8> offs ATTRIBUTE_UNUSED) {
+ ThreadOffset64 offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+void Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
FrameOffset fr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+void Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
FrameOffset fr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread32(ThreadOffset<4> offset ATTRIBUTE_UNUSED,
+void Assembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread64(ThreadOffset<8> offset ATTRIBUTE_UNUSED,
+void Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
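
The point of collapsing ThreadOffset<4>/ThreadOffset<8> into ThreadOffset32/ThreadOffset64 is that a mixed-width offset now fails to compile instead of silently converting. A minimal sketch of the idea, assuming definitions along the lines of base/enums.h and offsets.h (the real classes carry more machinery):

#include <cstddef>
#include <cstdint>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

template <PointerSize kPointerSize>
class ThreadOffset {
 public:
  explicit constexpr ThreadOffset(int32_t value) : value_(value) {}
  constexpr int32_t Int32Value() const { return value_; }
 private:
  int32_t value_;
};

using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
using ThreadOffset64 = ThreadOffset<PointerSize::k64>;

void LoadRawPtrFromThread64(ThreadOffset64 offs) { (void)offs; }

void Caller() {
  LoadRawPtrFromThread64(ThreadOffset64(0x118));    // OK: widths match.
  // LoadRawPtrFromThread64(ThreadOffset32(0x118)); // Would not compile.
}
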
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 80aa630..89f7947 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -24,6 +24,7 @@
#include "arm/constants_arm.h"
#include "base/arena_allocator.h"
#include "base/arena_object.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
@@ -382,8 +383,7 @@
const ManagedRegisterEntrySpills& entry_spills) = 0;
// Emit code that will remove an activation from the stack
- virtual void RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) = 0;
+ virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;
virtual void IncreaseFrameSize(size_t adjust) = 0;
virtual void DecreaseFrameSize(size_t adjust) = 0;
@@ -393,23 +393,24 @@
virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
- virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister scratch) = 0;
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;
- virtual void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
+ virtual void StoreImmediateToThread32(ThreadOffset32 dest,
+ uint32_t imm,
ManagedRegister scratch);
- virtual void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
+ virtual void StoreImmediateToThread64(ThreadOffset64 dest,
+ uint32_t imm,
ManagedRegister scratch);
- virtual void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
+ virtual void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
+ virtual void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void StoreStackPointerToThread32(ThreadOffset<4> thr_offs);
- virtual void StoreStackPointerToThread64(ThreadOffset<8> thr_offs);
+ virtual void StoreStackPointerToThread32(ThreadOffset32 thr_offs);
+ virtual void StoreStackPointerToThread64(ThreadOffset64 thr_offs);
virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
FrameOffset in_off, ManagedRegister scratch) = 0;
@@ -417,8 +418,8 @@
// Load routines
virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
- virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size);
- virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size);
+ virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size);
+ virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size);
virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
// If unpoison_reference is true and kPoisonReference is true, then we negate the read reference.
@@ -427,24 +428,27 @@
virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;
- virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs);
- virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs);
+ virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs);
+ virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs);
// Copying routines
virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
- virtual void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+ virtual void CopyRawPtrFromThread32(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister scratch);
- virtual void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+ virtual void CopyRawPtrFromThread64(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister scratch);
- virtual void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+ virtual void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ virtual void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister scratch) = 0;
+ virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0;
virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
@@ -474,24 +478,26 @@
// Exploit fast access in managed code to Thread::Current()
virtual void GetCurrentThread(ManagedRegister tr) = 0;
- virtual void GetCurrentThread(FrameOffset dest_offset,
- ManagedRegister scratch) = 0;
+ virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
// null.
- virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) = 0;
+ virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) = 0;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) = 0;
+ virtual void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) = 0;
// src holds a handle scope entry (Object**); load this into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
- ManagedRegister src) = 0;
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
@@ -499,12 +505,10 @@
virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
// Call to address held at [base+offset]
- virtual void Call(ManagedRegister base, Offset offset,
- ManagedRegister scratch) = 0;
- virtual void Call(FrameOffset base, Offset offset,
- ManagedRegister scratch) = 0;
- virtual void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch);
- virtual void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch);
+ virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
+ virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0;
+ virtual void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch);
+ virtual void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch);
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 608b3bc..e6b32de 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -26,6 +26,11 @@
namespace art {
namespace mips {
+static_assert(static_cast<size_t>(kMipsPointerSize) == kMipsWordSize,
+ "Unexpected Mips pointer size.");
+static_assert(kMipsPointerSize == PointerSize::k32, "Unexpected Mips pointer size.");
+
+
std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
if (rhs >= D0 && rhs < kNumberOfDRegisters) {
os << "d" << static_cast<int>(rhs);
@@ -2794,7 +2799,8 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void MipsAssembler::StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest, uint32_t imm,
+void MipsAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
+ uint32_t imm,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
@@ -2803,7 +2809,7 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value());
}
-void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
@@ -2813,7 +2819,7 @@
S1, thr_offs.Int32Value());
}
-void MipsAssembler::StoreStackPointerToThread32(ThreadOffset<kMipsWordSize> thr_offs) {
+void MipsAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
}
@@ -2830,8 +2836,7 @@
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void MipsAssembler::LoadFromThread32(ManagedRegister mdest,
- ThreadOffset<kMipsWordSize> src, size_t size) {
+void MipsAssembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -2859,8 +2864,7 @@
base.AsMips().AsCoreRegister(), offs.Int32Value());
}
-void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset<kMipsWordSize> offs) {
+void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) {
MipsManagedRegister dest = mdest.AsMips();
CHECK(dest.IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
@@ -2915,7 +2919,7 @@
}
void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<kMipsWordSize> thr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
@@ -2925,7 +2929,7 @@
SP, fr_offs.Int32Value());
}
-void MipsAssembler::CopyRawPtrToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+void MipsAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
@@ -3099,7 +3103,7 @@
// TODO: place reference map on call.
}
-void MipsAssembler::CallFromThread32(ThreadOffset<kMipsWordSize> offset ATTRIBUTE_UNUSED,
+void MipsAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "no mips implementation";
}
@@ -3117,7 +3121,7 @@
MipsManagedRegister scratch = mscratch.AsMips();
exception_blocks_.emplace_back(scratch, stack_adjust);
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- S1, Thread::ExceptionOffset<kMipsWordSize>().Int32Value());
+ S1, Thread::ExceptionOffset<kMipsPointerSize>().Int32Value());
// TODO: on MIPS32R6 prefer Bnezc(scratch.AsCoreRegister(), slow.Entry());
// as the NAL instruction (occurring in long R2 branches) may become deprecated.
// For now use common for R2 and R6 instructions as this code must execute on both.
@@ -3135,7 +3139,7 @@
Move(A0, exception->scratch_.AsCoreRegister());
// Set up call to Thread::Current()->pDeliverException.
LoadFromOffset(kLoadWord, T9, S1,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pDeliverException).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pDeliverException).Int32Value());
Jr(T9);
Nop();
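
The paired static_asserts added above lean on the enum-class conversion rules: checking PointerSize against a raw byte count needs an explicit cast, while checking it against another PointerSize does not. A compilable sketch under assumed constant values:

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

constexpr PointerSize kMipsPointerSize = PointerSize::k32;
constexpr size_t kMipsWordSize = 4;  // Assumed value of the ART constant.

// Enum vs. raw byte count: the conversion must be explicit.
static_assert(static_cast<size_t>(kMipsPointerSize) == kMipsWordSize,
              "Unexpected Mips pointer size.");
// Enum vs. enum: compares directly.
static_assert(kMipsPointerSize == PointerSize::k32, "Unexpected Mips pointer size.");
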
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 8367e68..852ced6 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -500,15 +500,15 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest,
+ void StoreImmediateToThread32(ThreadOffset32 dest,
uint32_t imm,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+ void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackPointerToThread32(ThreadOffset<kMipsWordSize> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest,
ManagedRegister msrc,
@@ -518,9 +518,7 @@
// Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister mdest,
- ThreadOffset<kMipsWordSize> src,
- size_t size) OVERRIDE;
+ void LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -531,16 +529,16 @@
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset<kMipsWordSize> offs) OVERRIDE;
+ void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
void CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<kMipsWordSize> thr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister mscratch) OVERRIDE;
- void CopyRawPtrToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+ void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
@@ -619,7 +617,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread32(ThreadOffset<kMipsWordSize> offset, ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread32(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 447ede5..3fd77a0 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -26,6 +26,11 @@
namespace art {
namespace mips64 {
+static_assert(static_cast<size_t>(kMips64PointerSize) == kMips64DoublewordSize,
+ "Unexpected Mips64 pointer size.");
+static_assert(kMips64PointerSize == PointerSize::k64, "Unexpected Mips64 pointer size.");
+
+
void Mips64Assembler::FinalizeCode() {
for (auto& exception_block : exception_blocks_) {
EmitExceptionPoll(&exception_block);
@@ -2110,7 +2115,7 @@
StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}
-void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs,
+void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
@@ -2119,7 +2124,7 @@
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}
-void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs) {
+void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}
@@ -2136,9 +2141,7 @@
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void Mips64Assembler::LoadFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> src,
- size_t size) {
+void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -2171,8 +2174,7 @@
base.AsMips64().AsGpuRegister(), offs.Int32Value());
}
-void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> offs) {
+void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
Mips64ManagedRegister dest = mdest.AsMips64();
CHECK(dest.IsGpuRegister());
LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
@@ -2217,7 +2219,7 @@
}
void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset<kMips64DoublewordSize> thr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
@@ -2225,7 +2227,7 @@
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}
-void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs,
+void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
@@ -2429,7 +2431,7 @@
// TODO: place reference map on call
}
-void Mips64Assembler::CallFromThread64(ThreadOffset<kMips64DoublewordSize> offset ATTRIBUTE_UNUSED,
+void Mips64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
@@ -2449,7 +2451,7 @@
LoadFromOffset(kLoadDoubleword,
scratch.AsGpuRegister(),
S1,
- Thread::ExceptionOffset<kMips64DoublewordSize>().Int32Value());
+ Thread::ExceptionOffset<kMips64PointerSize>().Int32Value());
Bnezc(scratch.AsGpuRegister(), exception_blocks_.back().Entry());
}
@@ -2466,7 +2468,7 @@
LoadFromOffset(kLoadDoubleword,
T9,
S1,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pDeliverException).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pDeliverException).Int32Value());
Jr(T9);
Nop();
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 0cd0708..1ad05b0 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -383,10 +383,11 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
ManagedRegister mscratch) OVERRIDE;
@@ -394,9 +395,7 @@
// Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> src,
- size_t size) OVERRIDE;
+ void LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -405,16 +404,17 @@
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> offs) OVERRIDE;
+ void LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<kMips64DoublewordSize> thr_offs,
+ void CopyRawPtrFromThread64(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs, FrameOffset fr_offs,
+ void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
@@ -471,8 +471,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread64(ThreadOffset<kMips64DoublewordSize> offset,
- ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread64(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index f931d75..87f5647 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2051,21 +2051,20 @@
movl(Address(ESP, dest), Immediate(imm));
}
-void X86Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister) {
+void X86Assembler::StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister) {
fs()->movl(Address::Absolute(dest), Immediate(imm));
}
-void X86Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void X86Assembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}
-void X86Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void X86Assembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
fs()->movl(Address::Absolute(thr_offs), ESP);
}
@@ -2101,7 +2100,7 @@
}
}
-void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) {
+void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
X86ManagedRegister dest = mdest.AsX86();
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size);
@@ -2111,7 +2110,7 @@
} else if (dest.IsRegisterPair()) {
CHECK_EQ(8u, size);
fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
- fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset<4>(src.Int32Value()+4)));
+ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4)));
} else if (dest.IsX87Register()) {
if (size == 4) {
fs()->flds(Address::Absolute(src));
@@ -2152,7 +2151,7 @@
}
void X86Assembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset<4> offs) {
+ ThreadOffset32 offs) {
X86ManagedRegister dest = mdest.AsX86();
CHECK(dest.IsCpuRegister());
fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
@@ -2215,17 +2214,17 @@
}
void X86Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<4> thr_offs,
- ManagedRegister mscratch) {
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
Store(fr_offs, scratch, 4);
}
-void X86Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void X86Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
Load(scratch, fr_offs, 4);
@@ -2371,26 +2370,26 @@
call(Address(scratch, offset));
}
-void X86Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister /*mscratch*/) {
+void X86Assembler::CallFromThread32(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
fs()->call(Address::Absolute(offset));
}
void X86Assembler::GetCurrentThread(ManagedRegister tr) {
fs()->movl(tr.AsX86().AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<4>()));
+ Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
}
void X86Assembler::GetCurrentThread(FrameOffset offset,
ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
- fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<4>()));
+ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
movl(Address(ESP, offset), scratch.AsCpuRegister());
}
void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
X86ExceptionSlowPath* slow = new (GetArena()) X86ExceptionSlowPath(stack_adjust);
buffer_.EnqueueSlowPath(slow);
- fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<4>()), Immediate(0));
+ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
j(kNotEqual, slow->Entry());
}
@@ -2403,8 +2402,8 @@
__ DecreaseFrameSize(stack_adjust_);
}
// Pass exception as argument in EAX
- __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<4>()));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException)));
+ __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
// this call should never return
__ int3();
#undef __
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index fa61662..75648f2 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -195,7 +195,7 @@
return result;
}
- static Address Absolute(ThreadOffset<4> addr) {
+ static Address Absolute(ThreadOffset32 addr) {
return Absolute(addr.Int32Value());
}
@@ -652,13 +652,13 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
@@ -666,7 +666,7 @@
// Load routines
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
+ void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -675,15 +675,15 @@
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
+ void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+ void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -740,7 +740,7 @@
// Call to address held at [base+offset]
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 3046710..977ce9d 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2683,7 +2683,8 @@
}
}
- DCHECK_EQ(kX86_64PointerSize, kFramePointerSize);
+ static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
+ "Unexpected frame pointer size.");
movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
@@ -2803,12 +2804,11 @@
movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq?
}
-void X86_64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
- ManagedRegister) {
+void X86_64Assembler::StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister) {
gs()->movl(Address::Absolute(dest, true), Immediate(imm)); // TODO(64) movq?
}
-void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
+void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
@@ -2817,7 +2817,7 @@
gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}
-void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
}
@@ -2858,7 +2858,7 @@
}
}
-void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) {
+void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
X86_64ManagedRegister dest = mdest.AsX86_64();
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size);
@@ -2907,7 +2907,7 @@
movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
}
-void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset<8> offs) {
+void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
X86_64ManagedRegister dest = mdest.AsX86_64();
CHECK(dest.IsCpuRegister());
gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
@@ -2969,7 +2969,7 @@
}
void X86_64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset<8> thr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
CHECK(scratch.IsCpuRegister());
@@ -2977,7 +2977,7 @@
Store(fr_offs, scratch, 8);
}
-void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
+void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
@@ -3130,17 +3130,19 @@
call(Address(scratch, offset));
}
-void X86_64Assembler::CallFromThread64(ThreadOffset<8> offset, ManagedRegister /*mscratch*/) {
+void X86_64Assembler::CallFromThread64(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
gs()->call(Address::Absolute(offset, true));
}
void X86_64Assembler::GetCurrentThread(ManagedRegister tr) {
- gs()->movq(tr.AsX86_64().AsCpuRegister(), Address::Absolute(Thread::SelfOffset<8>(), true));
+ gs()->movq(tr.AsX86_64().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
}
void X86_64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
- gs()->movq(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<8>(), true));
+ gs()->movq(scratch.AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
}
@@ -3156,7 +3158,7 @@
void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
X86_64ExceptionSlowPath* slow = new (GetArena()) X86_64ExceptionSlowPath(stack_adjust);
buffer_.EnqueueSlowPath(slow);
- gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<8>(), true), Immediate(0));
+ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0));
j(kNotEqual, slow->Entry());
}
@@ -3169,8 +3171,10 @@
__ DecreaseFrameSize(stack_adjust_);
}
// Pass exception as argument in RDI
- __ gs()->movq(CpuRegister(RDI), Address::Absolute(Thread::ExceptionOffset<8>(), true));
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), true));
+ __ gs()->movq(CpuRegister(RDI),
+ Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
// this call should never return
__ int3();
#undef __
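
Note the DCHECK_EQ on the frame pointer size became a static_assert: both operands are now compile-time constants, so a mismatch is caught at build time rather than only in debug runs. Sketch with assumed definitions:

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

constexpr PointerSize kX86_64PointerSize = PointerSize::k64;
constexpr size_t kFramePointerSize = 8;  // Assumed per-architecture constant.

// Previously DCHECK_EQ, i.e. verified only in debug runs; now a build error.
static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
              "Unexpected frame pointer size.");
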
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 361f73c..52e39cf 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -258,7 +258,7 @@
}
// If no_rip is true then the Absolute address isn't RIP relative.
- static Address Absolute(ThreadOffset<8> addr, bool no_rip = false) {
+ static Address Absolute(ThreadOffset64 addr, bool no_rip = false) {
return Absolute(addr.Int32Value(), no_rip);
}
@@ -723,13 +723,13 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
@@ -737,7 +737,7 @@
// Load routines
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
+ void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -746,15 +746,15 @@
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
+ void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size);
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+ void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -812,7 +812,7 @@
// Call to address held at [base+offset]
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index c410cd9..6c43e86 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -329,7 +329,7 @@
}
if (rn.r == 9) {
args << " ; ";
- Thread::DumpThreadOffset<4>(args, offset);
+ Thread::DumpThreadOffset<kArmPointerSize>(args, offset);
}
}
}
@@ -1407,7 +1407,7 @@
args << Rt << ", [" << Rn << ", #" << (U != 0u ? "" : "-") << imm12 << "]";
if (Rn.r == TR && is_load) {
args << " ; ";
- Thread::DumpThreadOffset<4>(args, imm12);
+ Thread::DumpThreadOffset<kArmPointerSize>(args, imm12);
} else if (Rn.r == PC) {
T2LitType lit_type[] = {
kT2LitUByte, kT2LitUHalf, kT2LitHexWord, kT2LitInvalid,
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index a93f7d5..0ef9025 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -102,7 +102,7 @@
if (instr->GetRn() == TR) {
int64_t offset = instr->GetImmLSUnsigned() << instr->GetSizeLS();
std::ostringstream tmp_stream;
- Thread::DumpThreadOffset<8>(tmp_stream, static_cast<uint32_t>(offset));
+ Thread::DumpThreadOffset<kArm64PointerSize>(tmp_stream, static_cast<uint32_t>(offset));
AppendToOutput(" ; %s", tmp_stream.str().c_str());
}
}
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 769263e..3448878 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -504,9 +504,9 @@
if (rs == 17) {
args << " ; ";
if (is64bit_) {
- Thread::DumpThreadOffset<8>(args, offset);
+ Thread::DumpThreadOffset<kMips64PointerSize>(args, offset);
} else {
- Thread::DumpThreadOffset<4>(args, offset);
+ Thread::DumpThreadOffset<kMipsPointerSize>(args, offset);
}
}
}
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 7f6a7ba..5c239f7 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -1409,11 +1409,11 @@
}
if (prefix[1] == kFs && !supports_rex_) {
args << " ; ";
- Thread::DumpThreadOffset<4>(args, address_bits);
+ Thread::DumpThreadOffset<kX86PointerSize>(args, address_bits);
}
if (prefix[1] == kGs && supports_rex_) {
args << " ; ";
- Thread::DumpThreadOffset<8>(args, address_bits);
+ Thread::DumpThreadOffset<kX86_64PointerSize>(args, address_bits);
}
const char* prefix_str;
switch (prefix[0]) {
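
Across the disassemblers, the literal <4>/<8> template arguments become named per-architecture PointerSize constants; scoped enum values work as non-type template parameters, so the call shape is unchanged. A hypothetical stand-in for Thread::DumpThreadOffset, just to show the shape — the real function resolves the offset to a named Thread field or entry point:

#include <cstddef>
#include <cstdint>
#include <iostream>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };
constexpr PointerSize kArm64PointerSize = PointerSize::k64;

// Hypothetical stand-in for Thread::DumpThreadOffset; the real function
// maps the offset to a named Thread field or quick entry point.
template <PointerSize kPointerSize>
void DumpThreadOffset(std::ostream& os, uint32_t offset) {
  os << "thread+0x" << std::hex << offset << std::dec
     << " (pointer size " << static_cast<size_t>(kPointerSize) << ")";
}

int main() {
  DumpThreadOffset<kArm64PointerSize>(std::cout, 0x5d8);
  std::cout << '\n';
  return 0;
}
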
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index f5669d7..21a0ca0 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -285,7 +285,7 @@
const backtrace_map_t& boot_map)
SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostream& os = *os_;
- const size_t pointer_size = InstructionSetPointerSize(
+ const PointerSize pointer_size = InstructionSetPointerSize(
Runtime::Current()->GetInstructionSet());
std::string file_name =
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 64349b5..8c3c5e5 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1823,7 +1823,7 @@
}
ScopedIndentation indent1(&state->vios_);
DumpFields(os, obj, obj_class);
- const size_t image_pointer_size = state->image_header_.GetPointerSize();
+ const PointerSize image_pointer_size = state->image_header_.GetPointerSize();
if (obj->IsObjectArray()) {
auto* obj_array = obj->AsObjectArray<mirror::Object>();
for (int32_t i = 0, length = obj_array->GetLength(); i < length; i++) {
@@ -1968,7 +1968,7 @@
DCHECK(method != nullptr);
const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
const void* quick_oat_code_end = GetQuickOatCodeEnd(method);
- const size_t pointer_size = image_header_.GetPointerSize();
+ const PointerSize pointer_size = image_header_.GetPointerSize();
OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
if (method->IsNative()) {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 569c5e9..9432384 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -489,13 +489,13 @@
};
void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
PatchOatArtMethodVisitor visitor(this);
image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
}
void PatchOat::PatchImTables(const ImageHeader* image_header) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
// We can safely walk target image since the conflict tables are independent.
image_header->VisitPackedImTables(
[this](ArtMethod* method) {
@@ -506,7 +506,7 @@
}
void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
// We can safely walk target image since the conflict tables are independent.
image_header->VisitPackedImtConflictTables(
[this](ArtMethod* method) {
@@ -584,7 +584,7 @@
void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) {
auto* dex_caches = down_cast<mirror::ObjectArray<mirror::DexCache>*>(
img_roots->Get(ImageHeader::kDexCaches));
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
for (size_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
auto* orig_dex_cache = dex_caches->GetWithoutChecks(i);
auto* copy_dex_cache = RelocatedCopyOf(orig_dex_cache);
@@ -705,7 +705,7 @@
PatchOat::PatchVisitor visitor(this, copy);
object->VisitReferences<kVerifyNone>(visitor, visitor);
if (object->IsClass<kVerifyNone>()) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
mirror::Class* klass = object->AsClass();
mirror::Class* copy_klass = down_cast<mirror::Class*>(copy);
RelocatedPointerVisitor native_visitor(this);
@@ -736,7 +736,7 @@
}
void PatchOat::FixupMethod(ArtMethod* object, ArtMethod* copy) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
copy->CopyFrom(object, pointer_size);
// Just update the entry points if it looks like we should.
// TODO: sanity check all the pointers' values
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 61ec695..64efea9d 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -18,6 +18,7 @@
#define ART_PATCHOAT_PATCHOAT_H_
#include "arch/instruction_set.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "elf_file.h"
@@ -168,7 +169,7 @@
}
auto ret = reinterpret_cast<uintptr_t>(obj) + delta_;
// Trim off high bits in case of a negative relocation with 64-bit patchoat.
- if (InstructionSetPointerSize(isa_) == sizeof(uint32_t)) {
+ if (Is32BitISA()) {
ret = static_cast<uintptr_t>(static_cast<uint32_t>(ret));
}
return reinterpret_cast<T*>(ret);
@@ -181,12 +182,16 @@
}
T ret = obj + delta_;
// Trim off high bits in case of a negative relocation with 64-bit patchoat.
- if (InstructionSetPointerSize(isa_) == 4) {
+ if (Is32BitISA()) {
ret = static_cast<T>(static_cast<uint32_t>(ret));
}
return ret;
}
+ bool Is32BitISA() const {
+ return InstructionSetPointerSize(isa_) == PointerSize::k32;
+ }
+
// Walks through the old image and patches the mmap'd copy of it to the new offset. It does not
// change the heap.
class PatchVisitor {
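
Is32BitISA() gives one name to a comparison previously written two ways (== sizeof(uint32_t) and == 4). A standalone sketch of the trim it guards — after adding a possibly negative delta, a 32-bit target must drop stale high bits:

#include <cstdint>

uintptr_t Relocate(uintptr_t addr, intptr_t delta, bool is_32bit_isa) {
  uintptr_t ret = addr + delta;
  if (is_32bit_isa) {
    // Keep only the low 32 bits so a negative delta cannot leave stale
    // high bits in the relocated value.
    ret = static_cast<uintptr_t>(static_cast<uint32_t>(ret));
  }
  return ret;
}
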
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 99c4a82..2b06671 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -342,6 +342,7 @@
LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
arch/instruction_set.h \
base/allocator.h \
+ base/enums.h \
base/mutex.h \
debugger.h \
base/unix_file/fd_file.h \
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index d105c67..befdd48 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -20,6 +20,7 @@
#include <sys/ucontext.h>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "base/hex_dump.h"
#include "globals.h"
@@ -144,7 +145,8 @@
void* context) {
// These are the instructions to check for. The first one is the ldr r0,[r9,#xxx]
// where xxx is the offset of the suspend trigger.
- uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
+ uint32_t checkinst1 = 0xf8d90000
+ + Thread::ThreadSuspendTriggerOffset<PointerSize::k32>().Int32Value();
uint16_t checkinst2 = 0x6800;
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
index 5580ee4..0fb8a63 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -63,7 +63,7 @@
constexpr uint32_t ArmCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(ArmCalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(ArmCalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * kArmPointerSize, kStackAlignment);
+ 1 /* Method* */) * static_cast<size_t>(kArmPointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
@@ -75,17 +75,17 @@
constexpr size_t ArmCalleeSaveFpr1Offset(Runtime::CalleeSaveType type) {
return ArmCalleeSaveFrameSize(type) -
(POPCOUNT(ArmCalleeSaveCoreSpills(type)) +
- POPCOUNT(ArmCalleeSaveFpSpills(type))) * kArmPointerSize;
+ POPCOUNT(ArmCalleeSaveFpSpills(type))) * static_cast<size_t>(kArmPointerSize);
}
constexpr size_t ArmCalleeSaveGpr1Offset(Runtime::CalleeSaveType type) {
return ArmCalleeSaveFrameSize(type) -
- POPCOUNT(ArmCalleeSaveCoreSpills(type)) * kArmPointerSize;
+ POPCOUNT(ArmCalleeSaveCoreSpills(type)) * static_cast<size_t>(kArmPointerSize);
}
constexpr size_t ArmCalleeSaveLrOffset(Runtime::CalleeSaveType type) {
return ArmCalleeSaveFrameSize(type) -
- POPCOUNT(ArmCalleeSaveCoreSpills(type) & (-(1 << LR))) * kArmPointerSize;
+ POPCOUNT(ArmCalleeSaveCoreSpills(type) & (-(1 << LR))) * static_cast<size_t>(kArmPointerSize);
}
} // namespace arm
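
The static_cast<size_t> now threaded through the frame-size arithmetic is forced by the enum class: PointerSize, unlike the old size_t constants, does not implicitly convert, so byte computations spell the conversion out. A compilable sketch:

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };
constexpr PointerSize kArmPointerSize = PointerSize::k32;

constexpr size_t SpillAreaBytes(size_t spill_count) {
  // spill_count * kArmPointerSize does not compile: no implicit conversion.
  return spill_count * static_cast<size_t>(kArmPointerSize);
}
static_assert(SpillAreaBytes(3) == 12, "Three 4-byte spill slots.");
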
diff --git a/runtime/arch/arm/thread_arm.cc b/runtime/arch/arm/thread_arm.cc
index 2a551a8..ff4f81b 100644
--- a/runtime/arch/arm/thread_arm.cc
+++ b/runtime/arch/arm/thread_arm.cc
@@ -17,15 +17,16 @@
#include "thread.h"
#include "asm_support_arm.h"
+#include "base/enums.h"
#include "base/logging.h"
namespace art {
void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<4>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<4>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<4>().Int32Value());
- CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<4>().Int32Value());
+ CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<PointerSize::k32>().Int32Value());
}
void Thread::CleanupCpu() {
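
The InitCpu checks keep their shape because the offsets shared with hand-written assembly are plain integers that cannot name PointerSize; the templated C++ accessors are cross-checked against them at thread-init time. A hedged sketch with hypothetical values:

#include <cassert>
#include <cstddef>
#include <cstdint>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// Raw constant shared with .S files; the value here is hypothetical.
#define THREAD_FLAGS_OFFSET 0

// Hypothetical stand-in for the templated Thread::ThreadFlagsOffset.
template <PointerSize kPointerSize>
constexpr int32_t ThreadFlagsOffset() { return 0; }

void InitCpu() {
  // The asm-support constant is validated against the C++ accessor.
  assert(THREAD_FLAGS_OFFSET == ThreadFlagsOffset<PointerSize::k32>());
}
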
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index f591fcc..6724d6d 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -20,6 +20,7 @@
#include <sys/ucontext.h>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "globals.h"
#include "base/logging.h"
@@ -117,7 +118,8 @@
void* context) {
// These are the instructions to check for. The first one is the ldr x0,[r18,#xxx]
// where xxx is the offset of the suspend trigger.
- uint32_t checkinst1 = 0xf9400240 | (Thread::ThreadSuspendTriggerOffset<8>().Int32Value() << 7);
+ uint32_t checkinst1 = 0xf9400240 |
+ (Thread::ThreadSuspendTriggerOffset<PointerSize::k64>().Int32Value() << 7);
uint32_t checkinst2 = 0xf9400000;
struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index b525309..b3d250b 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -71,7 +71,7 @@
constexpr uint32_t Arm64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(Arm64CalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(Arm64CalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * kArm64PointerSize, kStackAlignment);
+ 1 /* Method* */) * static_cast<size_t>(kArm64PointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo Arm64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
@@ -83,17 +83,18 @@
constexpr size_t Arm64CalleeSaveFpr1Offset(Runtime::CalleeSaveType type) {
return Arm64CalleeSaveFrameSize(type) -
(POPCOUNT(Arm64CalleeSaveCoreSpills(type)) +
- POPCOUNT(Arm64CalleeSaveFpSpills(type))) * kArm64PointerSize;
+ POPCOUNT(Arm64CalleeSaveFpSpills(type))) * static_cast<size_t>(kArm64PointerSize);
}
constexpr size_t Arm64CalleeSaveGpr1Offset(Runtime::CalleeSaveType type) {
return Arm64CalleeSaveFrameSize(type) -
- POPCOUNT(Arm64CalleeSaveCoreSpills(type)) * kArm64PointerSize;
+ POPCOUNT(Arm64CalleeSaveCoreSpills(type)) * static_cast<size_t>(kArm64PointerSize);
}
constexpr size_t Arm64CalleeSaveLrOffset(Runtime::CalleeSaveType type) {
return Arm64CalleeSaveFrameSize(type) -
- POPCOUNT(Arm64CalleeSaveCoreSpills(type) & (-(1 << LR))) * kArm64PointerSize;
+ POPCOUNT(Arm64CalleeSaveCoreSpills(type) & (-(1 << LR))) *
+ static_cast<size_t>(kArm64PointerSize);
}
} // namespace arm64
diff --git a/runtime/arch/arm64/thread_arm64.cc b/runtime/arch/arm64/thread_arm64.cc
index 564dced..3483b70 100644
--- a/runtime/arch/arm64/thread_arm64.cc
+++ b/runtime/arch/arm64/thread_arm64.cc
@@ -17,15 +17,16 @@
#include "thread.h"
#include "asm_support_arm64.h"
+#include "base/enums.h"
#include "base/logging.h"
namespace art {
void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<8>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<8>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<8>().Int32Value());
- CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<8>().Int32Value());
+ CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<PointerSize::k64>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index ff9c0b3..917acc9 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -20,6 +20,7 @@
#include <iosfwd>
#include <string>
+#include "base/enums.h"
#include "base/logging.h" // Logging is required for FATAL in the helper functions.
namespace art {
@@ -53,12 +54,12 @@
#endif
// Architecture-specific pointer sizes
-static constexpr size_t kArmPointerSize = 4;
-static constexpr size_t kArm64PointerSize = 8;
-static constexpr size_t kMipsPointerSize = 4;
-static constexpr size_t kMips64PointerSize = 8;
-static constexpr size_t kX86PointerSize = 4;
-static constexpr size_t kX86_64PointerSize = 8;
+static constexpr PointerSize kArmPointerSize = PointerSize::k32;
+static constexpr PointerSize kArm64PointerSize = PointerSize::k64;
+static constexpr PointerSize kMipsPointerSize = PointerSize::k32;
+static constexpr PointerSize kMips64PointerSize = PointerSize::k64;
+static constexpr PointerSize kX86PointerSize = PointerSize::k32;
+static constexpr PointerSize kX86_64PointerSize = PointerSize::k64;
// ARM instruction alignment. ARM processors require code to be 4-byte aligned,
// but ARM ELF requires 8.
@@ -82,7 +83,7 @@
InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags);
-static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
+static inline PointerSize GetInstructionSetPointerSize(InstructionSet isa) {
switch (isa) {
case kArm:
// Fall-through.
@@ -147,8 +148,8 @@
}
}
-static inline size_t InstructionSetPointerSize(InstructionSet isa) {
- return Is64BitInstructionSet(isa) ? 8U : 4U;
+static inline PointerSize InstructionSetPointerSize(InstructionSet isa) {
+ return Is64BitInstructionSet(isa) ? PointerSize::k64 : PointerSize::k32;
}
static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
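
With InstructionSetPointerSize returning PointerSize, ISA-dependent call sites compare against enum values and convert explicitly where bytes are needed. A usage sketch with a trimmed InstructionSet:

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };
enum InstructionSet { kArm, kArm64, kX86, kX86_64 };  // Trimmed for the sketch.

static inline bool Is64BitInstructionSet(InstructionSet isa) {
  return isa == kArm64 || isa == kX86_64;
}

static inline PointerSize InstructionSetPointerSize(InstructionSet isa) {
  return Is64BitInstructionSet(isa) ? PointerSize::k64 : PointerSize::k32;
}

size_t MethodArrayBytes(InstructionSet isa, size_t count) {
  // Byte arithmetic still requires converting back to size_t explicitly.
  return count * static_cast<size_t>(InstructionSetPointerSize(isa));
}
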
diff --git a/runtime/arch/instruction_set_test.cc b/runtime/arch/instruction_set_test.cc
index 2f3cf18..5dfc4b4 100644
--- a/runtime/arch/instruction_set_test.cc
+++ b/runtime/arch/instruction_set_test.cc
@@ -18,6 +18,7 @@
#include <gtest/gtest.h>
+#include "base/enums.h"
#include "base/stringprintf.h"
namespace art {
@@ -49,7 +50,7 @@
}
TEST(InstructionSetTest, PointerSize) {
- EXPECT_EQ(sizeof(void*), GetInstructionSetPointerSize(kRuntimeISA));
+ EXPECT_EQ(kRuntimePointerSize, GetInstructionSetPointerSize(kRuntimeISA));
}
} // namespace art
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
index f5d13c2..7b0623b 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -59,7 +59,7 @@
constexpr uint32_t MipsCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(MipsCalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(MipsCalleeSaveFPSpills(type)) /* fprs */ +
- 1 /* Method* */) * kMipsPointerSize, kStackAlignment);
+ 1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo MipsCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/mips/thread_mips.cc b/runtime/arch/mips/thread_mips.cc
index a451496..06d6211 100644
--- a/runtime/arch/mips/thread_mips.cc
+++ b/runtime/arch/mips/thread_mips.cc
@@ -17,14 +17,15 @@
#include "thread.h"
#include "asm_support_mips.h"
+#include "base/enums.h"
#include "base/logging.h"
namespace art {
void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<4>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<4>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<4>().Int32Value());
+ CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/arch/mips64/quick_method_frame_info_mips64.h b/runtime/arch/mips64/quick_method_frame_info_mips64.h
index f967be0..b7dc57f 100644
--- a/runtime/arch/mips64/quick_method_frame_info_mips64.h
+++ b/runtime/arch/mips64/quick_method_frame_info_mips64.h
@@ -62,7 +62,7 @@
constexpr uint32_t Mips64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(Mips64CalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(Mips64CalleeSaveFpSpills(type)) /* fprs */ +
- + 1 /* Method* */) * kMips64PointerSize, kStackAlignment);
+ + 1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo Mips64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/mips64/thread_mips64.cc b/runtime/arch/mips64/thread_mips64.cc
index c55537c..3ce5e50 100644
--- a/runtime/arch/mips64/thread_mips64.cc
+++ b/runtime/arch/mips64/thread_mips64.cc
@@ -17,14 +17,15 @@
#include "thread.h"
#include "asm_support_mips64.h"
+#include "base/enums.h"
#include "base/logging.h"
namespace art {
void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<8>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<8>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<8>().Int32Value());
+ CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k64>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 09af373..80bb51d 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -529,11 +530,7 @@
static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
int32_t offset;
-#ifdef __LP64__
- offset = GetThreadOffset<8>(entrypoint).Int32Value();
-#else
- offset = GetThreadOffset<4>(entrypoint).Int32Value();
-#endif
+ offset = GetThreadOffset<kRuntimePointerSize>(entrypoint).Int32Value();
return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
}
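
Because kRuntimePointerSize is a constant expression, it can instantiate the template directly, and the #ifdef __LP64__ pair collapses into one line. A reduced sketch; GetThreadOffset here is a hypothetical stand-in for the ART template of the same name:

    #include <cstddef>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    static constexpr PointerSize kRuntimePointerSize =
        sizeof(void*) == 8U ? PointerSize::k64 : PointerSize::k32;

    template <PointerSize kPointerSize>
    constexpr size_t GetThreadOffset(size_t slot) {
      return slot * static_cast<size_t>(kPointerSize);
    }

    // One expression replaces the former #ifdef __LP64__ / #else pair.
    static_assert(GetThreadOffset<kRuntimePointerSize>(3) == 3 * sizeof(void*),
                  "matches the host pointer width");
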
@@ -1016,7 +1013,7 @@
// Use an arbitrary method from c to use as referrer
size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
// arbitrary
- reinterpret_cast<size_t>(c->GetVirtualMethod(0, sizeof(void*))),
+ reinterpret_cast<size_t>(c->GetVirtualMethod(0, kRuntimePointerSize)),
0U,
StubTest::GetEntrypoint(self, kQuickAllocObject),
self);
@@ -1147,12 +1144,13 @@
if ((false)) {
// Use an arbitrary method from c to use as referrer
- size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
- 10U,
- // arbitrary
- reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0, sizeof(void*))),
- StubTest::GetEntrypoint(self, kQuickAllocArray),
- self);
+ size_t result = Invoke3(
+ static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
+ 10U,
+ // arbitrary
+ reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0, kRuntimePointerSize)),
+ StubTest::GetEntrypoint(self, kQuickAllocArray),
+ self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
@@ -1799,7 +1797,7 @@
Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(o)));
Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
// Need a method as a referrer
- ArtMethod* m = c->GetDirectMethod(0, sizeof(void*));
+ ArtMethod* m = c->GetDirectMethod(0, kRuntimePointerSize);
// Play with it...
@@ -2015,10 +2013,10 @@
Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
void* data = linear_alloc->Alloc(
self,
- ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, sizeof(void*)));
+ ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize));
ImtConflictTable* new_table = new (data) ImtConflictTable(
- empty_conflict_table, inf_contains, contains_amethod, sizeof(void*));
- conflict_method->SetImtConflictTable(new_table, sizeof(void*));
+ empty_conflict_table, inf_contains, contains_amethod, kRuntimePointerSize);
+ conflict_method->SetImtConflictTable(new_table, kRuntimePointerSize);
size_t result =
Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 24e3a0d..533905e 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -20,6 +20,7 @@
#include <sys/ucontext.h>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "globals.h"
#include "base/logging.h"
@@ -347,11 +348,7 @@
bool SuspensionHandler::Action(int, siginfo_t*, void* context) {
// These are the instructions to check for. The first one is the mov eax, fs:[xxx]
// where xxx is the offset of the suspend trigger.
-#if defined(__x86_64__)
- uint32_t trigger = Thread::ThreadSuspendTriggerOffset<8>().Int32Value();
-#else
- uint32_t trigger = Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
-#endif
+ uint32_t trigger = Thread::ThreadSuspendTriggerOffset<kRuntimePointerSize>().Int32Value();
VLOG(signals) << "Checking for suspension point";
#if defined(__x86_64__)
diff --git a/runtime/arch/x86/quick_method_frame_info_x86.h b/runtime/arch/x86/quick_method_frame_info_x86.h
index ed1d860..24c671c 100644
--- a/runtime/arch/x86/quick_method_frame_info_x86.h
+++ b/runtime/arch/x86/quick_method_frame_info_x86.h
@@ -56,7 +56,7 @@
constexpr uint32_t X86CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(X86CalleeSaveCoreSpills(type)) /* gprs */ +
2 * POPCOUNT(X86CalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * kX86PointerSize, kStackAlignment);
+ 1 /* Method* */) * static_cast<size_t>(kX86PointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo X86CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc
index c39d122..241650e 100644
--- a/runtime/arch/x86/thread_x86.cc
+++ b/runtime/arch/x86/thread_x86.cc
@@ -20,6 +20,7 @@
#include <sys/types.h>
#include "asm_support_x86.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "thread-inl.h"
#include "thread_list.h"
@@ -136,7 +137,7 @@
// Sanity check that reads from %fs point to this Thread*.
Thread* self_check;
- CHECK_EQ(THREAD_SELF_OFFSET, SelfOffset<4>().Int32Value());
+ CHECK_EQ(THREAD_SELF_OFFSET, SelfOffset<PointerSize::k32>().Int32Value());
__asm__ __volatile__("movl %%fs:(%1), %0"
: "=r"(self_check) // output
: "r"(THREAD_SELF_OFFSET) // input
@@ -144,9 +145,9 @@
CHECK_EQ(self_check, this);
// Sanity check other offsets.
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<4>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<4>().Int32Value());
- CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<4>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<PointerSize::k32>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
index 72d7e99..37eff83 100644
--- a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
+++ b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
@@ -53,7 +53,7 @@
constexpr uint32_t X86_64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(X86_64CalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(X86_64CalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * kX86_64PointerSize, kStackAlignment);
+ 1 /* Method* */) * static_cast<size_t>(kX86_64PointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo X86_64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 70a907f..2421246 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -126,20 +126,21 @@
return GetDexMethodIndex() % ImTable::kSize;
}
-inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(size_t pointer_size) {
+inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(PointerSize pointer_size) {
return GetNativePointer<ArtMethod**>(DexCacheResolvedMethodsOffset(pointer_size),
pointer_size);
}
-inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index, size_t ptr_size) {
+inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index,
+ PointerSize pointer_size) {
// NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
// without accessing the DexCache and we don't want to do that in release build.
DCHECK_LT(method_index,
- GetInterfaceMethodIfProxy(ptr_size)->GetDeclaringClass()
+ GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()
->GetDexCache()->NumResolvedMethods());
- ArtMethod* method = mirror::DexCache::GetElementPtrSize(GetDexCacheResolvedMethods(ptr_size),
+ ArtMethod* method = mirror::DexCache::GetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
method_index,
- ptr_size);
+ pointer_size);
if (LIKELY(method != nullptr)) {
auto* declaring_class = method->GetDeclaringClass();
if (LIKELY(declaring_class == nullptr || !declaring_class->IsErroneous())) {
@@ -149,70 +150,72 @@
return nullptr;
}
-inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_index, ArtMethod* new_method,
- size_t ptr_size) {
+inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_index,
+ ArtMethod* new_method,
+ PointerSize pointer_size) {
// NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
// without accessing the DexCache and we don't want to do that in release build.
DCHECK_LT(method_index,
- GetInterfaceMethodIfProxy(ptr_size)->GetDeclaringClass()
+ GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()
->GetDexCache()->NumResolvedMethods());
DCHECK(new_method == nullptr || new_method->GetDeclaringClass() != nullptr);
- mirror::DexCache::SetElementPtrSize(GetDexCacheResolvedMethods(ptr_size),
+ mirror::DexCache::SetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
method_index,
new_method,
- ptr_size);
+ pointer_size);
}
-inline bool ArtMethod::HasDexCacheResolvedMethods(size_t pointer_size) {
+inline bool ArtMethod::HasDexCacheResolvedMethods(PointerSize pointer_size) {
return GetDexCacheResolvedMethods(pointer_size) != nullptr;
}
inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod** other_cache,
- size_t pointer_size) {
+ PointerSize pointer_size) {
return GetDexCacheResolvedMethods(pointer_size) == other_cache;
}
-inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other, size_t pointer_size) {
+inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other, PointerSize pointer_size) {
return GetDexCacheResolvedMethods(pointer_size) ==
other->GetDexCacheResolvedMethods(pointer_size);
}
-inline GcRoot<mirror::Class>* ArtMethod::GetDexCacheResolvedTypes(size_t pointer_size) {
+inline GcRoot<mirror::Class>* ArtMethod::GetDexCacheResolvedTypes(PointerSize pointer_size) {
return GetNativePointer<GcRoot<mirror::Class>*>(DexCacheResolvedTypesOffset(pointer_size),
pointer_size);
}
template <bool kWithCheck>
-inline mirror::Class* ArtMethod::GetDexCacheResolvedType(uint32_t type_index, size_t ptr_size) {
+inline mirror::Class* ArtMethod::GetDexCacheResolvedType(uint32_t type_index,
+ PointerSize pointer_size) {
if (kWithCheck) {
mirror::DexCache* dex_cache =
- GetInterfaceMethodIfProxy(ptr_size)->GetDeclaringClass()->GetDexCache();
+ GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()->GetDexCache();
if (UNLIKELY(type_index >= dex_cache->NumResolvedTypes())) {
ThrowArrayIndexOutOfBoundsException(type_index, dex_cache->NumResolvedTypes());
return nullptr;
}
}
- mirror::Class* klass = GetDexCacheResolvedTypes(ptr_size)[type_index].Read();
+ mirror::Class* klass = GetDexCacheResolvedTypes(pointer_size)[type_index].Read();
return (klass != nullptr && !klass->IsErroneous()) ? klass : nullptr;
}
-inline bool ArtMethod::HasDexCacheResolvedTypes(size_t pointer_size) {
+inline bool ArtMethod::HasDexCacheResolvedTypes(PointerSize pointer_size) {
return GetDexCacheResolvedTypes(pointer_size) != nullptr;
}
inline bool ArtMethod::HasSameDexCacheResolvedTypes(GcRoot<mirror::Class>* other_cache,
- size_t pointer_size) {
+ PointerSize pointer_size) {
return GetDexCacheResolvedTypes(pointer_size) == other_cache;
}
-inline bool ArtMethod::HasSameDexCacheResolvedTypes(ArtMethod* other, size_t pointer_size) {
+inline bool ArtMethod::HasSameDexCacheResolvedTypes(ArtMethod* other, PointerSize pointer_size) {
return GetDexCacheResolvedTypes(pointer_size) == other->GetDexCacheResolvedTypes(pointer_size);
}
inline mirror::Class* ArtMethod::GetClassFromTypeIndex(uint16_t type_idx,
bool resolve,
- size_t ptr_size) {
- mirror::Class* type = GetDexCacheResolvedType(type_idx, ptr_size);
+ PointerSize pointer_size) {
+ mirror::Class* type = GetDexCacheResolvedType(type_idx, pointer_size);
if (type == nullptr && resolve) {
type = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, this);
CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
@@ -334,9 +337,9 @@
return GetDeclaringClass()->GetDexFile().GetCodeItem(GetCodeItemOffset());
}
-inline bool ArtMethod::IsResolvedTypeIdx(uint16_t type_idx, size_t ptr_size) {
+inline bool ArtMethod::IsResolvedTypeIdx(uint16_t type_idx, PointerSize pointer_size) {
DCHECK(!IsProxyMethod());
- return GetDexCacheResolvedType(type_idx, ptr_size) != nullptr;
+ return GetDexCacheResolvedType(type_idx, pointer_size) != nullptr;
}
inline int32_t ArtMethod::GetLineNumFromDexPC(uint32_t dex_pc) {
@@ -406,7 +409,7 @@
return GetDeclaringClass<kReadBarrierOption>()->IsProxyClass();
}
-inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(size_t pointer_size) {
+inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(PointerSize pointer_size) {
if (LIKELY(!IsProxyMethod())) {
return this;
}
@@ -422,22 +425,24 @@
}
inline void ArtMethod::SetDexCacheResolvedMethods(ArtMethod** new_dex_cache_methods,
- size_t ptr_size) {
- SetNativePointer(DexCacheResolvedMethodsOffset(ptr_size), new_dex_cache_methods, ptr_size);
+ PointerSize pointer_size) {
+ SetNativePointer(DexCacheResolvedMethodsOffset(pointer_size),
+ new_dex_cache_methods,
+ pointer_size);
}
inline void ArtMethod::SetDexCacheResolvedTypes(GcRoot<mirror::Class>* new_dex_cache_types,
- size_t ptr_size) {
- SetNativePointer(DexCacheResolvedTypesOffset(ptr_size), new_dex_cache_types, ptr_size);
+ PointerSize pointer_size) {
+ SetNativePointer(DexCacheResolvedTypesOffset(pointer_size), new_dex_cache_types, pointer_size);
}
-inline mirror::Class* ArtMethod::GetReturnType(bool resolve, size_t ptr_size) {
+inline mirror::Class* ArtMethod::GetReturnType(bool resolve, PointerSize pointer_size) {
DCHECK(!IsProxyMethod());
const DexFile* dex_file = GetDexFile();
const DexFile::MethodId& method_id = dex_file->GetMethodId(GetDexMethodIndex());
const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
uint16_t return_type_idx = proto_id.return_type_idx_;
- mirror::Class* type = GetDexCacheResolvedType(return_type_idx, ptr_size);
+ mirror::Class* type = GetDexCacheResolvedType(return_type_idx, pointer_size);
if (type == nullptr && resolve) {
type = Runtime::Current()->GetClassLinker()->ResolveType(return_type_idx, this);
CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
@@ -446,7 +451,7 @@
}
template<ReadBarrierOption kReadBarrierOption, typename RootVisitorType>
-void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
+void ArtMethod::VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) {
if (LIKELY(!declaring_class_.IsNull())) {
visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
mirror::Class* klass = declaring_class_.Read<kReadBarrierOption>();
@@ -482,7 +487,7 @@
template <typename Visitor>
inline void ArtMethod::UpdateObjectsForImageRelocation(const Visitor& visitor,
- size_t pointer_size) {
+ PointerSize pointer_size) {
mirror::Class* old_class = GetDeclaringClassUnchecked<kWithoutReadBarrier>();
mirror::Class* new_class = visitor(old_class);
if (old_class != new_class) {
@@ -501,7 +506,7 @@
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void ArtMethod::UpdateEntrypoints(const Visitor& visitor, size_t pointer_size) {
+inline void ArtMethod::UpdateEntrypoints(const Visitor& visitor, PointerSize pointer_size) {
if (IsNative<kReadBarrierOption>()) {
const void* old_native_code = GetEntryPointFromJniPtrSize(pointer_size);
const void* new_native_code = visitor(old_native_code);
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 113827a..60975d4 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -122,7 +122,7 @@
return dex_file->GetMethodSignature(mid) == dex_file2->GetMethodSignature(mid2);
}
-ArtMethod* ArtMethod::FindOverriddenMethod(size_t pointer_size) {
+ArtMethod* ArtMethod::FindOverriddenMethod(PointerSize pointer_size) {
if (IsStatic()) {
return nullptr;
}
@@ -196,7 +196,7 @@
// Default to handler not found.
uint32_t found_dex_pc = DexFile::kDexNoIndex;
// Iterate over the catch handlers associated with dex_pc.
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (CatchHandlerIterator it(*code_item, dex_pc); it.HasNext(); it.Next()) {
uint16_t iter_type_idx = it.GetHandlerTypeIndex();
// Catch all case
@@ -245,7 +245,7 @@
if (kIsDebugBuild) {
self->AssertThreadSuspensionIsAllowable();
CHECK_EQ(kRunnable, self->GetState());
- CHECK_STREQ(GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(), shorty);
+ CHECK_STREQ(GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(), shorty);
}
// Push a transition back into managed code onto the linked list in thread.
@@ -268,7 +268,7 @@
self, this, receiver, args + 1, result, /*stay_in_interpreter*/ true);
}
} else {
- DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
constexpr bool kLogInvocationStartAndReturn = false;
bool have_quick_code = GetEntryPointFromQuickCompiledCode() != nullptr;
@@ -476,7 +476,7 @@
return Runtime::Current()->GetClassLinker()->GetOatMethodQuickCodeFor(this) != nullptr;
}
-void ArtMethod::CopyFrom(ArtMethod* src, size_t image_pointer_size) {
+void ArtMethod::CopyFrom(ArtMethod* src, PointerSize image_pointer_size) {
memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
Size(image_pointer_size));
declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass());
@@ -499,18 +499,20 @@
hotness_count_ = 0;
}
-bool ArtMethod::IsImagePointerSize(size_t pointer_size) {
+bool ArtMethod::IsImagePointerSize(PointerSize pointer_size) {
// Hijack this function to get access to PtrSizedFieldsOffset.
//
// Ensure that PtrSizedFieldsOffset is correct. We rely here on usually having both 32-bit and
// 64-bit builds.
static_assert(std::is_standard_layout<ArtMethod>::value, "ArtMethod is not standard layout.");
- static_assert((sizeof(void*) != 4) ||
- (offsetof(ArtMethod, ptr_sized_fields_) == PtrSizedFieldsOffset(4)),
- "Unexpected 32-bit class layout.");
- static_assert((sizeof(void*) != 8) ||
- (offsetof(ArtMethod, ptr_sized_fields_) == PtrSizedFieldsOffset(8)),
- "Unexpected 64-bit class layout.");
+ static_assert(
+ (sizeof(void*) != 4) ||
+ (offsetof(ArtMethod, ptr_sized_fields_) == PtrSizedFieldsOffset(PointerSize::k32)),
+ "Unexpected 32-bit class layout.");
+ static_assert(
+ (sizeof(void*) != 8) ||
+ (offsetof(ArtMethod, ptr_sized_fields_) == PtrSizedFieldsOffset(PointerSize::k64)),
+ "Unexpected 64-bit class layout.");
Runtime* runtime = Runtime::Current();
if (runtime == nullptr) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 1d14203..acf06fd 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -21,6 +21,7 @@
#include "base/bit_utils.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "dex_file.h"
#include "gc_root.h"
#include "invoke_type.h"
@@ -65,7 +66,7 @@
ImtConflictTable(ImtConflictTable* other,
ArtMethod* interface_method,
ArtMethod* implementation_method,
- size_t pointer_size) {
+ PointerSize pointer_size) {
const size_t count = other->NumEntries(pointer_size);
for (size_t i = 0; i < count; ++i) {
SetInterfaceMethod(i, pointer_size, other->GetInterfaceMethod(i, pointer_size));
@@ -79,30 +80,30 @@
}
// num_entries excludes the header.
- ImtConflictTable(size_t num_entries, size_t pointer_size) {
+ ImtConflictTable(size_t num_entries, PointerSize pointer_size) {
SetInterfaceMethod(num_entries, pointer_size, nullptr);
SetImplementationMethod(num_entries, pointer_size, nullptr);
}
// Set an entry at an index.
- void SetInterfaceMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+ void SetInterfaceMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
SetMethod(index * kMethodCount + kMethodInterface, pointer_size, method);
}
- void SetImplementationMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+ void SetImplementationMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
SetMethod(index * kMethodCount + kMethodImplementation, pointer_size, method);
}
- ArtMethod* GetInterfaceMethod(size_t index, size_t pointer_size) const {
+ ArtMethod* GetInterfaceMethod(size_t index, PointerSize pointer_size) const {
return GetMethod(index * kMethodCount + kMethodInterface, pointer_size);
}
- ArtMethod* GetImplementationMethod(size_t index, size_t pointer_size) const {
+ ArtMethod* GetImplementationMethod(size_t index, PointerSize pointer_size) const {
return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
}
// Return true if two conflict tables are the same.
- bool Equals(ImtConflictTable* other, size_t pointer_size) const {
+ bool Equals(ImtConflictTable* other, PointerSize pointer_size) const {
size_t num = NumEntries(pointer_size);
if (num != other->NumEntries(pointer_size)) {
return false;
@@ -121,7 +122,7 @@
// NO_THREAD_SAFETY_ANALYSIS for calling with held locks. Visitor is passed a pair of ArtMethod*
// and also returns one. The order is <interface, implementation>.
template<typename Visitor>
- void Visit(const Visitor& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS {
+ void Visit(const Visitor& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS {
uint32_t table_index = 0;
for (;;) {
ArtMethod* interface_method = GetInterfaceMethod(table_index, pointer_size);
@@ -143,7 +144,7 @@
// Lookup the implementation ArtMethod associated to `interface_method`. Return null
// if not found.
- ArtMethod* Lookup(ArtMethod* interface_method, size_t pointer_size) const {
+ ArtMethod* Lookup(ArtMethod* interface_method, PointerSize pointer_size) const {
uint32_t table_index = 0;
for (;;) {
ArtMethod* current_interface_method = GetInterfaceMethod(table_index, pointer_size);
@@ -159,7 +160,7 @@
}
// Compute the number of entries in this table.
- size_t NumEntries(size_t pointer_size) const {
+ size_t NumEntries(PointerSize pointer_size) const {
uint32_t table_index = 0;
while (GetInterfaceMethod(table_index, pointer_size) != nullptr) {
++table_index;
@@ -168,41 +169,39 @@
}
// Compute the size in bytes taken by this table.
- size_t ComputeSize(size_t pointer_size) const {
+ size_t ComputeSize(PointerSize pointer_size) const {
// Add the end marker.
return ComputeSize(NumEntries(pointer_size), pointer_size);
}
// Compute the size in bytes needed for copying the given `table` and add
// one more entry.
- static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, size_t pointer_size) {
+ static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, PointerSize pointer_size) {
return table->ComputeSize(pointer_size) + EntrySize(pointer_size);
}
// Compute size with a fixed number of entries.
- static size_t ComputeSize(size_t num_entries, size_t pointer_size) {
+ static size_t ComputeSize(size_t num_entries, PointerSize pointer_size) {
return (num_entries + 1) * EntrySize(pointer_size); // Add one for null terminator.
}
- static size_t EntrySize(size_t pointer_size) {
- return pointer_size * static_cast<size_t>(kMethodCount);
+ static size_t EntrySize(PointerSize pointer_size) {
+ return static_cast<size_t>(pointer_size) * static_cast<size_t>(kMethodCount);
}
private:
- ArtMethod* GetMethod(size_t index, size_t pointer_size) const {
- if (pointer_size == 8) {
+ ArtMethod* GetMethod(size_t index, PointerSize pointer_size) const {
+ if (pointer_size == PointerSize::k64) {
return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
} else {
- DCHECK_EQ(pointer_size, 4u);
return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data32_[index]));
}
}
- void SetMethod(size_t index, size_t pointer_size, ArtMethod* method) {
- if (pointer_size == 8) {
+ void SetMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
+ if (pointer_size == PointerSize::k64) {
data64_[index] = dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
} else {
- DCHECK_EQ(pointer_size, 4u);
data32_[index] = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
}
}
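
GetMethod/SetMethod keep one backing store per width and now dispatch on the enum instead of comparing pointer_size against 8. A self-contained analogue (fixed-capacity table, plain casts where the real code uses dchecked_integral_cast):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    struct ConflictTable {
      union {
        uint32_t data32[8];
        uint64_t data64[4];
      };

      void SetMethod(size_t index, PointerSize ps, void* method) {
        if (ps == PointerSize::k64) {
          data64[index] = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
        } else {
          // The real code guards this narrowing with dchecked_integral_cast.
          data32[index] = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
        }
      }

      void* GetMethod(size_t index, PointerSize ps) const {
        uintptr_t v = (ps == PointerSize::k64)
                          ? static_cast<uintptr_t>(data64[index])
                          : static_cast<uintptr_t>(data32[index]);
        return reinterpret_cast<void*>(v);
      }
    };

    int main() {
      ConflictTable table;
      int method;  // stand-in for an ArtMethod
      table.SetMethod(0, PointerSize::k64, &method);
      assert(table.GetMethod(0, PointerSize::k64) == &method);
      return 0;
    }
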
@@ -223,7 +222,7 @@
ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0),
method_index_(0), hotness_count_(0) { }
- ArtMethod(ArtMethod* src, size_t image_pointer_size) {
+ ArtMethod(ArtMethod* src, PointerSize image_pointer_size) {
CopyFrom(src, image_pointer_size);
}
@@ -428,42 +427,45 @@
dex_method_index_ = new_idx;
}
- ALWAYS_INLINE ArtMethod** GetDexCacheResolvedMethods(size_t pointer_size)
+ ALWAYS_INLINE ArtMethod** GetDexCacheResolvedMethods(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_index, size_t ptr_size)
+ ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_index,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_index,
ArtMethod* new_method,
- size_t ptr_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE void SetDexCacheResolvedMethods(ArtMethod** new_dex_cache_methods, size_t ptr_size)
+ ALWAYS_INLINE void SetDexCacheResolvedMethods(ArtMethod** new_dex_cache_methods,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasDexCacheResolvedMethods(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedMethods(ArtMethod* other, size_t pointer_size)
+ bool HasDexCacheResolvedMethods(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedMethods(ArtMethod* other, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedMethods(ArtMethod** other_cache, size_t pointer_size)
+ bool HasSameDexCacheResolvedMethods(ArtMethod** other_cache, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kWithCheck = true>
- mirror::Class* GetDexCacheResolvedType(uint32_t type_idx, size_t ptr_size)
+ mirror::Class* GetDexCacheResolvedType(uint32_t type_idx, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- void SetDexCacheResolvedTypes(GcRoot<mirror::Class>* new_dex_cache_types, size_t ptr_size)
+ void SetDexCacheResolvedTypes(GcRoot<mirror::Class>* new_dex_cache_types,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasDexCacheResolvedTypes(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedTypes(ArtMethod* other, size_t pointer_size)
+ bool HasDexCacheResolvedTypes(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedTypes(ArtMethod* other, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedTypes(GcRoot<mirror::Class>* other_cache, size_t pointer_size)
+ bool HasSameDexCacheResolvedTypes(GcRoot<mirror::Class>* other_cache, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Get the Class* from the type index into this method's dex cache.
- mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve, size_t ptr_size)
+ mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if this method has the same name and signature of the other method.
bool HasSameNameAndSignature(ArtMethod* other) SHARED_REQUIRES(Locks::mutator_lock_);
// Find the method that this method overrides.
- ArtMethod* FindOverriddenMethod(size_t pointer_size)
+ ArtMethod* FindOverriddenMethod(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Find the method index for this method within other_dexfile. If this method isn't present then
@@ -478,21 +480,22 @@
SHARED_REQUIRES(Locks::mutator_lock_);
const void* GetEntryPointFromQuickCompiledCode() {
- return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*));
+ return GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
}
- ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size) {
+ ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(PointerSize pointer_size) {
return GetNativePointer<const void*>(
EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
}
void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) {
SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code,
- sizeof(void*));
+ kRuntimePointerSize);
}
ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
- const void* entry_point_from_quick_compiled_code, size_t pointer_size) {
+ const void* entry_point_from_quick_compiled_code, PointerSize pointer_size) {
SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
- entry_point_from_quick_compiled_code, pointer_size);
+ entry_point_from_quick_compiled_code,
+ pointer_size);
}
void RegisterNative(const void* native_method, bool is_fast)
@@ -500,81 +503,84 @@
void UnregisterNative() SHARED_REQUIRES(Locks::mutator_lock_);
- static MemberOffset DexCacheResolvedMethodsOffset(size_t pointer_size) {
+ static MemberOffset DexCacheResolvedMethodsOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
- PtrSizedFields, dex_cache_resolved_methods_) / sizeof(void*) * pointer_size);
+ PtrSizedFields, dex_cache_resolved_methods_) / sizeof(void*)
+ * static_cast<size_t>(pointer_size));
}
- static MemberOffset DexCacheResolvedTypesOffset(size_t pointer_size) {
+ static MemberOffset DexCacheResolvedTypesOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
- PtrSizedFields, dex_cache_resolved_types_) / sizeof(void*) * pointer_size);
+ PtrSizedFields, dex_cache_resolved_types_) / sizeof(void*)
+ * static_cast<size_t>(pointer_size));
}
- static MemberOffset DataOffset(size_t pointer_size) {
+ static MemberOffset DataOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
- PtrSizedFields, data_) / sizeof(void*) * pointer_size);
+ PtrSizedFields, data_) / sizeof(void*) * static_cast<size_t>(pointer_size));
}
- static MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
+ static MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
return DataOffset(pointer_size);
}
- static MemberOffset EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
+ static MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
- PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
+ PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*)
+ * static_cast<size_t>(pointer_size));
}
- ImtConflictTable* GetImtConflictTable(size_t pointer_size) {
+ ImtConflictTable* GetImtConflictTable(PointerSize pointer_size) {
DCHECK(IsRuntimeMethod());
return reinterpret_cast<ImtConflictTable*>(GetDataPtrSize(pointer_size));
}
- ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, size_t pointer_size) {
+ ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, PointerSize pointer_size) {
DCHECK(IsRuntimeMethod());
SetDataPtrSize(table, pointer_size);
}
- ProfilingInfo* GetProfilingInfo(size_t pointer_size) {
+ ProfilingInfo* GetProfilingInfo(PointerSize pointer_size) {
return reinterpret_cast<ProfilingInfo*>(GetDataPtrSize(pointer_size));
}
ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) {
- SetDataPtrSize(info, sizeof(void*));
+ SetDataPtrSize(info, kRuntimePointerSize);
}
- ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, size_t pointer_size) {
+ ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, PointerSize pointer_size) {
SetDataPtrSize(info, pointer_size);
}
static MemberOffset ProfilingInfoOffset() {
- DCHECK(IsImagePointerSize(sizeof(void*)));
- return DataOffset(sizeof(void*));
+ DCHECK(IsImagePointerSize(kRuntimePointerSize));
+ return DataOffset(kRuntimePointerSize);
}
void* GetEntryPointFromJni() {
DCHECK(IsNative());
- return GetEntryPointFromJniPtrSize(sizeof(void*));
+ return GetEntryPointFromJniPtrSize(kRuntimePointerSize);
}
- ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size) {
+ ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(PointerSize pointer_size) {
return GetDataPtrSize(pointer_size);
}
void SetEntryPointFromJni(const void* entrypoint) {
DCHECK(IsNative());
- SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*));
+ SetEntryPointFromJniPtrSize(entrypoint, kRuntimePointerSize);
}
- ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) {
+ ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, PointerSize pointer_size) {
SetDataPtrSize(entrypoint, pointer_size);
}
- ALWAYS_INLINE void* GetDataPtrSize(size_t pointer_size) {
+ ALWAYS_INLINE void* GetDataPtrSize(PointerSize pointer_size) {
DCHECK(IsImagePointerSize(pointer_size));
return GetNativePointer<void*>(DataOffset(pointer_size), pointer_size);
}
- ALWAYS_INLINE void SetDataPtrSize(const void* data, size_t pointer_size) {
+ ALWAYS_INLINE void SetDataPtrSize(const void* data, PointerSize pointer_size) {
DCHECK(IsImagePointerSize(pointer_size));
SetNativePointer(DataOffset(pointer_size), data, pointer_size);
}
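
The offset helpers in this hunk rescale host-layout offsets to the image layout: OFFSETOF_MEMBER divided by sizeof(void*) yields a slot index, which is then multiplied by the target pointer width. A sketch with a simplified PtrSizedFields (the real struct's members differ):

    #include <cstddef>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    struct PtrSizedFields {
      void* dex_cache_resolved_methods;
      void* dex_cache_resolved_types;
      void* data;
      void* entry_point_from_quick_compiled_code;
    };

    constexpr size_t ImageOffset(size_t host_offset, PointerSize ps) {
      // Slot index in the host layout, times the target pointer width.
      return host_offset / sizeof(void*) * static_cast<size_t>(ps);
    }

    // `data` is slot 2: byte 8 in a 32-bit image, byte 16 in a 64-bit image,
    // regardless of which host computes it.
    static_assert(ImageOffset(offsetof(PtrSizedFields, data), PointerSize::k32) == 8, "");
    static_assert(ImageOffset(offsetof(PtrSizedFields, data), PointerSize::k64) == 16, "");
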
@@ -603,7 +609,7 @@
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename RootVisitorType>
- void VisitRoots(RootVisitorType& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS;
+ void VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS;
const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
@@ -624,7 +630,8 @@
const DexFile::CodeItem* GetCodeItem() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsResolvedTypeIdx(uint16_t type_idx, size_t ptr_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsResolvedTypeIdx(uint16_t type_idx, PointerSize pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -645,14 +652,14 @@
// May cause thread suspension due to GetClassFromTypeIdx calling ResolveType; this caused a
// large number of bugs at call sites.
- mirror::Class* GetReturnType(bool resolve, size_t ptr_size)
+ mirror::Class* GetReturnType(bool resolve, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size)
+ ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// May cause thread suspension due to class resolution.
@@ -660,22 +667,22 @@
SHARED_REQUIRES(Locks::mutator_lock_);
// Size of an instance of this native class.
- static size_t Size(size_t pointer_size) {
+ static size_t Size(PointerSize pointer_size) {
return PtrSizedFieldsOffset(pointer_size) +
- (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
+ (sizeof(PtrSizedFields) / sizeof(void*)) * static_cast<size_t>(pointer_size);
}
// Alignment of an instance of this native class.
- static size_t Alignment(size_t pointer_size) {
+ static size_t Alignment(PointerSize pointer_size) {
// The ArtMethod alignment is the same as image pointer size. This differs from
// alignof(ArtMethod) if cross-compiling with pointer_size != sizeof(void*).
- return pointer_size;
+ return static_cast<size_t>(pointer_size);
}
- void CopyFrom(ArtMethod* src, size_t image_pointer_size)
+ void CopyFrom(ArtMethod* src, PointerSize image_pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(size_t pointer_size)
+ ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Note, hotness_counter_ updates are non-atomic but it doesn't need to be precise. Also,
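
Size() and Alignment() taking PointerSize explicitly is what lets a 64-bit dex2oat lay out ArtMethod objects for a 32-bit image. A compilable sketch of the size computation; the fixed-part size and pointer-field count are hypothetical:

    #include <cstddef>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    constexpr size_t kFixedPartBytes = 20;    // hypothetical non-pointer fields
    constexpr size_t kPointerFieldCount = 4;  // hypothetical ptr-sized fields

    constexpr size_t RoundUp(size_t x, size_t n) {
      return ((x + n - 1) / n) * n;
    }

    constexpr size_t ObjectSize(PointerSize ps) {
      // Pad the fixed part to the image pointer size, then append the
      // pointer-sized fields at the target width.
      return RoundUp(kFixedPartBytes, static_cast<size_t>(ps)) +
             kPointerFieldCount * static_cast<size_t>(ps);
    }

    static_assert(ObjectSize(PointerSize::k32) == 36, "32-bit image layout");
    static_assert(ObjectSize(PointerSize::k64) == 56, "64-bit image layout");
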
@@ -711,12 +718,13 @@
// Update heap objects and non-entrypoint pointers by the passed in visitor for image relocation.
// Does not use read barrier.
template <typename Visitor>
- ALWAYS_INLINE void UpdateObjectsForImageRelocation(const Visitor& visitor, size_t pointer_size)
+ ALWAYS_INLINE void UpdateObjectsForImageRelocation(const Visitor& visitor,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Update entry points by passing them through the visitor.
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
- ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor, size_t pointer_size);
+ ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor, PointerSize pointer_size);
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
@@ -765,20 +773,20 @@
} ptr_sized_fields_;
private:
- static constexpr size_t PtrSizedFieldsOffset(size_t pointer_size) {
+ static constexpr size_t PtrSizedFieldsOffset(PointerSize pointer_size) {
// Round up to pointer size for padding field. Tested in art_method.cc.
- return RoundUp(offsetof(ArtMethod, hotness_count_) + sizeof(hotness_count_), pointer_size);
+ return RoundUp(offsetof(ArtMethod, hotness_count_) + sizeof(hotness_count_),
+ static_cast<size_t>(pointer_size));
}
// Compare given pointer size to the image pointer size.
- static bool IsImagePointerSize(size_t pointer_size);
+ static bool IsImagePointerSize(PointerSize pointer_size);
template<typename T>
- ALWAYS_INLINE T GetNativePointer(MemberOffset offset, size_t pointer_size) const {
+ ALWAYS_INLINE T GetNativePointer(MemberOffset offset, PointerSize pointer_size) const {
static_assert(std::is_pointer<T>::value, "T must be a pointer type");
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
- if (pointer_size == sizeof(uint32_t)) {
+ if (pointer_size == PointerSize::k32) {
return reinterpret_cast<T>(*reinterpret_cast<const uint32_t*>(addr));
} else {
auto v = *reinterpret_cast<const uint64_t*>(addr);
@@ -787,11 +795,10 @@
}
template<typename T>
- ALWAYS_INLINE void SetNativePointer(MemberOffset offset, T new_value, size_t pointer_size) {
+ ALWAYS_INLINE void SetNativePointer(MemberOffset offset, T new_value, PointerSize pointer_size) {
static_assert(std::is_pointer<T>::value, "T must be a pointer type");
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
- if (pointer_size == sizeof(uint32_t)) {
+ if (pointer_size == PointerSize::k32) {
uintptr_t ptr = reinterpret_cast<uintptr_t>(new_value);
*reinterpret_cast<uint32_t*>(addr) = dchecked_integral_cast<uint32_t>(ptr);
} else {
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index b7df90d..0619af8 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -57,8 +57,10 @@
#if defined(__LP64__)
#define POINTER_SIZE_SHIFT 3
+#define POINTER_SIZE art::PointerSize::k64
#else
#define POINTER_SIZE_SHIFT 2
+#define POINTER_SIZE art::PointerSize::k32
#endif
ADD_TEST_EQ(static_cast<size_t>(1U << POINTER_SIZE_SHIFT),
static_cast<size_t>(__SIZEOF_POINTER__))
@@ -71,54 +73,54 @@
// Offset of field Thread::tlsPtr_.exception.
#define THREAD_EXCEPTION_OFFSET (THREAD_CARD_TABLE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_EXCEPTION_OFFSET,
- art::Thread::ExceptionOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ExceptionOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.managed_stack.top_quick_frame_.
#define THREAD_TOP_QUICK_FRAME_OFFSET (THREAD_CARD_TABLE_OFFSET + (3 * __SIZEOF_POINTER__))
ADD_TEST_EQ(THREAD_TOP_QUICK_FRAME_OFFSET,
- art::Thread::TopOfManagedStackOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::TopOfManagedStackOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.self.
#define THREAD_SELF_OFFSET (THREAD_CARD_TABLE_OFFSET + (9 * __SIZEOF_POINTER__))
ADD_TEST_EQ(THREAD_SELF_OFFSET,
- art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::SelfOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_objects.
#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_CARD_TABLE_OFFSET + 197 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
- art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ThreadLocalObjectsOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_pos.
#define THREAD_LOCAL_POS_OFFSET (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__)
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
- art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ThreadLocalPosOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_end.
#define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
- art::Thread::ThreadLocalEndOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ThreadLocalEndOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_END_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
- art::Thread::MterpCurrentIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_default_ibase.
#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_CURRENT_IBASE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_DEFAULT_IBASE_OFFSET,
- art::Thread::MterpDefaultIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::MterpDefaultIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_alt_ibase.
#define THREAD_ALT_IBASE_OFFSET (THREAD_DEFAULT_IBASE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_ALT_IBASE_OFFSET,
- art::Thread::MterpAltIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::MterpAltIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.rosalloc_runs.
#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_ALT_IBASE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
- art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::RosAllocRunsOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
#define THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 16 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
- art::Thread::ThreadLocalAllocStackTopOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ThreadLocalAllocStackTopOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_end.
#define THREAD_LOCAL_ALLOC_STACK_END_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 17 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
- art::Thread::ThreadLocalAllocStackEndOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ThreadLocalAllocStackEndOffset<POINTER_SIZE>().Int32Value())
// Offsets within ShadowFrame.
#define SHADOWFRAME_LINK_OFFSET 0
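
POINTER_SIZE gives the assembler-support header one name for the enum constant, so every ADD_TEST_EQ can cross-check a hand-written numeric #define against the C++ template instantiated at the same width. A reduced sketch of that cross-check; SelfOffset and the slot number are hypothetical:

    #include <cstddef>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    #if defined(__LP64__)
    #define POINTER_SIZE PointerSize::k64
    #else
    #define POINTER_SIZE PointerSize::k32
    #endif

    template <PointerSize kPointerSize>
    constexpr size_t SelfOffset() {
      return 9u * static_cast<size_t>(kPointerSize);  // slot 9 of the TLS block
    }

    // The assembler consumes the literal arithmetic; C++ verifies it against
    // the template instantiated at the same pointer size.
    #define THREAD_SELF_OFFSET (9u * __SIZEOF_POINTER__)
    static_assert(THREAD_SELF_OFFSET == SelfOffset<POINTER_SIZE>(),
                  "asm constant and C++ offset agree");
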
diff --git a/runtime/base/enums.h b/runtime/base/enums.h
new file mode 100644
index 0000000..51b86ea
--- /dev/null
+++ b/runtime/base/enums.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_ENUMS_H_
+#define ART_RUNTIME_BASE_ENUMS_H_
+
+#include <cstddef>
+#include <ostream>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace art {
+
+enum class PointerSize : size_t {
+ k32 = 4,
+ k64 = 8
+};
+std::ostream& operator<<(std::ostream& os, const PointerSize& rhs);
+
+static constexpr PointerSize kRuntimePointerSize = sizeof(void*) == 8U
+ ? PointerSize::k64
+ : PointerSize::k32;
+
+template <typename T>
+static constexpr PointerSize ConvertToPointerSize(T any) {
+ if (any == 4 || any == 8) {
+ return static_cast<PointerSize>(any);
+ } else {
+ LOG(FATAL);
+ UNREACHABLE();
+ }
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_ENUMS_H_
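
A self-contained usage sketch of the new header: kRuntimePointerSize is fixed at build time, while ConvertToPointerSize gates a runtime integer (such as an image header field) before it becomes a typed PointerSize. std::abort stands in for LOG(FATAL):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <iostream>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    static constexpr PointerSize kRuntimePointerSize =
        sizeof(void*) == 8U ? PointerSize::k64 : PointerSize::k32;

    template <typename T>
    constexpr PointerSize ConvertToPointerSize(T any) {
      return (any == 4 || any == 8)
                 ? static_cast<PointerSize>(any)
                 : (std::abort(), PointerSize::k32);  // never reached after abort
    }

    int main() {
      uint32_t from_header = 8;  // e.g. read from an image file header
      PointerSize ps = ConvertToPointerSize(from_header);
      std::cout << ((ps == kRuntimePointerSize) ? "native image" : "cross image")
                << "\n";
    }
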
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f13fea0..ec589b2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -323,7 +323,7 @@
quick_imt_conflict_trampoline_(nullptr),
quick_generic_jni_trampoline_(nullptr),
quick_to_interpreter_bridge_trampoline_(nullptr),
- image_pointer_size_(sizeof(void*)) {
+ image_pointer_size_(kRuntimePointerSize) {
CHECK(intern_table_ != nullptr);
static_assert(kFindArrayCacheSize == arraysize(find_array_class_cache_),
"Array cache size wrong.");
@@ -361,10 +361,6 @@
// Use the pointer size from the runtime since we are probably creating the image.
image_pointer_size_ = InstructionSetPointerSize(runtime->GetInstructionSet());
- if (!ValidPointerSize(image_pointer_size_)) {
- *error_msg = StringPrintf("Invalid image pointer size: %zu", image_pointer_size_);
- return false;
- }
// java_lang_Class comes first, it's needed for AllocClass
// The GC can't handle an object with a null class since we can't get the size of this object.
@@ -791,7 +787,7 @@
static void SanityCheckArtMethodPointerArray(mirror::PointerArray* arr,
mirror::Class* expected_class,
- size_t pointer_size,
+ PointerSize pointer_size,
const std::vector<gc::space::ImageSpace*>& spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(arr != nullptr);
@@ -809,7 +805,7 @@
static void SanityCheckArtMethodPointerArray(ArtMethod** arr,
size_t size,
- size_t pointer_size,
+ PointerSize pointer_size,
const std::vector<gc::space::ImageSpace*>& spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK_EQ(arr != nullptr, size != 0u);
@@ -883,7 +879,7 @@
// Set image methods' entry point to interpreter.
class SetInterpreterEntrypointArtMethodVisitor : public ArtMethodVisitor {
public:
- explicit SetInterpreterEntrypointArtMethodVisitor(size_t image_pointer_size)
+ explicit SetInterpreterEntrypointArtMethodVisitor(PointerSize image_pointer_size)
: image_pointer_size_(image_pointer_size) {}
void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -897,7 +893,7 @@
}
private:
- const size_t image_pointer_size_;
+ const PointerSize image_pointer_size_;
DISALLOW_COPY_AND_ASSIGN(SetInterpreterEntrypointArtMethodVisitor);
};
@@ -907,7 +903,7 @@
const void* quick_imt_conflict_trampoline;
const void* quick_generic_jni_trampoline;
const void* quick_to_interpreter_bridge_trampoline;
- size_t pointer_size;
+ PointerSize pointer_size;
ArtMethod* m;
bool error;
};
@@ -939,18 +935,19 @@
gc::Heap* const heap = runtime->GetHeap();
std::vector<gc::space::ImageSpace*> spaces = heap->GetBootImageSpaces();
CHECK(!spaces.empty());
- image_pointer_size_ = spaces[0]->GetImageHeader().GetPointerSize();
- if (!ValidPointerSize(image_pointer_size_)) {
- *error_msg = StringPrintf("Invalid image pointer size: %zu", image_pointer_size_);
+ uint32_t pointer_size_unchecked = spaces[0]->GetImageHeader().GetPointerSizeUnchecked();
+ if (!ValidPointerSize(pointer_size_unchecked)) {
+ *error_msg = StringPrintf("Invalid image pointer size: %u", pointer_size_unchecked);
return false;
}
+ image_pointer_size_ = spaces[0]->GetImageHeader().GetPointerSize();
if (!runtime->IsAotCompiler()) {
// Only the Aot compiler supports having an image with a different pointer size than the
// runtime. This happens on the host for compiling 32 bit tests since we use a 64 bit libart
// compiler. We may also use 32 bit dex2oat on a system with 64 bit apps.
- if (image_pointer_size_ != sizeof(void*)) {
+ if (image_pointer_size_ != kRuntimePointerSize) {
*error_msg = StringPrintf("Runtime must use current image pointer size: %zu vs %zu",
- image_pointer_size_,
+ static_cast<size_t>(image_pointer_size_),
sizeof(void*));
return false;
}
@@ -1150,7 +1147,7 @@
explicit FixupArtMethodArrayVisitor(const ImageHeader& header) : header_(header) {}
virtual void Visit(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
- GcRoot<mirror::Class>* resolved_types = method->GetDexCacheResolvedTypes(sizeof(void*));
+ GcRoot<mirror::Class>* resolved_types = method->GetDexCacheResolvedTypes(kRuntimePointerSize);
const bool is_copied = method->IsCopied();
if (resolved_types != nullptr) {
bool in_image_space = false;
@@ -1165,10 +1162,10 @@
if (!is_copied || in_image_space) {
// Go through the array so that we don't need to do a slow map lookup.
method->SetDexCacheResolvedTypes(*reinterpret_cast<GcRoot<mirror::Class>**>(resolved_types),
- sizeof(void*));
+ kRuntimePointerSize);
}
}
- ArtMethod** resolved_methods = method->GetDexCacheResolvedMethods(sizeof(void*));
+ ArtMethod** resolved_methods = method->GetDexCacheResolvedMethods(kRuntimePointerSize);
if (resolved_methods != nullptr) {
bool in_image_space = false;
if (kIsDebugBuild || is_copied) {
@@ -1182,7 +1179,7 @@
if (!is_copied || in_image_space) {
// Go through the array so that we don't need to do a slow map lookup.
method->SetDexCacheResolvedMethods(*reinterpret_cast<ArtMethod***>(resolved_methods),
- sizeof(void*));
+ kRuntimePointerSize);
}
}
}
@@ -1382,11 +1379,11 @@
VLOG(image) << "From " << klass->GetDexCache()->GetDexFile()->GetBaseLocation();
}
VLOG(image) << "Direct methods";
- for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) {
+ for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
VLOG(image) << PrettyMethod(&m);
}
VLOG(image) << "Virtual methods";
- for (ArtMethod& m : klass->GetVirtualMethods(sizeof(void*))) {
+ for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
VLOG(image) << PrettyMethod(&m);
}
}
@@ -1422,7 +1419,7 @@
}
}
if (kIsDebugBuild) {
- for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) {
+ for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
if (!IsQuickResolutionStub(code) &&
@@ -1432,7 +1429,7 @@
DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
}
}
- for (ArtMethod& m : klass->GetVirtualMethods(sizeof(void*))) {
+ for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
if (!IsQuickResolutionStub(code) &&
@@ -1451,14 +1448,14 @@
if (*out_forward_dex_cache_array) {
ScopedTrace timing("Fixup ArtMethod dex cache arrays");
FixupArtMethodArrayVisitor visitor(header);
- header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*));
+ header.VisitPackedArtMethods(&visitor, space->Begin(), kRuntimePointerSize);
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
}
if (kVerifyArtMethodDeclaringClasses) {
ScopedTrace timing("Verify declaring classes");
ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
VerifyDeclaringClassVisitor visitor;
- header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*));
+ header.VisitPackedArtMethods(&visitor, space->Begin(), kRuntimePointerSize);
}
return true;
}
@@ -1810,7 +1807,7 @@
// This verification needs to happen after the classes have been added to the class loader.
// Since it ensures classes are in the class table.
VerifyClassInTableArtMethodVisitor visitor2(class_table);
- header.VisitPackedArtMethods(&visitor2, space->Begin(), sizeof(void*));
+ header.VisitPackedArtMethods(&visitor2, space->Begin(), kRuntimePointerSize);
}
VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time);
return true;
@@ -2054,9 +2051,10 @@
}
mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
- return down_cast<mirror::PointerArray*>(image_pointer_size_ == 8u ?
- static_cast<mirror::Array*>(mirror::LongArray::Alloc(self, length)) :
- static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length)));
+ return down_cast<mirror::PointerArray*>(
+ image_pointer_size_ == PointerSize::k64
+ ? static_cast<mirror::Array*>(mirror::LongArray::Alloc(self, length))
+ : static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length)));
}
mirror::DexCache* ClassLinker::AllocDexCache(Thread* self,
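
AllocPointerArray now branches on the enum rather than on `image_pointer_size_ == 8u`: 64-bit images back pointer arrays with long arrays, 32-bit images with int arrays. A simplified standalone analogue, with std::variant in place of the mirror:: array types:

    #include <cstddef>
    #include <cstdint>
    #include <variant>
    #include <vector>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    // Stand-ins for mirror::IntArray / mirror::LongArray.
    using PointerArray = std::variant<std::vector<uint32_t>, std::vector<uint64_t>>;

    PointerArray AllocPointerArray(PointerSize image_pointer_size, size_t length) {
      return image_pointer_size == PointerSize::k64
                 ? PointerArray(std::vector<uint64_t>(length))
                 : PointerArray(std::vector<uint32_t>(length));
    }

    int main() {
      PointerArray arr = AllocPointerArray(PointerSize::k64, 16);
      return std::holds_alternative<std::vector<uint64_t>>(arr) ? 0 : 1;
    }
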
@@ -2081,8 +2079,6 @@
raw_arrays = dex_file.GetOatDexFile()->GetDexCacheArrays();
} else if (dex_file.NumStringIds() != 0u || dex_file.NumTypeIds() != 0u ||
dex_file.NumMethodIds() != 0u || dex_file.NumFieldIds() != 0u) {
- // NOTE: We "leak" the raw_arrays because we never destroy the dex cache.
- DCHECK(image_pointer_size_ == 4u || image_pointer_size_ == 8u);
// Zero-initialized.
raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
}
@@ -4826,7 +4822,7 @@
}
static bool HasSameSignatureWithDifferentClassLoaders(Thread* self,
- size_t pointer_size,
+ PointerSize pointer_size,
Handle<mirror::Class> klass,
Handle<mirror::Class> super_klass,
ArtMethod* method1,
@@ -5042,7 +5038,7 @@
return class_loader == nullptr ? &boot_class_table_ : class_loader->GetClassTable();
}
-static ImTable* FindSuperImt(mirror::Class* klass, size_t pointer_size)
+static ImTable* FindSuperImt(mirror::Class* klass, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
while (klass->HasSuperClass()) {
klass = klass->GetSuperClass();
@@ -5580,7 +5576,7 @@
LinkVirtualHashTable(Handle<mirror::Class> klass,
size_t hash_size,
uint32_t* hash_table,
- size_t image_pointer_size)
+ PointerSize image_pointer_size)
: klass_(klass),
hash_size_(hash_size),
hash_table_(hash_table),
@@ -5642,13 +5638,20 @@
Handle<mirror::Class> klass_;
const size_t hash_size_;
uint32_t* const hash_table_;
- const size_t image_pointer_size_;
+ const PointerSize image_pointer_size_;
};
const uint32_t LinkVirtualHashTable::invalid_index_ = std::numeric_limits<uint32_t>::max();
const uint32_t LinkVirtualHashTable::removed_index_ = std::numeric_limits<uint32_t>::max() - 1;
-bool ClassLinker::LinkVirtualMethods(
+// b/30419309
+#if defined(__i386__)
+#define X86_OPTNONE __attribute__((optnone))
+#else
+#define X86_OPTNONE
+#endif
+
+X86_OPTNONE bool ClassLinker::LinkVirtualMethods(
Thread* self,
Handle<mirror::Class> klass,
/*out*/std::unordered_map<size_t, ClassLinker::MethodTranslation>* default_translations) {
@@ -5896,7 +5899,7 @@
Handle<mirror::IfTable> iftable,
size_t ifstart,
Handle<mirror::Class> iface,
- size_t image_pointer_size)
+ PointerSize image_pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(self != nullptr);
DCHECK(iface.Get() != nullptr);
@@ -6045,7 +6048,7 @@
ArtMethod* interface_method,
ArtMethod* method,
bool force_new_conflict_method) {
- ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
+ ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
Runtime* const runtime = Runtime::Current();
LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader());
bool new_entry = conflict_method == runtime->GetImtConflictMethod() || force_new_conflict_method;
@@ -6174,7 +6177,7 @@
ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count,
LinearAlloc* linear_alloc,
- size_t image_pointer_size) {
+ PointerSize image_pointer_size) {
void* data = linear_alloc->Alloc(Thread::Current(),
ImtConflictTable::ComputeSize(count,
image_pointer_size));
@@ -6507,7 +6510,7 @@
return nullptr;
}
-static void SanityCheckVTable(Handle<mirror::Class> klass, uint32_t pointer_size)
+static void SanityCheckVTable(Handle<mirror::Class> klass, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::PointerArray* check_vtable = klass->GetVTableDuringLinking();
mirror::Class* superclass = (klass->HasSuperClass()) ? klass->GetSuperClass() : nullptr;
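Throughout this patch, PointerSize, kRuntimePointerSize, and the PointerSize::k32/k64 constants come from the newly included base/enums.h, which the diff itself does not reproduce. Inferred from the call sites, the abstraction is roughly the following sketch (not the verbatim header):

    // Values chosen so that casting back to size_t yields the width in bytes,
    // as the static_cast<size_t>(pointer_size) arithmetic in this patch relies on.
    enum class PointerSize : size_t {
      k32 = 4,
      k64 = 8,
    };

    // Compile-time pointer size of the runtime itself; replaces sizeof(void*)
    // at the call sites below.
    static constexpr PointerSize kRuntimePointerSize =
        sizeof(void*) == 8U ? PointerSize::k64 : PointerSize::k32;

Because a PointerSize can only hold k32 or k64, ad-hoc checks against 4u/8u that the old size_t representation required can simply be deleted, as in the AllocDexCache hunk above.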
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d6822c5..fcc6b23 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -25,6 +25,7 @@
#include <vector>
#include "base/allocator.h"
+#include "base/enums.h"
#include "base/hash_set.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -566,8 +567,7 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
- size_t GetImagePointerSize() const {
- DCHECK(ValidPointerSize(image_pointer_size_)) << image_pointer_size_;
+ PointerSize GetImagePointerSize() const {
return image_pointer_size_;
}
@@ -630,7 +630,7 @@
// Static version for when the class linker is not yet created.
static ImtConflictTable* CreateImtConflictTable(size_t count,
LinearAlloc* linear_alloc,
- size_t pointer_size);
+ PointerSize pointer_size);
// Create the IMT and conflict tables for a class.
@@ -1166,7 +1166,7 @@
const void* quick_to_interpreter_bridge_trampoline_;
// Image pointer size.
- size_t image_pointer_size_;
+ PointerSize image_pointer_size_;
friend class ImageDumper; // for DexLock
friend class ImageWriter; // for GetClassRoots
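GetImagePointerSize is the pattern in miniature: the getter loses its ValidPointerSize DCHECK because an invalid value is no longer representable, pushing validation to the single point where a raw integer becomes a PointerSize. A hypothetical conversion helper (not part of this patch) makes that boundary explicit:

    // Raw sizes enter the type system exactly once; every later use is
    // checked by the compiler rather than by per-call-site DCHECKs.
    PointerSize ToPointerSizeChecked(size_t raw) {
      CHECK(raw == 4u || raw == 8u) << raw;
      return static_cast<PointerSize>(raw);
    }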
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 48b6316..5031cf3 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -21,6 +21,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "dex_file.h"
@@ -147,7 +148,7 @@
EXPECT_EQ(0U, JavaLangObject->NumStaticFields());
EXPECT_EQ(0U, JavaLangObject->NumDirectInterfaces());
- size_t pointer_size = class_linker_->GetImagePointerSize();
+ PointerSize pointer_size = class_linker_->GetImagePointerSize();
ArtMethod* unimplemented = runtime_->GetImtUnimplementedMethod();
ImTable* imt = JavaLangObject->GetImt(pointer_size);
ASSERT_NE(nullptr, imt);
@@ -216,7 +217,7 @@
mirror::Class* array_ptr = array->GetComponentType();
EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
- size_t pointer_size = class_linker_->GetImagePointerSize();
+ PointerSize pointer_size = class_linker_->GetImagePointerSize();
mirror::Class* JavaLangObject =
class_linker_->FindSystemClass(self, "Ljava/lang/Object;");
ImTable* JavaLangObject_imt = JavaLangObject->GetImt(pointer_size);
@@ -230,14 +231,14 @@
EXPECT_TRUE(method->GetName() != nullptr);
EXPECT_TRUE(method->GetSignature() != Signature::NoSignature());
- EXPECT_TRUE(method->HasDexCacheResolvedMethods(sizeof(void*)));
- EXPECT_TRUE(method->HasDexCacheResolvedTypes(sizeof(void*)));
+ EXPECT_TRUE(method->HasDexCacheResolvedMethods(kRuntimePointerSize));
+ EXPECT_TRUE(method->HasDexCacheResolvedTypes(kRuntimePointerSize));
EXPECT_TRUE(method->HasSameDexCacheResolvedMethods(
method->GetDeclaringClass()->GetDexCache()->GetResolvedMethods(),
- sizeof(void*)));
+ kRuntimePointerSize));
EXPECT_TRUE(method->HasSameDexCacheResolvedTypes(
method->GetDeclaringClass()->GetDexCache()->GetResolvedTypes(),
- sizeof(void*)));
+ kRuntimePointerSize));
}
void AssertField(mirror::Class* klass, ArtField* field)
@@ -275,7 +276,7 @@
if (klass->IsInterface()) {
EXPECT_TRUE(klass->IsAbstract());
// Check that all direct methods are static (either <clinit> or a regular static method).
- for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) {
+ for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
EXPECT_TRUE(m.IsStatic());
EXPECT_TRUE(m.IsDirect());
}
@@ -312,19 +313,19 @@
EXPECT_FALSE(klass->IsPrimitive());
EXPECT_TRUE(klass->CanAccess(klass.Get()));
- for (ArtMethod& method : klass->GetDirectMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetDirectMethods(kRuntimePointerSize)) {
AssertMethod(&method);
EXPECT_TRUE(method.IsDirect());
EXPECT_EQ(klass.Get(), method.GetDeclaringClass());
}
- for (ArtMethod& method : klass->GetDeclaredVirtualMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetDeclaredVirtualMethods(kRuntimePointerSize)) {
AssertMethod(&method);
EXPECT_FALSE(method.IsDirect());
EXPECT_EQ(klass.Get(), method.GetDeclaringClass());
}
- for (ArtMethod& method : klass->GetCopiedMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetCopiedMethods(kRuntimePointerSize)) {
AssertMethod(&method);
EXPECT_FALSE(method.IsDirect());
EXPECT_TRUE(method.IsCopied());
@@ -435,7 +436,7 @@
auto* resolved_methods = dex_cache->GetResolvedMethods();
for (size_t i = 0, num_methods = dex_cache->NumResolvedMethods(); i != num_methods; ++i) {
EXPECT_TRUE(
- mirror::DexCache::GetElementPtrSize(resolved_methods, i, sizeof(void*)) != nullptr)
+ mirror::DexCache::GetElementPtrSize(resolved_methods, i, kRuntimePointerSize) != nullptr)
<< dex.GetLocation() << " i=" << i;
}
}
@@ -929,7 +930,7 @@
// Static final primitives that are initialized by a compile-time constant
// expression resolve to a copy of a constant value from the constant pool.
// So <clinit> should be null.
- ArtMethod* clinit = statics->FindDirectMethod("<clinit>", "()V", sizeof(void*));
+ ArtMethod* clinit = statics->FindDirectMethod("<clinit>", "()V", kRuntimePointerSize);
EXPECT_TRUE(clinit == nullptr);
EXPECT_EQ(9U, statics->NumStaticFields());
@@ -1016,15 +1017,15 @@
EXPECT_TRUE(J->IsAssignableFrom(B.Get()));
const Signature void_sig = I->GetDexCache()->GetDexFile()->CreateSignature("()V");
- ArtMethod* Ii = I->FindVirtualMethod("i", void_sig, sizeof(void*));
- ArtMethod* Jj1 = J->FindVirtualMethod("j1", void_sig, sizeof(void*));
- ArtMethod* Jj2 = J->FindVirtualMethod("j2", void_sig, sizeof(void*));
- ArtMethod* Kj1 = K->FindInterfaceMethod("j1", void_sig, sizeof(void*));
- ArtMethod* Kj2 = K->FindInterfaceMethod("j2", void_sig, sizeof(void*));
- ArtMethod* Kk = K->FindInterfaceMethod("k", void_sig, sizeof(void*));
- ArtMethod* Ai = A->FindVirtualMethod("i", void_sig, sizeof(void*));
- ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig, sizeof(void*));
- ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig, sizeof(void*));
+ ArtMethod* Ii = I->FindVirtualMethod("i", void_sig, kRuntimePointerSize);
+ ArtMethod* Jj1 = J->FindVirtualMethod("j1", void_sig, kRuntimePointerSize);
+ ArtMethod* Jj2 = J->FindVirtualMethod("j2", void_sig, kRuntimePointerSize);
+ ArtMethod* Kj1 = K->FindInterfaceMethod("j1", void_sig, kRuntimePointerSize);
+ ArtMethod* Kj2 = K->FindInterfaceMethod("j2", void_sig, kRuntimePointerSize);
+ ArtMethod* Kk = K->FindInterfaceMethod("k", void_sig, kRuntimePointerSize);
+ ArtMethod* Ai = A->FindVirtualMethod("i", void_sig, kRuntimePointerSize);
+ ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig, kRuntimePointerSize);
+ ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig, kRuntimePointerSize);
ASSERT_TRUE(Ii != nullptr);
ASSERT_TRUE(Jj1 != nullptr);
ASSERT_TRUE(Jj2 != nullptr);
@@ -1039,12 +1040,12 @@
EXPECT_NE(Jj2, Aj2);
EXPECT_EQ(Kj1, Jj1);
EXPECT_EQ(Kj2, Jj2);
- EXPECT_EQ(Ai, A->FindVirtualMethodForInterface(Ii, sizeof(void*)));
- EXPECT_EQ(Aj1, A->FindVirtualMethodForInterface(Jj1, sizeof(void*)));
- EXPECT_EQ(Aj2, A->FindVirtualMethodForInterface(Jj2, sizeof(void*)));
- EXPECT_EQ(Ai, A->FindVirtualMethodForVirtualOrInterface(Ii, sizeof(void*)));
- EXPECT_EQ(Aj1, A->FindVirtualMethodForVirtualOrInterface(Jj1, sizeof(void*)));
- EXPECT_EQ(Aj2, A->FindVirtualMethodForVirtualOrInterface(Jj2, sizeof(void*)));
+ EXPECT_EQ(Ai, A->FindVirtualMethodForInterface(Ii, kRuntimePointerSize));
+ EXPECT_EQ(Aj1, A->FindVirtualMethodForInterface(Jj1, kRuntimePointerSize));
+ EXPECT_EQ(Aj2, A->FindVirtualMethodForInterface(Jj2, kRuntimePointerSize));
+ EXPECT_EQ(Ai, A->FindVirtualMethodForVirtualOrInterface(Ii, kRuntimePointerSize));
+ EXPECT_EQ(Aj1, A->FindVirtualMethodForVirtualOrInterface(Jj1, kRuntimePointerSize));
+ EXPECT_EQ(Aj2, A->FindVirtualMethodForVirtualOrInterface(Jj2, kRuntimePointerSize));
ArtField* Afoo = mirror::Class::FindStaticField(soa.Self(), A, "foo", "Ljava/lang/String;");
ArtField* Bfoo = mirror::Class::FindStaticField(soa.Self(), B, "foo", "Ljava/lang/String;");
@@ -1069,8 +1070,8 @@
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader);
- ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*));
- ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;", sizeof(void*));
+ ArtMethod* clinit = klass->FindClassInitializer(kRuntimePointerSize);
+ ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;", kRuntimePointerSize);
const DexFile::TypeId* type_id = dex_file->FindTypeId("LStaticsFromCode;");
ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = dex_file->GetIndexForTypeId(*type_id);
@@ -1134,19 +1135,19 @@
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Class;", class_loader);
ASSERT_TRUE(c != nullptr);
- EXPECT_EQ(c->GetClassSize(), mirror::Class::ClassClassSize(sizeof(void*)));
+ EXPECT_EQ(c->GetClassSize(), mirror::Class::ClassClassSize(kRuntimePointerSize));
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Object;", class_loader);
ASSERT_TRUE(c != nullptr);
- EXPECT_EQ(c->GetClassSize(), mirror::Object::ClassSize(sizeof(void*)));
+ EXPECT_EQ(c->GetClassSize(), mirror::Object::ClassSize(kRuntimePointerSize));
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/String;", class_loader);
ASSERT_TRUE(c != nullptr);
- EXPECT_EQ(c->GetClassSize(), mirror::String::ClassSize(sizeof(void*)));
+ EXPECT_EQ(c->GetClassSize(), mirror::String::ClassSize(kRuntimePointerSize));
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/DexCache;", class_loader);
ASSERT_TRUE(c != nullptr);
- EXPECT_EQ(c->GetClassSize(), mirror::DexCache::ClassSize(sizeof(void*)));
+ EXPECT_EQ(c->GetClassSize(), mirror::DexCache::ClassSize(kRuntimePointerSize));
}
static void CheckMethod(ArtMethod* method, bool verified)
@@ -1161,7 +1162,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
EXPECT_EQ((c->GetAccessFlags() & kAccVerificationAttempted) != 0U, preverified)
<< "Class " << PrettyClass(c) << " not as expected";
- for (auto& m : c->GetMethods(sizeof(void*))) {
+ for (auto& m : c->GetMethods(kRuntimePointerSize)) {
CheckMethod(&m, preverified);
}
}
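The many sizeof(void*) to kRuntimePointerSize substitutions in this test file are behavior-preserving: the constant is defined from sizeof(void*) itself, so for in-process code the two spellings agree. Under the enum sketch given earlier, the equivalence is even checkable at compile time:

    // Holds by construction of kRuntimePointerSize.
    static_assert(static_cast<size_t>(kRuntimePointerSize) == sizeof(void*),
                  "runtime pointer size equals the host pointer width");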
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 5b54f7d..9f3ff3f 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -23,6 +23,7 @@
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/time_utils.h"
#include "class_linker.h"
#include "class_linker-inl.h"
@@ -79,7 +80,7 @@
mirror::Class* declaring_class = m->GetDeclaringClass();
return declaring_class->FindDeclaredVirtualMethod(declaring_class->GetDexCache(),
m->GetDexMethodIndex(),
- sizeof(void*));
+ kRuntimePointerSize);
}
}
@@ -1406,7 +1407,7 @@
if (m == nullptr) {
return "null";
}
- return m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
+ return m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
}
std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
@@ -1526,9 +1527,9 @@
auto ptr_size = cl->GetImagePointerSize();
for (ArtMethod& m : c->GetMethods(ptr_size)) {
expandBufAddMethodId(pReply, ToMethodId(&m));
- expandBufAddUtf8String(pReply, m.GetInterfaceMethodIfProxy(sizeof(void*))->GetName());
- expandBufAddUtf8String(pReply,
- m.GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString());
+ expandBufAddUtf8String(pReply, m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName());
+ expandBufAddUtf8String(
+ pReply, m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetSignature().ToString());
if (with_generic) {
const char* generic_signature = "";
expandBufAddUtf8String(pReply, generic_signature);
@@ -3934,7 +3935,7 @@
mirror::Class* parameter_type =
m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_,
true /* resolve */,
- sizeof(void*));
+ kRuntimePointerSize);
mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
if (error != JDWP::ERR_NONE) {
return JDWP::ERR_INVALID_OBJECT;
@@ -4025,7 +4026,7 @@
// Translate the method through the vtable, unless the debugger wants to suppress it.
ArtMethod* m = pReq->method;
- size_t image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
ArtMethod* actual_method =
pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
@@ -5068,7 +5069,7 @@
ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
BufferedRootVisitor<128> root_visitor(visitor, RootInfo(kRootVMInternal));
for (Breakpoint& breakpoint : gBreakpoints) {
- breakpoint.Method()->VisitRoots(root_visitor, sizeof(void*));
+ breakpoint.Method()->VisitRoots(root_visitor, kRuntimePointerSize);
}
}
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 2a2d2c0..16087a5 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -29,6 +29,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/file_magic.h"
#include "base/hash_map.h"
#include "base/logging.h"
@@ -1328,7 +1329,7 @@
AnnotationValue annotation_value;
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::Class> h_klass(hs.NewHandle(klass));
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
Handle<mirror::Class> return_type(hs.NewHandle(
method->GetReturnType(true /* resolve */, pointer_size)));
if (!ProcessAnnotationValue(h_klass, &annotation, &annotation_value, return_type, kAllObjects)) {
@@ -1620,12 +1621,12 @@
Handle<mirror::String> string_name(
hs.NewHandle(mirror::String::AllocFromModifiedUtf8(self, name)));
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
ArtMethod* annotation_method =
- annotation_class->FindDeclaredVirtualMethodByName(name, sizeof(void*));
+ annotation_class->FindDeclaredVirtualMethodByName(name, pointer_size);
if (annotation_method == nullptr) {
return nullptr;
}
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
Handle<mirror::Class> method_return(hs.NewHandle(
annotation_method->GetReturnType(true /* resolve */, pointer_size)));
@@ -1640,11 +1641,12 @@
Handle<mirror::Object> new_member(hs.NewHandle(annotation_member_class->AllocObject(self)));
mirror::Method* method_obj_ptr;
DCHECK(!Runtime::Current()->IsActiveTransaction());
- if (pointer_size == 8U) {
- method_obj_ptr = mirror::Method::CreateFromArtMethod<8U, false>(self, annotation_method);
+ if (pointer_size == PointerSize::k64) {
+ method_obj_ptr = mirror::Method::CreateFromArtMethod<PointerSize::k64, false>(
+ self, annotation_method);
} else {
- DCHECK_EQ(pointer_size, 4U);
- method_obj_ptr = mirror::Method::CreateFromArtMethod<4U, false>(self, annotation_method);
+ method_obj_ptr = mirror::Method::CreateFromArtMethod<PointerSize::k32, false>(
+ self, annotation_method);
}
Handle<mirror::Method> method_object(hs.NewHandle(method_obj_ptr));
@@ -1960,22 +1962,24 @@
if (method == nullptr) {
return false;
}
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
set_object = true;
DCHECK(!Runtime::Current()->IsActiveTransaction());
if (method->IsConstructor()) {
- if (pointer_size == 8U) {
- element_object = mirror::Constructor::CreateFromArtMethod<8U, false>(self, method);
+ if (pointer_size == PointerSize::k64) {
+ element_object = mirror::Constructor::CreateFromArtMethod<PointerSize::k64,
+ false>(self, method);
} else {
- DCHECK_EQ(pointer_size, 4U);
- element_object = mirror::Constructor::CreateFromArtMethod<4U, false>(self, method);
+ element_object = mirror::Constructor::CreateFromArtMethod<PointerSize::k32,
+ false>(self, method);
}
} else {
- if (pointer_size == 8U) {
- element_object = mirror::Method::CreateFromArtMethod<8U, false>(self, method);
+ if (pointer_size == PointerSize::k64) {
+ element_object = mirror::Method::CreateFromArtMethod<PointerSize::k64,
+ false>(self, method);
} else {
- DCHECK_EQ(pointer_size, 4U);
- element_object = mirror::Method::CreateFromArtMethod<4U, false>(self, method);
+ element_object = mirror::Method::CreateFromArtMethod<PointerSize::k32,
+ false>(self, method);
}
}
if (element_object == nullptr) {
@@ -1998,12 +2002,11 @@
return false;
}
set_object = true;
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- if (pointer_size == 8) {
- element_object = mirror::Field::CreateFromArtField<8U>(self, field, true);
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ if (pointer_size == PointerSize::k64) {
+ element_object = mirror::Field::CreateFromArtField<PointerSize::k64>(self, field, true);
} else {
- DCHECK_EQ(pointer_size, 4U);
- element_object = mirror::Field::CreateFromArtField<4U>(self, field, true);
+ element_object = mirror::Field::CreateFromArtField<PointerSize::k32>(self, field, true);
}
if (element_object == nullptr) {
return false;
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 7ecd595..204ba46 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -20,6 +20,7 @@
#include "entrypoint_utils.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex_file.h"
@@ -52,7 +53,8 @@
uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, inlining_depth);
InvokeType invoke_type = static_cast<InvokeType>(
inline_info.GetInvokeTypeAtDepth(encoding, inlining_depth));
- ArtMethod* inlined_method = outer_method->GetDexCacheResolvedMethod(method_index, sizeof(void*));
+ ArtMethod* inlined_method = outer_method->GetDexCacheResolvedMethod(method_index,
+ kRuntimePointerSize);
if (!inlined_method->IsRuntimeMethod()) {
return inlined_method;
}
@@ -89,7 +91,7 @@
Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangString);
// Update the dex cache for future lookups.
caller->GetDexCache()->SetResolvedType(method_id.class_idx_, cls);
- inlined_method = cls->FindVirtualMethod("charAt", "(I)C", sizeof(void*));
+ inlined_method = cls->FindVirtualMethod("charAt", "(I)C", kRuntimePointerSize);
} else {
mirror::Class* klass = caller->GetDexCache()->GetResolvedType(method_id.class_idx_);
DCHECK_EQ(klass->GetDexCache(), caller->GetDexCache())
@@ -98,12 +100,12 @@
case kDirect:
case kStatic:
inlined_method =
- klass->FindDirectMethod(klass->GetDexCache(), method_index, sizeof(void*));
+ klass->FindDirectMethod(klass->GetDexCache(), method_index, kRuntimePointerSize);
break;
case kSuper:
case kVirtual:
inlined_method =
- klass->FindVirtualMethod(klass->GetDexCache(), method_index, sizeof(void*));
+ klass->FindVirtualMethod(klass->GetDexCache(), method_index, kRuntimePointerSize);
break;
default:
LOG(FATAL) << "Unimplemented inlined invocation type: " << invoke_type;
@@ -114,7 +116,7 @@
// Update the dex cache for future lookups. Note that for static methods, this is safe
// when the class is being initialized, as the entrypoint for the ArtMethod is at
// this point still the resolution trampoline.
- outer_method->SetDexCacheResolvedMethod(method_index, inlined_method, sizeof(void*));
+ outer_method->SetDexCacheResolvedMethod(method_index, inlined_method, kRuntimePointerSize);
return inlined_method;
}
@@ -130,7 +132,7 @@
ArtMethod* method,
Thread* self, bool* slow_path) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, pointer_size);
if (UNLIKELY(klass == nullptr)) {
klass = class_linker->ResolveType(type_idx, method);
@@ -275,7 +277,7 @@
return nullptr; // Failure
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, pointer_size);
if (UNLIKELY(klass == nullptr)) { // Not in dex cache so try to resolve
klass = class_linker->ResolveType(type_idx, method);
@@ -381,7 +383,7 @@
//
// In particular, don't assume the dex instruction already correctly knows if the
// real field is static or not. The resolution must not be aware of this.
- ArtMethod* method = referrer->GetInterfaceMethodIfProxy(sizeof(void*));
+ ArtMethod* method = referrer->GetInterfaceMethodIfProxy(kRuntimePointerSize);
StackHandleScope<2> hs(self);
Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(method->GetDexCache()));
@@ -601,7 +603,7 @@
}
case kInterface: {
uint32_t imt_index = resolved_method->GetImtIndex();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
ArtMethod* imt_method = (*this_object)->GetClass()->GetImt(pointer_size)->
Get(imt_index, pointer_size);
if (!imt_method->IsRuntimeMethod()) {
@@ -655,7 +657,8 @@
inline ArtField* FindFieldFast(uint32_t field_idx, ArtMethod* referrer, FindFieldType type,
size_t expected_size) {
ArtField* resolved_field =
- referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx, sizeof(void*));
+ referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx,
+ kRuntimePointerSize);
if (UNLIKELY(resolved_field == nullptr)) {
return nullptr;
}
@@ -710,7 +713,7 @@
}
mirror::Class* referring_class = referrer->GetDeclaringClass();
ArtMethod* resolved_method =
- referring_class->GetDexCache()->GetResolvedMethod(method_idx, sizeof(void*));
+ referring_class->GetDexCache()->GetResolvedMethod(method_idx, kRuntimePointerSize);
if (UNLIKELY(resolved_method == nullptr)) {
return nullptr;
}
@@ -729,7 +732,8 @@
}
}
if (type == kInterface) { // Most common form of slow path dispatch.
- return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method, sizeof(void*));
+ return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method,
+ kRuntimePointerSize);
} else if (type == kStatic || type == kDirect) {
return resolved_method;
} else if (type == kSuper) {
@@ -752,15 +756,15 @@
// The super class does not have the method.
return nullptr;
}
- return super_class->GetVTableEntry(resolved_method->GetMethodIndex(), sizeof(void*));
+ return super_class->GetVTableEntry(resolved_method->GetMethodIndex(), kRuntimePointerSize);
} else {
return method_reference_class->FindVirtualMethodForInterfaceSuper(
- resolved_method, sizeof(void*));
+ resolved_method, kRuntimePointerSize);
}
} else {
DCHECK(type == kVirtual);
return this_object->GetClass()->GetVTableEntry(
- resolved_method->GetMethodIndex(), sizeof(void*));
+ resolved_method->GetMethodIndex(), kRuntimePointerSize);
}
}
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 197caa1..fd1c02f 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/mutex.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -48,7 +49,7 @@
return nullptr; // Failure
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
mirror::Class* klass = referrer->GetDexCacheResolvedType<false>(type_idx, pointer_size);
if (UNLIKELY(klass == nullptr)) { // Not in dex cache so try to resolve
klass = class_linker->ResolveType(type_idx, referrer);
@@ -125,7 +126,7 @@
}
// Make sure that the result is an instance of the type this method was expected to return.
mirror::Class* return_type = self->GetCurrentMethod(nullptr)->GetReturnType(true /* resolve */,
- sizeof(void*));
+ kRuntimePointerSize);
if (!o->InstanceOf(return_type)) {
Runtime::Current()->GetJavaVM()->JniAbortF(nullptr,
@@ -188,7 +189,7 @@
StackHandleScope<1> hs(soa.Self());
auto h_interface_method(hs.NewHandle(soa.Decode<mirror::Method*>(interface_method_jobj)));
// This can cause thread suspension.
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* result_type =
h_interface_method->GetArtMethod()->GetReturnType(true /* resolve */, pointer_size);
mirror::Object* result_ref = soa.Decode<mirror::Object*>(result);
@@ -208,10 +209,10 @@
mirror::Class* proxy_class = rcvr->GetClass();
mirror::Method* interface_method = soa.Decode<mirror::Method*>(interface_method_jobj);
ArtMethod* proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(
- interface_method->GetArtMethod(), sizeof(void*));
- auto virtual_methods = proxy_class->GetVirtualMethodsSlice(sizeof(void*));
+ interface_method->GetArtMethod(), kRuntimePointerSize);
+ auto virtual_methods = proxy_class->GetVirtualMethodsSlice(kRuntimePointerSize);
size_t num_virtuals = proxy_class->NumVirtualMethods();
- size_t method_size = ArtMethod::Size(sizeof(void*));
+ size_t method_size = ArtMethod::Size(kRuntimePointerSize);
// Rely on the fact that the methods are contiguous to determine the index of the method in
// the slice.
int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) -
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 331de91..a81a7e7 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_
#include "arch/instruction_set.h"
+#include "base/enums.h"
#include "base/mutex.h"
#include "runtime.h"
#include "thread-inl.h"
@@ -86,7 +87,7 @@
}
// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
-static constexpr size_t GetConstExprPointerSize(InstructionSet isa) {
+static constexpr PointerSize GetConstExprPointerSize(InstructionSet isa) {
// In C++11, a constexpr function body must be a single return statement.
return (isa == kArm || isa == kThumb2) ? kArmPointerSize :
isa == kArm64 ? kArm64PointerSize :
@@ -94,14 +95,14 @@
isa == kMips64 ? kMips64PointerSize :
isa == kX86 ? kX86PointerSize :
isa == kX86_64 ? kX86_64PointerSize :
- isa == kNone ? (LOG(FATAL) << "kNone has no pointer size", 0) :
- (LOG(FATAL) << "Unknown instruction set" << isa, 0);
+ isa == kNone ? (LOG(FATAL) << "kNone has no pointer size", PointerSize::k32) :
+ (LOG(FATAL) << "Unknown instruction set" << isa, PointerSize::k32);
}
// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
static constexpr size_t GetCalleeSaveReturnPcOffset(InstructionSet isa,
Runtime::CalleeSaveType type) {
- return GetCalleeSaveFrameSize(isa, type) - GetConstExprPointerSize(isa);
+ return GetCalleeSaveFrameSize(isa, type) - static_cast<size_t>(GetConstExprPointerSize(isa));
}
} // namespace art
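Two consequences of the stronger type show up in this file: the unreachable LOG(FATAL) arms of GetConstExprPointerSize must now yield a PointerSize value (k32, arbitrarily) rather than 0, because the comma expression has to match the return type; and GetCalleeSaveReturnPcOffset needs an explicit cast, since enum class values never convert to integers implicitly. A minimal sketch of the latter rule:

    constexpr size_t ReturnPcOffset(size_t frame_size, PointerSize ptr_size) {
      // return frame_size - ptr_size;  // Would not compile: no implicit conversion.
      return frame_size - static_cast<size_t>(ptr_size);
    }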
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index c3b3ac0..4686a51 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -17,6 +17,7 @@
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "callee_save_frame.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "mirror/class-inl.h"
@@ -33,7 +34,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, sizeof(void*)); \
+ mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, kRuntimePointerSize); \
if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \
size_t byte_count = klass->GetObjectSize(); \
byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.h b/runtime/entrypoints/quick/quick_entrypoints_enum.h
index 5a95491..8de1137 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_enum.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.h
@@ -36,7 +36,7 @@
std::ostream& operator<<(std::ostream& os, const QuickEntrypointEnum& kind);
// Translate a QuickEntrypointEnum value to the corresponding ThreadOffset.
-template <size_t pointer_size>
+template <PointerSize pointer_size>
static ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
switch (trampoline)
{ // NOLINT(whitespace/braces)
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 8e660a2..b5e560f 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -15,6 +15,7 @@
*/
#include "art_method-inl.h"
+#include "base/enums.h"
#include "callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "instrumentation.h"
@@ -37,7 +38,7 @@
if (instrumentation->IsDeoptimized(method)) {
result = GetQuickToInterpreterBridge();
} else {
- result = instrumentation->GetQuickCodeFor(method, sizeof(void*));
+ result = instrumentation->GetQuickCodeFor(method, kRuntimePointerSize);
DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result));
}
bool interpreter_entry = (result == GetQuickToInterpreterBridge());
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 0306bd6..9678079 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -15,6 +15,7 @@
*/
#include "art_method-inl.h"
+#include "base/enums.h"
#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
@@ -366,7 +367,7 @@
// next register is even.
static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
"Number of Quick FPR arguments not even");
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
}
virtual ~QuickArgumentVisitor() {}
@@ -659,7 +660,7 @@
DCHECK(!method->IsNative()) << PrettyMethod(method);
uint32_t shorty_len = 0;
- ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem();
DCHECK(code_item != nullptr) << PrettyMethod(method);
const char* shorty = non_proxy_method->GetShorty(&shorty_len);
@@ -859,7 +860,7 @@
jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
// Placing arguments into args vector and remove the receiver.
- ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*));
+ ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
<< PrettyMethod(non_proxy_method);
std::vector<jvalue> args;
@@ -872,14 +873,15 @@
args.erase(args.begin());
// Convert proxy method into expected interface method.
- ArtMethod* interface_method = proxy_method->FindOverriddenMethod(sizeof(void*));
+ ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize);
DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
self->EndAssertNoThreadSuspension(old_cause);
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
jobject interface_method_jobj = soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod<sizeof(void*), false>(soa.Self(), interface_method));
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(),
+ interface_method));
// All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
// that performs allocations.
@@ -1037,10 +1039,10 @@
ArtMethod* orig_called = called;
if (invoke_type == kVirtual) {
CHECK(receiver != nullptr) << invoke_type;
- called = receiver->GetClass()->FindVirtualMethodForVirtual(called, sizeof(void*));
+ called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize);
} else if (invoke_type == kInterface) {
CHECK(receiver != nullptr) << invoke_type;
- called = receiver->GetClass()->FindVirtualMethodForInterface(called, sizeof(void*));
+ called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize);
} else {
DCHECK_EQ(invoke_type, kSuper);
CHECK(caller != nullptr) << invoke_type;
@@ -1053,10 +1055,10 @@
mirror::Class* ref_class = linker->ResolveReferencedClassOfMethod(
called_method.dex_method_index, dex_cache, class_loader);
if (ref_class->IsInterface()) {
- called = ref_class->FindVirtualMethodForInterfaceSuper(called, sizeof(void*));
+ called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
} else {
called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
- called->GetMethodIndex(), sizeof(void*));
+ called->GetMethodIndex(), kRuntimePointerSize);
}
}
@@ -1070,7 +1072,7 @@
// FindVirtualMethodFor... This is ok for FindDexMethodIndexInOtherDexFile that only cares
// about the name and signature.
uint32_t update_dex_cache_method_index = called->GetDexMethodIndex();
- if (!called->HasSameDexCacheResolvedMethods(caller, sizeof(void*))) {
+ if (!called->HasSameDexCacheResolvedMethods(caller, kRuntimePointerSize)) {
// Calling from one dex file to another, need to compute the method index appropriate to
// the caller's dex file. Since we get here only if the original called was a runtime
// method, we've got the correct dex_file and a dex_method_idx from above.
@@ -1084,8 +1086,10 @@
}
if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
(caller->GetDexCacheResolvedMethod(
- update_dex_cache_method_index, sizeof(void*)) != called)) {
- caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called, sizeof(void*));
+ update_dex_cache_method_index, kRuntimePointerSize) != called)) {
+ caller->SetDexCacheResolvedMethod(update_dex_cache_method_index,
+ called,
+ kRuntimePointerSize);
}
} else if (invoke_type == kStatic) {
const auto called_dex_method_idx = called->GetDexMethodIndex();
@@ -1095,7 +1099,9 @@
// b/19175856
if (called->GetDexFile() == called_method.dex_file &&
called_method.dex_method_index != called_dex_method_idx) {
- called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called, sizeof(void*));
+ called->GetDexCache()->SetResolvedMethod(called_dex_method_idx,
+ called,
+ kRuntimePointerSize);
}
}
@@ -1629,7 +1635,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = **m;
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
@@ -2164,22 +2170,22 @@
}
ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod(
- dex_method_idx, sizeof(void*));
+ dex_method_idx, kRuntimePointerSize);
DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method);
ArtMethod* method = nullptr;
- ImTable* imt = cls->GetImt(sizeof(void*));
+ ImTable* imt = cls->GetImt(kRuntimePointerSize);
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
// If the dex cache already resolved the interface method, look whether we have
// a match in the ImtConflictTable.
- ArtMethod* conflict_method = imt->Get(interface_method->GetImtIndex(), sizeof(void*));
+ ArtMethod* conflict_method = imt->Get(interface_method->GetImtIndex(), kRuntimePointerSize);
if (LIKELY(conflict_method->IsRuntimeMethod())) {
- ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
+ ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
DCHECK(current_table != nullptr);
- method = current_table->Lookup(interface_method, sizeof(void*));
+ method = current_table->Lookup(interface_method, kRuntimePointerSize);
} else {
// It seems we aren't really a conflict method!
- method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*));
+ method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
}
if (method != nullptr) {
return GetTwoWordSuccessValue(
@@ -2188,7 +2194,7 @@
}
// No match, use the IfTable.
- method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*));
+ method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
if (UNLIKELY(method == nullptr)) {
ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
interface_method, this_object, caller_method);
@@ -2217,14 +2223,15 @@
CHECK(self->IsExceptionPending());
return GetTwoWordFailureValue(); // Failure.
}
- interface_method = caller_method->GetDexCacheResolvedMethod(dex_method_idx, sizeof(void*));
+ interface_method =
+ caller_method->GetDexCacheResolvedMethod(dex_method_idx, kRuntimePointerSize);
DCHECK(!interface_method->IsRuntimeMethod());
}
// We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
// We create a new table with the new pair { interface_method, method }.
uint32_t imt_index = interface_method->GetImtIndex();
- ArtMethod* conflict_method = imt->Get(imt_index, sizeof(void*));
+ ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
if (conflict_method->IsRuntimeMethod()) {
ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
cls.Get(),
@@ -2237,7 +2244,7 @@
// data is consistent.
imt->Set(imt_index,
new_conflict_method,
- sizeof(void*));
+ kRuntimePointerSize);
}
}
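For orientation, the interface-dispatch slow path above boils down to the following lookup sequence, sketched here using only calls that appear in this hunk (the table lookup may return null, which falls back to the IfTable search):

    ArtMethod* LookupViaImt(ImTable* imt, uint32_t imt_index, ArtMethod* interface_method)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      ArtMethod* entry = imt->Get(imt_index, kRuntimePointerSize);
      if (!entry->IsRuntimeMethod()) {
        return entry;  // Unambiguous slot: exactly one implementation.
      }
      // A runtime method in the slot marks a conflict; consult its table.
      ImtConflictTable* table = entry->GetImtConflictTable(kRuntimePointerSize);
      return table->Lookup(interface_method, kRuntimePointerSize);
    }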
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 9f073a6..f86921c 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -341,7 +341,7 @@
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
// TODO: Check linear alloc and image.
- DCHECK_ALIGNED(ArtMethod::Size(sizeof(void*)), sizeof(void*))
+ DCHECK_ALIGNED(ArtMethod::Size(kRuntimePointerSize), sizeof(void*))
<< "ArtMethod is not pointer aligned";
if (method_obj == nullptr || !IsAligned<sizeof(void*)>(method_obj)) {
VLOG(signals) << "no method";
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 6489a39..522f236 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -17,6 +17,7 @@
#include "allocation_record.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "stack.h"
@@ -112,7 +113,7 @@
for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
const AllocRecordStackTraceElement& element = record.StackElement(i);
DCHECK(element.GetMethod() != nullptr);
- element.GetMethod()->VisitRoots(buffered_visitor, sizeof(void*));
+ element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
}
}
}
@@ -200,7 +201,7 @@
ArtMethod* m = GetMethod();
// m may be null if we have inlined methods of unresolved classes. b/27858645
if (m != nullptr && !m->IsRuntimeMethod()) {
- m = m->GetInterfaceMethodIfProxy(sizeof(void*));
+ m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
trace_->AddStackElement(AllocRecordStackTraceElement(m, GetDexPc()));
}
return true;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 90446b0..33f64d9 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -17,6 +17,7 @@
#include "concurrent_copying.h"
#include "art_field-inl.h"
+#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -1588,7 +1589,7 @@
ArtMethod* method = gc_root_source->GetArtMethod();
LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
RootPrinter root_printer;
- method->VisitRoots(root_printer, sizeof(void*));
+ method->VisitRoots(root_printer, kRuntimePointerSize);
}
ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 9f54f1c..e276137 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -23,6 +23,7 @@
#include <vector>
#include "base/bounded_fifo.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
@@ -430,7 +431,7 @@
<< " first_ref_field_offset="
<< (holder_->IsClass()
? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
- sizeof(void*))
+ kRuntimePointerSize)
: holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
<< " num_of_ref_fields="
<< (holder_->IsClass()
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 8cadc2e..d140b75 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -23,6 +23,7 @@
#include <unistd.h>
#include "art_method.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "base/stl_util.h"
#include "base/scoped_flock.h"
@@ -754,7 +755,7 @@
public:
template<typename... Args>
explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited,
- const size_t pointer_size,
+ const PointerSize pointer_size,
Args... args)
: FixupVisitor(args...),
pointer_size_(pointer_size),
@@ -874,7 +875,7 @@
}
private:
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
gc::accounting::ContinuousSpaceBitmap* const visited_;
};
@@ -908,7 +909,7 @@
class FixupArtMethodVisitor : public FixupVisitor, public ArtMethodVisitor {
public:
template<typename... Args>
- explicit FixupArtMethodVisitor(bool fixup_heap_objects, size_t pointer_size, Args... args)
+ explicit FixupArtMethodVisitor(bool fixup_heap_objects, PointerSize pointer_size, Args... args)
: FixupVisitor(args...),
fixup_heap_objects_(fixup_heap_objects),
pointer_size_(pointer_size) {}
@@ -938,7 +939,7 @@
private:
const bool fixup_heap_objects_;
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
};
class FixupArtFieldVisitor : public FixupVisitor, public ArtFieldVisitor {
@@ -974,7 +975,7 @@
uint32_t boot_image_end = 0;
uint32_t boot_oat_begin = 0;
uint32_t boot_oat_end = 0;
- const size_t pointer_size = image_header.GetPointerSize();
+ const PointerSize pointer_size = image_header.GetPointerSize();
gc::Heap* const heap = Runtime::Current()->GetHeap();
heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
if (boot_image_begin == boot_image_end) {
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index f1a3256..5d62b59 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -33,13 +33,13 @@
#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 0x10
DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kRefsAndArgs))))
#define THREAD_FLAGS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread:: ThreadFlagsOffset<sizeof(void*)>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread:: ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_ID_OFFSET 12
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_ID_OFFSET), (static_cast<int32_t>(art::Thread:: ThinLockIdOffset<sizeof(void*)>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_ID_OFFSET), (static_cast<int32_t>(art::Thread:: ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_IS_GC_MARKING_OFFSET 52
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_IS_GC_MARKING_OFFSET), (static_cast<int32_t>(art::Thread:: IsGcMarkingOffset<sizeof(void*)>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_IS_GC_MARKING_OFFSET), (static_cast<int32_t>(art::Thread:: IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_CARD_TABLE_OFFSET 128
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CARD_TABLE_OFFSET), (static_cast<int32_t>(art::Thread:: CardTableOffset<sizeof(void*)>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CARD_TABLE_OFFSET), (static_cast<int32_t>(art::Thread:: CardTableOffset<art::kRuntimePointerSize>().Int32Value())))
#define CODEITEM_INSNS_OFFSET 16
DEFINE_CHECK_EQ(static_cast<int32_t>(CODEITEM_INSNS_OFFSET), (static_cast<int32_t>(__builtin_offsetof(art::DexFile::CodeItem, insns_))))
#define MIRROR_OBJECT_CLASS_OFFSET 0
@@ -53,21 +53,21 @@
#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT 0x1f
DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT), (static_cast<uint32_t>((art::MostSignificantBit(art::kAccClassIsFinalizable)))))
#define ART_METHOD_DEX_CACHE_METHODS_OFFSET_32 20
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_METHODS_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedMethodsOffset(4).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_METHODS_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedMethodsOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_DEX_CACHE_METHODS_OFFSET_64 24
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_METHODS_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedMethodsOffset(8).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_METHODS_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedMethodsOffset(art::PointerSize::k64).Int32Value())))
#define ART_METHOD_DEX_CACHE_TYPES_OFFSET_32 24
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_TYPES_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedTypesOffset(4).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_TYPES_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedTypesOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_DEX_CACHE_TYPES_OFFSET_64 32
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_TYPES_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedTypesOffset(8).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_TYPES_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedTypesOffset(art::PointerSize::k64).Int32Value())))
#define ART_METHOD_JNI_OFFSET_32 28
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromJniOffset(4).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromJniOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_JNI_OFFSET_64 40
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromJniOffset(8).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromJniOffset(art::PointerSize::k64).Int32Value())))
#define ART_METHOD_QUICK_CODE_OFFSET_32 32
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(4).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_QUICK_CODE_OFFSET_64 48
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(8).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
#define LOCK_WORD_STATE_SHIFT 30
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
#define LOCK_WORD_STATE_MASK 0xc0000000
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index ca206ef..2e1b8ed 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -57,9 +57,9 @@
return header_size + data_size;
}
-inline size_t HandleScope::SizeOf(size_t pointer_size, uint32_t num_references) {
+inline size_t HandleScope::SizeOf(PointerSize pointer_size, uint32_t num_references) {
// Assume that the layout is packed.
- size_t header_size = pointer_size + sizeof(number_of_references_);
+ size_t header_size = ReferencesOffset(pointer_size);
size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
return header_size + data_size;
}
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index d53a0e4..67d7054 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -19,6 +19,7 @@
#include <stack>
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "handle.h"
@@ -52,7 +53,7 @@
static size_t SizeOf(uint32_t num_references);
// Returns the size of a HandleScope containing num_references handles.
- static size_t SizeOf(size_t pointer_size, uint32_t num_references);
+ static size_t SizeOf(PointerSize pointer_size, uint32_t num_references);
// Link to previous HandleScope or null.
HandleScope* GetLink() const {
@@ -73,18 +74,18 @@
ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
// Offset of link within HandleScope, used by generated code.
- static size_t LinkOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
+ static constexpr size_t LinkOffset(PointerSize pointer_size ATTRIBUTE_UNUSED) {
return 0;
}
// Offset of length within handle scope, used by generated code.
- static size_t NumberOfReferencesOffset(size_t pointer_size) {
- return pointer_size;
+ static constexpr size_t NumberOfReferencesOffset(PointerSize pointer_size) {
+ return static_cast<size_t>(pointer_size);
}
// Offset of references within handle scope, used by generated code.
- static size_t ReferencesOffset(size_t pointer_size) {
- return pointer_size + sizeof(number_of_references_);
+ static constexpr size_t ReferencesOffset(PointerSize pointer_size) {
+ return NumberOfReferencesOffset(pointer_size) + sizeof(number_of_references_);
}
// Placement new creation.
@@ -96,7 +97,7 @@
protected:
// Return backing storage used for references.
ALWAYS_INLINE StackReference<mirror::Object>* GetReferences() const {
- uintptr_t address = reinterpret_cast<uintptr_t>(this) + ReferencesOffset(sizeof(void*));
+ uintptr_t address = reinterpret_cast<uintptr_t>(this) + ReferencesOffset(kRuntimePointerSize);
return reinterpret_cast<StackReference<mirror::Object>*>(address);
}
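The three offset helpers describe the packed HandleScope layout that the rewritten ReferencesOffset now states directly instead of re-deriving: a link pointer, then the reference count, then the reference slots. Assuming number_of_references_ is a uint32_t:

    [ link_ : pointer ][ number_of_references_ : uint32_t ][ StackReference slots ... ]
    ^ LinkOffset = 0   ^ NumberOfReferencesOffset           ^ ReferencesOffset

    // Compile-time sanity sketch under that assumption (12 = 8 + 4).
    static_assert(HandleScope::ReferencesOffset(PointerSize::k64) == 12u, "packed layout");

Making the helpers constexpr is what permits such compile-time checks; it also lets SizeOf in the handle_scope-inl.h hunk above reuse ReferencesOffset for the header size.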
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index dc99987..58f3800 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/enums.h"
#include "gtest/gtest.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change.h"
@@ -48,13 +49,13 @@
{
uintptr_t* link_ptr = reinterpret_cast<uintptr_t*>(table_base_ptr +
- HandleScope::LinkOffset(sizeof(void*)));
+ HandleScope::LinkOffset(kRuntimePointerSize));
EXPECT_EQ(*link_ptr, static_cast<size_t>(0x5678));
}
{
uint32_t* num_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
- HandleScope::NumberOfReferencesOffset(sizeof(void*)));
+ HandleScope::NumberOfReferencesOffset(kRuntimePointerSize));
EXPECT_EQ(*num_ptr, static_cast<size_t>(0x9ABC));
}
@@ -64,7 +65,7 @@
EXPECT_EQ(sizeof(StackReference<mirror::Object>), sizeof(uint32_t));
uint32_t* ref_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
- HandleScope::ReferencesOffset(sizeof(void*)));
+ HandleScope::ReferencesOffset(kRuntimePointerSize));
EXPECT_EQ(*ref_ptr, static_cast<uint32_t>(0x1234));
}
}
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index cd0557a..28620db 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -48,7 +48,7 @@
template <typename Visitor>
inline void ImageHeader::VisitPackedImTables(const Visitor& visitor,
uint8_t* base,
- size_t pointer_size) const {
+ PointerSize pointer_size) const {
const ImageSection& section = GetImageSection(kSectionImTables);
for (size_t pos = 0; pos < section.Size();) {
ImTable* imt = reinterpret_cast<ImTable*>(base + section.Offset() + pos);
@@ -66,7 +66,7 @@
template <typename Visitor>
inline void ImageHeader::VisitPackedImtConflictTables(const Visitor& visitor,
uint8_t* base,
- size_t pointer_size) const {
+ PointerSize pointer_size) const {
const ImageSection& section = GetImageSection(kSectionIMTConflictTables);
for (size_t pos = 0; pos < section.Size(); ) {
auto* table = reinterpret_cast<ImtConflictTable*>(base + section.Offset() + pos);
diff --git a/runtime/image.cc b/runtime/image.cc
index 2362a92..6888183 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -160,7 +160,7 @@
void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor,
uint8_t* base,
- size_t pointer_size) const {
+ PointerSize pointer_size) const {
const size_t method_alignment = ArtMethod::Alignment(pointer_size);
const size_t method_size = ArtMethod::Size(pointer_size);
const ImageSection& methods = GetMethodsSection();
diff --git a/runtime/image.h b/runtime/image.h
index 06f06ee..a98cea1 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -19,6 +19,7 @@
#include <string.h>
+#include "base/enums.h"
#include "globals.h"
#include "mirror/object.h"
@@ -156,7 +157,11 @@
return reinterpret_cast<uint8_t*>(oat_file_end_);
}
- uint32_t GetPointerSize() const {
+ PointerSize GetPointerSize() const {
+ return ConvertToPointerSize(pointer_size_);
+ }
+
+ uint32_t GetPointerSizeUnchecked() const {
return pointer_size_;
}
@@ -273,7 +278,9 @@
// Visit ArtMethods in the section starting at base. Includes runtime methods.
// TODO: Delete base parameter if it is always equal to GetImageBegin.
- void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
+ void VisitPackedArtMethods(ArtMethodVisitor* visitor,
+ uint8_t* base,
+ PointerSize pointer_size) const;
// Visit ArtMethods in the section starting at base.
// TODO: Delete base parameter if it is always equal to GetImageBegin.
@@ -282,12 +289,12 @@
template <typename Visitor>
void VisitPackedImTables(const Visitor& visitor,
uint8_t* base,
- size_t pointer_size) const;
+ PointerSize pointer_size) const;
template <typename Visitor>
void VisitPackedImtConflictTables(const Visitor& visitor,
uint8_t* base,
- size_t pointer_size) const;
+ PointerSize pointer_size) const;
private:
static const uint8_t kImageMagic[4];
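GetPointerSize above leans on a ConvertToPointerSize helper (presumably also in base/enums.h, not shown in this diff) to validate the raw pointer_size_ field read from the image file, while GetPointerSizeUnchecked keeps raw access available for code that validates separately. A plausible shape for the converter, in the same spirit as GetConstExprPointerSize earlier in this patch:

    // Sketch only: turns the image header's raw field into a checked enum.
    static constexpr PointerSize ConvertToPointerSize(uint32_t raw) {
      return (raw == 4u || raw == 8u)
          ? static_cast<PointerSize>(raw)
          : (LOG(FATAL) << "Invalid pointer size: " << raw, PointerSize::k32);
    }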
diff --git a/runtime/imtable.h b/runtime/imtable.h
index 51faf70..2416621 100644
--- a/runtime/imtable.h
+++ b/runtime/imtable.h
@@ -32,10 +32,10 @@
// (non-marker) interfaces.
static constexpr size_t kSize = IMT_SIZE;
- ArtMethod* Get(size_t index, size_t pointer_size) {
+ ArtMethod* Get(size_t index, PointerSize pointer_size) {
DCHECK_LT(index, kSize);
uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
- if (pointer_size == 4) {
+ if (pointer_size == PointerSize::k32) {
uint32_t value = *reinterpret_cast<uint32_t*>(ptr);
return reinterpret_cast<ArtMethod*>(value);
} else {
@@ -44,10 +44,10 @@
}
}
- void Set(size_t index, ArtMethod* method, size_t pointer_size) {
+ void Set(size_t index, ArtMethod* method, PointerSize pointer_size) {
DCHECK_LT(index, kSize);
uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
- if (pointer_size == 4) {
+ if (pointer_size == PointerSize::k32) {
uintptr_t value = reinterpret_cast<uintptr_t>(method);
DCHECK_EQ(static_cast<uint32_t>(value), value); // Check that we don't lose any non-zero bits.
*reinterpret_cast<uint32_t*>(ptr) = static_cast<uint32_t>(value);
@@ -56,18 +56,18 @@
}
}
- static size_t OffsetOfElement(size_t index, size_t pointer_size) {
- return index * pointer_size;
+ static size_t OffsetOfElement(size_t index, PointerSize pointer_size) {
+ return index * static_cast<size_t>(pointer_size);
}
- void Populate(ArtMethod** data, size_t pointer_size) {
+ void Populate(ArtMethod** data, PointerSize pointer_size) {
for (size_t i = 0; i < kSize; ++i) {
Set(i, data[i], pointer_size);
}
}
- constexpr static size_t SizeInBytes(size_t pointer_size) {
- return kSize * pointer_size;
+ constexpr static size_t SizeInBytes(PointerSize pointer_size) {
+ return kSize * static_cast<size_t>(pointer_size);
}
};
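[Editor's note] The Get/Set branches above compare against PointerSize::k32 and recover byte counts with static_cast<size_t>, which only works if the enumerators carry the byte widths as their values. A sketch consistent with every use in this change (the real definition is in base/enums.h):

#include <cstddef>

// enum class instead of plain size_t: arbitrary integers no longer
// implicitly convert to a pointer width, and every arithmetic use of the
// byte count is forced through an explicit static_cast.
enum class PointerSize : size_t {
  k32 = 4,
  k64 = 8,
};

static_assert(static_cast<size_t>(PointerSize::k32) == 4u, "k32 is 4 bytes");
static_assert(static_cast<size_t>(PointerSize::k64) == 8u, "k64 is 8 bytes");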
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 7dfc83f..61ffe44 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -94,7 +94,7 @@
// We need the class to be resolved to install/uninstall stubs. Otherwise its methods
// could not be initialized or linked with regard to class inheritance.
} else {
- for (ArtMethod& method : klass->GetMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
InstallStubsForMethod(&method);
}
}
@@ -886,7 +886,7 @@
ConfigureStubs(key, InstrumentationLevel::kInstrumentNothing);
}
-const void* Instrumentation::GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const {
+const void* Instrumentation::GetQuickCodeFor(ArtMethod* method, PointerSize pointer_size) const {
Runtime* runtime = Runtime::Current();
if (LIKELY(!instrumentation_stubs_installed_)) {
const void* code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
@@ -1063,7 +1063,7 @@
ArtMethod* method = instrumentation_frame.method_;
uint32_t length;
- const size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
char return_shorty = method->GetInterfaceMethodIfProxy(pointer_size)->GetShorty(&length)[0];
JValue return_value;
if (return_shorty == 'V') {
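[Editor's note] Call sites that previously passed sizeof(void*) now pass kRuntimePointerSize, the enum value for the running process's own pointer width. A hedged sketch of such a constant (an assumption; the real definition is in base/enums.h):

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// constexpr matters here: besides ordinary arguments, the constant is used
// as a template argument (e.g. CreateFromArtMethod<kRuntimePointerSize, false>
// in jni_internal.cc later in this diff), which requires a compile-time value.
static constexpr PointerSize kRuntimePointerSize =
    sizeof(void*) == 8u ? PointerSize::k64 : PointerSize::k32;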
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 49dd060..757be8e 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -22,6 +22,7 @@
#include <unordered_set>
#include "arch/instruction_set.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
@@ -234,7 +235,7 @@
// Get the quick code for the given method. More efficient than asking the class linker as it
// will short-cut to GetCode if instrumentation and static method resolution stubs aren't
// installed.
- const void* GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const
+ const void* GetQuickCodeFor(ArtMethod* method, PointerSize pointer_size) const
SHARED_REQUIRES(Locks::mutator_lock_);
void ForceInterpretOnly() {
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 56e3bc5..684c471 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -16,6 +16,7 @@
#include "instrumentation.h"
+#include "base/enums.h"
#include "common_runtime_test.h"
#include "common_throws.h"
#include "class_linker-inl.h"
@@ -461,7 +462,7 @@
mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
ASSERT_TRUE(klass != nullptr);
ArtMethod* method_to_deoptimize = klass->FindDeclaredDirectMethod("instanceMethod", "()V",
- sizeof(void*));
+ kRuntimePointerSize);
ASSERT_TRUE(method_to_deoptimize != nullptr);
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
@@ -508,7 +509,7 @@
mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
ASSERT_TRUE(klass != nullptr);
ArtMethod* method_to_deoptimize = klass->FindDeclaredDirectMethod("instanceMethod", "()V",
- sizeof(void*));
+ kRuntimePointerSize);
ASSERT_TRUE(method_to_deoptimize != nullptr);
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 53d5e43..11b7ef4 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -18,6 +18,7 @@
#include <cmath>
+#include "base/enums.h"
#include "debugger.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "jit/jit.h"
@@ -537,7 +538,7 @@
}
method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
(shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
- result, method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty());
+ result, method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty());
}
void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
@@ -656,7 +657,8 @@
// As a special case for proxy methods, which are not dex-backed,
// we have to retrieve type information from the proxy method's
// interface method instead (which is dex-backed, since proxies are never interfaces).
- ArtMethod* method = new_shadow_frame->GetMethod()->GetInterfaceMethodIfProxy(sizeof(void*));
+ ArtMethod* method =
+ new_shadow_frame->GetMethod()->GetInterfaceMethodIfProxy(kRuntimePointerSize);
// We need to do runtime check on reference assignment. We need to load the shorty
// to get the exact type of each reference argument.
@@ -686,7 +688,7 @@
case 'L': {
Object* o = shadow_frame.GetVRegReference(src_reg);
if (do_assignability_check && o != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
Class* arg_type =
method->GetClassFromTypeIndex(
params->GetTypeItem(shorty_pos).type_idx_, true /* resolve */, pointer_size);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 7dfa6e2..174d4e0 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -26,6 +26,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "class_linker-inl.h"
@@ -681,7 +682,7 @@
const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
CHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
- vtable_idx, sizeof(void*));
+ vtable_idx, kRuntimePointerSize);
if (UNLIKELY(called_method == nullptr)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 18330ba..8bfc10c 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/enums.h"
#include "base/stl_util.h" // MakeUnique
#include "experimental_flags.h"
#include "interpreter_common.h"
@@ -283,7 +284,7 @@
const size_t ref_idx = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
if (do_assignability_check && obj_result != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
Class* return_type = shadow_frame.GetMethod()->GetReturnType(true /* resolve */,
pointer_size);
// Re-load since it might have moved.
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 793260d..57443f1 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -29,6 +29,7 @@
#include "art_method-inl.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "class_linker.h"
@@ -301,21 +302,23 @@
return;
}
Runtime* runtime = Runtime::Current();
- size_t pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
mirror::Field* field;
if (runtime->IsActiveTransaction()) {
- if (pointer_size == 8) {
- field = mirror::Field::CreateFromArtField<8U, true>(self, found, true);
+ if (pointer_size == PointerSize::k64) {
+ field = mirror::Field::CreateFromArtField<PointerSize::k64, true>(
+ self, found, true);
} else {
- DCHECK_EQ(pointer_size, 4U);
- field = mirror::Field::CreateFromArtField<4U, true>(self, found, true);
+ field = mirror::Field::CreateFromArtField<PointerSize::k32, true>(
+ self, found, true);
}
} else {
- if (pointer_size == 8) {
- field = mirror::Field::CreateFromArtField<8U, false>(self, found, true);
+ if (pointer_size == PointerSize::k64) {
+ field = mirror::Field::CreateFromArtField<PointerSize::k64, false>(
+ self, found, true);
} else {
- DCHECK_EQ(pointer_size, 4U);
- field = mirror::Field::CreateFromArtField<4U, false>(self, found, true);
+ field = mirror::Field::CreateFromArtField<PointerSize::k32, false>(
+ self, found, true);
}
}
result->SetL(field);
@@ -335,21 +338,23 @@
shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<mirror::Class>();
Runtime* runtime = Runtime::Current();
bool transaction = runtime->IsActiveTransaction();
- size_t pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
mirror::Method* method;
if (transaction) {
- if (pointer_size == 8U) {
- method = mirror::Class::GetDeclaredMethodInternal<8U, true>(self, klass, name, args);
+ if (pointer_size == PointerSize::k64) {
+ method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
+ self, klass, name, args);
} else {
- DCHECK_EQ(pointer_size, 4U);
- method = mirror::Class::GetDeclaredMethodInternal<4U, true>(self, klass, name, args);
+ method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k32, true>(
+ self, klass, name, args);
}
} else {
- if (pointer_size == 8U) {
- method = mirror::Class::GetDeclaredMethodInternal<8U, false>(self, klass, name, args);
+ if (pointer_size == PointerSize::k64) {
+ method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k64, false>(
+ self, klass, name, args);
} else {
- DCHECK_EQ(pointer_size, 4U);
- method = mirror::Class::GetDeclaredMethodInternal<4U, false>(self, klass, name, args);
+ method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k32, false>(
+ self, klass, name, args);
}
}
result->SetL(method);
@@ -367,21 +372,23 @@
shadow_frame->GetVRegReference(arg_offset + 1)->AsObjectArray<mirror::Class>();
Runtime* runtime = Runtime::Current();
bool transaction = runtime->IsActiveTransaction();
- size_t pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
mirror::Constructor* constructor;
if (transaction) {
- if (pointer_size == 8U) {
- constructor = mirror::Class::GetDeclaredConstructorInternal<8U, true>(self, klass, args);
+ if (pointer_size == PointerSize::k64) {
+ constructor = mirror::Class::GetDeclaredConstructorInternal<PointerSize::k64,
+ true>(self, klass, args);
} else {
- DCHECK_EQ(pointer_size, 4U);
- constructor = mirror::Class::GetDeclaredConstructorInternal<4U, true>(self, klass, args);
+ constructor = mirror::Class::GetDeclaredConstructorInternal<PointerSize::k32,
+ true>(self, klass, args);
}
} else {
- if (pointer_size == 8U) {
- constructor = mirror::Class::GetDeclaredConstructorInternal<8U, false>(self, klass, args);
+ if (pointer_size == PointerSize::k64) {
+ constructor = mirror::Class::GetDeclaredConstructorInternal<PointerSize::k64,
+ false>(self, klass, args);
} else {
- DCHECK_EQ(pointer_size, 4U);
- constructor = mirror::Class::GetDeclaredConstructorInternal<4U, false>(self, klass, args);
+ constructor = mirror::Class::GetDeclaredConstructorInternal<PointerSize::k32,
+ false>(self, klass, args);
}
}
result->SetL(constructor);
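[Editor's note] All three helpers above repeat the same shape: test a runtime PointerSize once, then call a template instantiated with the matching compile-time enum value. The pattern in isolation, as a hypothetical helper that is not part of this change:

#include <cstddef>
#include <type_traits>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// Sketch: fold a runtime PointerSize into a compile-time template argument
// by branching exactly once. func receives an integral_constant carrying
// the enum value; both branches must yield the same return type.
template <typename Func>
auto DispatchOnPointerSize(PointerSize pointer_size, Func&& func) {
  if (pointer_size == PointerSize::k64) {
    return func(std::integral_constant<PointerSize, PointerSize::k64>());
  }
  return func(std::integral_constant<PointerSize, PointerSize::k32>());
}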
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 814b001..7e1f795 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -20,6 +20,7 @@
#include <locale>
#include "base/casts.h"
+#include "base/enums.h"
#include "base/memory_tool.h"
#include "class_linker.h"
#include "common_runtime_test.h"
@@ -383,7 +384,7 @@
ScopedObjectAccess soa(self);
mirror::Class* klass = mirror::String::GetJavaLangString();
ArtMethod* method = klass->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V",
- sizeof(void*));
+ kRuntimePointerSize);
// create instruction data for invoke-direct {v0, v1} of method with fake index
uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index cfe6cd1..d52030f 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -19,6 +19,7 @@
#include <dlfcn.h>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "debugger.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
@@ -258,7 +259,7 @@
// If we get a request to compile a proxy method, we pass the actual Java method
// of that proxy method, as the compiler does not expect a proxy method.
- ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) {
return false;
}
@@ -410,7 +411,7 @@
// Get the actual Java method if this method is from a proxy class. The compiler
// and the JIT code cache do not expect methods from proxy classes.
- method = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
// Cheap check if the method has been compiled already. That's an indicator that we should
// osr into it.
@@ -616,7 +617,7 @@
int32_t new_count = starting_count + count; // int32 here to avoid wrap-around;
if (starting_count < warm_method_threshold_) {
if ((new_count >= warm_method_threshold_) &&
- (method->GetProfilingInfo(sizeof(void*)) == nullptr)) {
+ (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
if (success) {
VLOG(jit) << "Start profiling " << PrettyMethod(method);
@@ -671,7 +672,7 @@
return;
}
- ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
// Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
// instead of interpreting the method.
if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
@@ -689,7 +690,7 @@
ArtMethod* callee ATTRIBUTE_UNUSED) {
ScopedAssertNoThreadSuspension ants(thread, __FUNCTION__);
DCHECK(this_object != nullptr);
- ProfilingInfo* info = caller->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
// Since the instrumentation is marked from the declaring class we need to mark the card so
// that mod-union tables and card rescanning know about the update.
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6b6f5a5..c2097f8 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -19,6 +19,7 @@
#include <sstream>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
@@ -738,13 +739,14 @@
// a method has compiled code but no ProfilingInfo.
// We make sure compiled methods have a ProfilingInfo object. It is needed for
// code cache collection.
- if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
+ if (ContainsPc(ptr) &&
+ info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
// We clear the inline caches, as classes in them might be stale.
info->ClearGcRootsInInlineCaches();
// Do a fence to make sure the clearing is seen before attaching to the method.
QuasiAtomic::ThreadFenceRelease();
info->GetMethod()->SetProfilingInfo(info);
- } else if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) != info) {
+ } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
// No need for this ProfilingInfo object anymore.
FreeData(reinterpret_cast<uint8_t*>(info));
return true;
@@ -762,7 +764,7 @@
// have memory leaks of compiled code otherwise.
for (const auto& it : method_code_map_) {
ArtMethod* method = it.second;
- if (method->GetProfilingInfo(sizeof(void*)) == nullptr) {
+ if (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
const void* code_ptr = it.first;
const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
@@ -851,7 +853,7 @@
sizeof(void*));
// Check whether some other thread has concurrently created it.
- ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
return info;
}
@@ -919,7 +921,7 @@
return false;
}
- ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info == nullptr) {
VLOG(jit) << PrettyMethod(method) << " needs a ProfilingInfo to be compiled";
// Because the counter is not atomic, there are some rare cases where we may not
@@ -939,7 +941,7 @@
ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
MutexLock mu(self, lock_);
- ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
info->IncrementInlineUse();
}
@@ -948,13 +950,13 @@
void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
MutexLock mu(self, lock_);
- ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
DCHECK(info != nullptr);
info->DecrementInlineUse();
}
void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED, bool osr) {
- ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
DCHECK(info->IsMethodBeingCompiled(osr));
info->SetIsMethodBeingCompiled(false, osr);
}
@@ -966,7 +968,7 @@
void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
const OatQuickMethodHeader* header) {
- ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
if ((profiling_info != nullptr) &&
(profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
// Prevent future uses of the compiled code.
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 4d4d1ea..5a469e5 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -21,6 +21,7 @@
#include <fcntl.h>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "compiler_filter.h"
@@ -206,12 +207,13 @@
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
return true;
}
- for (ArtMethod& method : klass->GetMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
if (!method.IsNative()) {
if (method.GetCounter() >= startup_method_samples_ ||
- method.GetProfilingInfo(sizeof(void*)) != nullptr) {
+ method.GetProfilingInfo(kRuntimePointerSize) != nullptr) {
// Have samples, add to profile.
- const DexFile* dex_file = method.GetInterfaceMethodIfProxy(sizeof(void*))->GetDexFile();
+ const DexFile* dex_file =
+ method.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetDexFile();
methods_->push_back(MethodReference(dex_file, method.GetDexMethodIndex()));
}
}
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6ef3999..e1a4e2a 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -27,6 +27,7 @@
#include "art_method-inl.h"
#include "atomic.h"
#include "base/allocator.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "base/stl_util.h"
@@ -375,12 +376,12 @@
ScopedObjectAccess soa(env);
ArtMethod* m = soa.DecodeMethod(mid);
mirror::AbstractMethod* method;
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
if (m->IsConstructor()) {
- method = mirror::Constructor::CreateFromArtMethod<sizeof(void*), false>(soa.Self(), m);
+ method = mirror::Constructor::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), m);
} else {
- method = mirror::Method::CreateFromArtMethod<sizeof(void*), false>(soa.Self(), m);
+ method = mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), m);
}
return soa.AddLocalReference<jobject>(method);
}
@@ -390,7 +391,7 @@
ScopedObjectAccess soa(env);
ArtField* f = soa.DecodeField(fid);
return soa.AddLocalReference<jobject>(
- mirror::Field::CreateFromArtField<sizeof(void*)>(soa.Self(), f, true));
+ mirror::Field::CreateFromArtField<kRuntimePointerSize>(soa.Self(), f, true));
}
static jclass GetObjectClass(JNIEnv* env, jobject java_object) {
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index fbb0441..f018c1f 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -178,9 +178,9 @@
}
// Is `address` aligned on a machine word?
- template<typename T> static bool IsWordAligned(const T* address) {
+ template<typename T> static constexpr bool IsWordAligned(const T* address) {
// Word alignment in bytes.
- size_t kWordAlignment = GetInstructionSetPointerSize(kRuntimeISA);
+ size_t kWordAlignment = static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA));
return IsAlignedParam(address, kWordAlignment);
}
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index ef39132..b4dce58 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -21,7 +21,7 @@
namespace art {
namespace mirror {
-template <size_t kPointerSize, bool kTransactionActive>
+template <PointerSize kPointerSize, bool kTransactionActive>
bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) {
auto* interface_method = method->GetInterfaceMethodIfProxy(kPointerSize);
SetArtMethod<kTransactionActive>(method);
@@ -33,10 +33,14 @@
return true;
}
-template bool AbstractMethod::CreateFromArtMethod<4U, false>(ArtMethod* method);
-template bool AbstractMethod::CreateFromArtMethod<4U, true>(ArtMethod* method);
-template bool AbstractMethod::CreateFromArtMethod<8U, false>(ArtMethod* method);
-template bool AbstractMethod::CreateFromArtMethod<8U, true>(ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<PointerSize::k32, false>(
+ ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<PointerSize::k32, true>(
+ ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<PointerSize::k64, false>(
+ ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<PointerSize::k64, true>(
+ ArtMethod* method);
ArtMethod* AbstractMethod::GetArtMethod() {
return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
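[Editor's note] CreateFromArtMethod is templated on the pointer size and defined out of line, so each (PointerSize, transaction) combination must be explicitly instantiated, now naming enum values where 4U/8U used to appear. The pattern reduced to a standalone sketch (Widget is hypothetical):

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

class Widget {
 public:
  // In ART the definition lives in a .cc file; the explicit instantiations
  // below make the four needed specializations visible to other objects.
  template <PointerSize kPointerSize, bool kTransactionActive>
  bool Create() {
    return kTransactionActive && kPointerSize == PointerSize::k64;
  }
};

template bool Widget::Create<PointerSize::k32, false>();
template bool Widget::Create<PointerSize::k32, true>();
template bool Widget::Create<PointerSize::k64, false>();
template bool Widget::Create<PointerSize::k64, true>();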
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index 936b14c..cfbe492 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -34,7 +34,7 @@
class MANAGED AbstractMethod : public AccessibleObject {
public:
// Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
- template <size_t kPointerSize, bool kTransactionActive>
+ template <PointerSize kPointerSize, bool kTransactionActive>
bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index c6fa15d..014e54b 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -30,7 +30,7 @@
namespace art {
namespace mirror {
-inline uint32_t Array::ClassSize(size_t pointer_size) {
+inline uint32_t Array::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = Object::kVTableLength;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
@@ -371,25 +371,23 @@
}
template<typename T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline T PointerArray::GetElementPtrSize(uint32_t idx, size_t ptr_size) {
+inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) {
// C style casts here since we sometimes have T be a pointer, or sometimes an integer
// (for stack traces).
- if (ptr_size == 8) {
+ if (ptr_size == PointerSize::k64) {
return (T)static_cast<uintptr_t>(
AsLongArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx));
}
- DCHECK_EQ(ptr_size, 4u);
return (T)static_cast<uintptr_t>(
AsIntArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx));
}
template<bool kTransactionActive, bool kUnchecked>
-inline void PointerArray::SetElementPtrSize(uint32_t idx, uint64_t element, size_t ptr_size) {
- if (ptr_size == 8) {
+inline void PointerArray::SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size) {
+ if (ptr_size == PointerSize::k64) {
(kUnchecked ? down_cast<LongArray*>(static_cast<Object*>(this)) : AsLongArray())->
SetWithoutChecks<kTransactionActive>(idx, element);
} else {
- DCHECK_EQ(ptr_size, 4u);
DCHECK_LE(element, static_cast<uint64_t>(0xFFFFFFFFu));
(kUnchecked ? down_cast<IntArray*>(static_cast<Object*>(this)) : AsIntArray())
->SetWithoutChecks<kTransactionActive>(idx, static_cast<uint32_t>(element));
@@ -397,7 +395,7 @@
}
template<bool kTransactionActive, bool kUnchecked, typename T>
-inline void PointerArray::SetElementPtrSize(uint32_t idx, T* element, size_t ptr_size) {
+inline void PointerArray::SetElementPtrSize(uint32_t idx, T* element, PointerSize ptr_size) {
SetElementPtrSize<kTransactionActive, kUnchecked>(idx,
reinterpret_cast<uintptr_t>(element),
ptr_size);
@@ -405,7 +403,7 @@
template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void PointerArray::Fixup(mirror::PointerArray* dest,
- size_t pointer_size,
+ PointerSize pointer_size,
const Visitor& visitor) {
for (size_t i = 0, count = GetLength(); i < count; ++i) {
void* ptr = GetElementPtrSize<void*, kVerifyFlags, kReadBarrierOption>(i, pointer_size);
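[Editor's note] PointerArray stores native pointers in the width of the target image, viewing the same backing storage as either an IntArray or a LongArray. The storage idea as a standalone sketch (simplified; the real code goes through the mirror types, verify flags, and read barriers):

#include <cstddef>
#include <cstdint>
#include <vector>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// Sketch: store pointers as 32- or 64-bit integers in a flat buffer,
// matching the target image's pointer width rather than the host's.
class PackedPointerArray {
 public:
  PackedPointerArray(size_t count, PointerSize size)
      : data_(count * static_cast<size_t>(size)), size_(size) {}

  uint64_t Get(size_t idx) const {
    if (size_ == PointerSize::k64) {
      return reinterpret_cast<const uint64_t*>(data_.data())[idx];
    }
    return reinterpret_cast<const uint32_t*>(data_.data())[idx];
  }

  void Set(size_t idx, uint64_t element) {
    if (size_ == PointerSize::k64) {
      reinterpret_cast<uint64_t*>(data_.data())[idx] = element;
    } else {
      // Narrowing must not lose bits on a 32-bit image.
      reinterpret_cast<uint32_t*>(data_.data())[idx] =
          static_cast<uint32_t>(element);
    }
  }

 private:
  std::vector<uint8_t> data_;
  PointerSize size_;
};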
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 9a21ec2..c9e0cb3 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_ARRAY_H_
#define ART_RUNTIME_MIRROR_ARRAY_H_
+#include "base/enums.h"
#include "gc_root.h"
#include "gc/allocator_type.h"
#include "object.h"
@@ -31,7 +32,7 @@
class MANAGED Array : public Object {
public:
// The size of a java.lang.Class representing an array.
- static uint32_t ClassSize(size_t pointer_size);
+ static uint32_t ClassSize(PointerSize pointer_size);
// Allocates an array with the given properties, if kFillUsable is true the array will be of at
// least component_count size, however, if there's usable space at the end of the allocation the
@@ -186,14 +187,14 @@
template<typename T,
VerifyObjectFlags kVerifyFlags = kVerifyNone,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- T GetElementPtrSize(uint32_t idx, size_t ptr_size)
+ T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive = false, bool kUnchecked = false>
- void SetElementPtrSize(uint32_t idx, uint64_t element, size_t ptr_size)
+ void SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive = false, bool kUnchecked = false, typename T>
- void SetElementPtrSize(uint32_t idx, T* element, size_t ptr_size)
+ void SetElementPtrSize(uint32_t idx, T* element, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Fixup the pointers in the dest arrays by passing our pointers through the visitor. Only copies
@@ -201,7 +202,7 @@
template <VerifyObjectFlags kVerifyFlags = kVerifyNone,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void Fixup(mirror::PointerArray* dest, size_t pointer_size, const Visitor& visitor)
+ void Fixup(mirror::PointerArray* dest, PointerSize pointer_size, const Visitor& visitor)
SHARED_REQUIRES(Locks::mutator_lock_);
};
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 9a9fd87..8f5419c 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -80,13 +80,12 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetDirectMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDirectMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
return GetDirectMethodsSliceUnchecked(pointer_size);
}
-inline ArraySlice<ArtMethod> Class::GetDirectMethodsSliceUnchecked(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDirectMethodsSliceUnchecked(PointerSize pointer_size) {
return ArraySlice<ArtMethod>(GetMethodsPtr(),
GetDirectMethodsStartOffset(),
GetVirtualMethodsStartOffset(),
@@ -95,13 +94,12 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
return GetDeclaredMethodsSliceUnchecked(pointer_size);
}
-inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSliceUnchecked(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSliceUnchecked(PointerSize pointer_size) {
return ArraySlice<ArtMethod>(GetMethodsPtr(),
GetDirectMethodsStartOffset(),
GetCopiedMethodsStartOffset(),
@@ -109,13 +107,13 @@
ArtMethod::Alignment(pointer_size));
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size);
}
-inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSliceUnchecked(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSliceUnchecked(
+ PointerSize pointer_size) {
return ArraySlice<ArtMethod>(GetMethodsPtr(),
GetVirtualMethodsStartOffset(),
GetCopiedMethodsStartOffset(),
@@ -124,13 +122,12 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
return GetVirtualMethodsSliceUnchecked(pointer_size);
}
-inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSliceUnchecked(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSliceUnchecked(PointerSize pointer_size) {
LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
return ArraySlice<ArtMethod>(methods,
GetVirtualMethodsStartOffset(),
@@ -140,13 +137,12 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
return GetCopiedMethodsSliceUnchecked(pointer_size);
}
-inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSliceUnchecked(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSliceUnchecked(PointerSize pointer_size) {
LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
return ArraySlice<ArtMethod>(methods,
GetCopiedMethodsStartOffset(),
@@ -161,7 +157,7 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
return ArraySlice<ArtMethod>(methods,
@@ -177,12 +173,12 @@
return (methods == nullptr) ? 0 : methods->size();
}
-inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return &GetDirectMethodsSliceUnchecked(pointer_size).At(i);
}
-inline ArtMethod* Class::GetDirectMethod(size_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetDirectMethod(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return &GetDirectMethodsSlice(pointer_size).At(i);
}
@@ -212,20 +208,20 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArtMethod* Class::GetVirtualMethod(size_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetVirtualMethod(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>())
<< PrettyClass(this) << " status=" << GetStatus();
return GetVirtualMethodUnchecked(i, pointer_size);
}
-inline ArtMethod* Class::GetVirtualMethodDuringLinking(size_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetVirtualMethodDuringLinking(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
DCHECK(IsLoaded() || IsErroneous());
return GetVirtualMethodUnchecked(i, pointer_size);
}
-inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return &GetVirtualMethodsSliceUnchecked(pointer_size).At(i);
}
@@ -258,7 +254,7 @@
return GetVTable() != nullptr ? GetVTable()->GetLength() : 0;
}
-inline ArtMethod* Class::GetVTableEntry(uint32_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetVTableEntry(uint32_t i, PointerSize pointer_size) {
if (ShouldHaveEmbeddedVTable()) {
return GetEmbeddedVTableEntry(i, pointer_size);
}
@@ -275,29 +271,29 @@
SetField32<false>(MemberOffset(EmbeddedVTableLengthOffset()), len);
}
-inline ImTable* Class::GetImt(size_t pointer_size) {
+inline ImTable* Class::GetImt(PointerSize pointer_size) {
return GetFieldPtrWithSize<ImTable*>(MemberOffset(ImtPtrOffset(pointer_size)), pointer_size);
}
-inline void Class::SetImt(ImTable* imt, size_t pointer_size) {
+inline void Class::SetImt(ImTable* imt, PointerSize pointer_size) {
return SetFieldPtrWithSize<false>(MemberOffset(ImtPtrOffset(pointer_size)), imt, pointer_size);
}
-inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size) {
+inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size) {
return MemberOffset(
EmbeddedVTableOffset(pointer_size).Uint32Value() + i * VTableEntrySize(pointer_size));
}
-inline ArtMethod* Class::GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetEmbeddedVTableEntry(uint32_t i, PointerSize pointer_size) {
return GetFieldPtrWithSize<ArtMethod*>(EmbeddedVTableEntryOffset(i, pointer_size), pointer_size);
}
inline void Class::SetEmbeddedVTableEntryUnchecked(
- uint32_t i, ArtMethod* method, size_t pointer_size) {
+ uint32_t i, ArtMethod* method, PointerSize pointer_size) {
SetFieldPtrWithSize<false>(EmbeddedVTableEntryOffset(i, pointer_size), method, pointer_size);
}
-inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) {
+inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, PointerSize pointer_size) {
auto* vtable = GetVTableDuringLinking();
CHECK_EQ(method, vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size));
SetEmbeddedVTableEntryUnchecked(i, method, pointer_size);
@@ -453,7 +449,8 @@
return false;
}
-inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size) {
+inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method,
+ PointerSize pointer_size) {
Class* declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr) << PrettyClass(this);
DCHECK(declaring_class->IsInterface()) << PrettyMethod(method);
@@ -470,7 +467,7 @@
return nullptr;
}
-inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size) {
+inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method, PointerSize pointer_size) {
// Only miranda or default methods may come from interfaces and be used as a virtual.
DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsDefault() || method->IsMiranda());
// The argument method may come from a super class.
@@ -478,13 +475,13 @@
return GetVTableEntry(method->GetMethodIndex(), pointer_size);
}
-inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size) {
+inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method, PointerSize pointer_size) {
DCHECK(!method->GetDeclaringClass()->IsInterface());
return GetSuperClass()->GetVTableEntry(method->GetMethodIndex(), pointer_size);
}
inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method,
- size_t pointer_size) {
+ PointerSize pointer_size) {
if (method->IsDirect()) {
return method;
}
@@ -528,7 +525,7 @@
}
template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(size_t pointer_size) {
+inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(PointerSize pointer_size) {
DCHECK(IsResolved());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
@@ -539,7 +536,8 @@
return MemberOffset(base);
}
-inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size) {
+inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(
+ PointerSize pointer_size) {
DCHECK(IsLoaded());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
if (ShouldHaveEmbeddedVTable()) {
@@ -708,13 +706,13 @@
uint32_t num_32bit_static_fields,
uint32_t num_64bit_static_fields,
uint32_t num_ref_static_fields,
- size_t pointer_size) {
+ PointerSize pointer_size) {
// Space used by java.lang.Class and its instance fields.
uint32_t size = sizeof(Class);
// Space used by embedded tables.
if (has_embedded_vtable) {
- size = RoundUp(size + sizeof(uint32_t), pointer_size);
- size += pointer_size; // size of pointer to IMT
+ size = RoundUp(size + sizeof(uint32_t), static_cast<size_t>(pointer_size));
+ size += static_cast<size_t>(pointer_size); // size of pointer to IMT
size += num_vtable_entries * VTableEntrySize(pointer_size);
}
@@ -908,7 +906,7 @@
}
template<ReadBarrierOption kReadBarrierOption, class Visitor>
-void mirror::Class::VisitNativeRoots(Visitor& visitor, size_t pointer_size) {
+void mirror::Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
for (ArtField& field : GetSFieldsUnchecked()) {
// Visit roots first in case the declaring class gets moved.
field.VisitRoots(visitor);
@@ -928,35 +926,34 @@
}
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) {
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return GetDirectMethodsSliceUnchecked(pointer_size).AsRange();
}
inline IterationRange<StrideIterator<ArtMethod>> Class::GetDeclaredMethods(
- size_t pointer_size) {
- CheckPointerSize(pointer_size);
+ PointerSize pointer_size) {
return GetDeclaredMethodsSliceUnchecked(pointer_size).AsRange();
}
inline IterationRange<StrideIterator<ArtMethod>> Class::GetDeclaredVirtualMethods(
- size_t pointer_size) {
- CheckPointerSize(pointer_size);
+ PointerSize pointer_size) {
return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size).AsRange();
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(size_t pointer_size) {
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(
+ PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return GetVirtualMethodsSliceUnchecked(pointer_size).AsRange();
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetCopiedMethods(size_t pointer_size) {
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetCopiedMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return GetCopiedMethodsSliceUnchecked(pointer_size).AsRange();
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetMethods(size_t pointer_size) {
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return MakeIterationRangeFromLengthPrefixedArray(GetMethodsPtr(),
ArtMethod::Size(pointer_size),
@@ -979,13 +976,12 @@
return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
}
-inline MemberOffset Class::EmbeddedVTableOffset(size_t pointer_size) {
+inline MemberOffset Class::EmbeddedVTableOffset(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + pointer_size);
+ return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + static_cast<size_t>(pointer_size));
}
-inline void Class::CheckPointerSize(size_t pointer_size) {
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
+inline void Class::CheckPointerSize(PointerSize pointer_size) {
DCHECK_EQ(pointer_size, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
}
@@ -1040,7 +1036,7 @@
template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void Class::FixupNativePointers(mirror::Class* dest,
- size_t pointer_size,
+ PointerSize pointer_size,
const Visitor& visitor) {
// Update the field arrays.
LengthPrefixedArray<ArtField>* const sfields = GetSFieldsPtr();
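[Editor's note] Note the removed DCHECK(ValidPointerSize(...)) lines throughout this file: a PointerSize value can only be k32 or k64, so the type now carries the invariant the runtime checks used to enforce. The embedded-table arithmetic in ComputeClassSize, restated as a self-contained sketch (illustrative values; assumes one vtable entry occupies exactly one pointer):

#include <cstddef>
#include <cstdint>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

constexpr size_t RoundUp(size_t x, size_t n) { return ((x + n - 1) / n) * n; }

// Sketch of the embedded-table portion: a 32-bit vtable length word rounded
// up to pointer alignment, one IMT pointer, then the vtable slots.
constexpr size_t EmbeddedTablesSize(size_t base,
                                    uint32_t vtable_entries,
                                    PointerSize ps) {
  size_t size = RoundUp(base + sizeof(uint32_t), static_cast<size_t>(ps));
  size += static_cast<size_t>(ps);                   // Pointer to the IMT.
  size += vtable_entries * static_cast<size_t>(ps);  // Embedded vtable slots.
  return size;
}

// 4-byte length word rounds to 8, plus one IMT pointer and two 8-byte slots.
static_assert(EmbeddedTablesSize(0, 2, PointerSize::k64) == 32u, "64-bit case");
static_assert(EmbeddedTablesSize(0, 2, PointerSize::k32) == 16u, "32-bit case");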
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 375cb2f..f948be7 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -334,8 +334,9 @@
}
}
-ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
+ const StringPiece& signature,
+ PointerSize pointer_size) {
// Check the current class before checking the interfaces.
ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -353,8 +354,9 @@
return nullptr;
}
-ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
+ const Signature& signature,
+ PointerSize pointer_size) {
// Check the current class before checking the interfaces.
ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -372,8 +374,9 @@
return nullptr;
}
-ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size) {
+ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache,
+ uint32_t dex_method_idx,
+ PointerSize pointer_size) {
// Check the current class before checking the interfaces.
ArtMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
@@ -392,8 +395,9 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name,
+ const StringPiece& signature,
+ PointerSize pointer_size) {
for (auto& method : GetDirectMethods(pointer_size)) {
if (name == method.GetName() && method.GetSignature() == signature) {
return &method;
@@ -402,8 +406,9 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name,
+ const Signature& signature,
+ PointerSize pointer_size) {
for (auto& method : GetDirectMethods(pointer_size)) {
if (name == method.GetName() && signature == method.GetSignature()) {
return &method;
@@ -412,8 +417,9 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache,
+ uint32_t dex_method_idx,
+ PointerSize pointer_size) {
if (GetDexCache() == dex_cache) {
for (auto& method : GetDirectMethods(pointer_size)) {
if (method.GetDexMethodIndex() == dex_method_idx) {
@@ -424,8 +430,9 @@
return nullptr;
}
-ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDirectMethod(const StringPiece& name,
+ const StringPiece& signature,
+ PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -435,8 +442,9 @@
return nullptr;
}
-ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDirectMethod(const StringPiece& name,
+ const Signature& signature,
+ PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -447,7 +455,7 @@
}
ArtMethod* Class::FindDirectMethod(
- const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) {
+ const DexCache* dex_cache, uint32_t dex_method_idx, PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
@@ -457,7 +465,8 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethodByName(const StringPiece& name, size_t pointer_size) {
+ArtMethod* Class::FindDeclaredDirectMethodByName(const StringPiece& name,
+ PointerSize pointer_size) {
for (auto& method : GetDirectMethods(pointer_size)) {
ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
if (name == np_method->GetName()) {
@@ -471,8 +480,9 @@
// because they do not only find 'declared' methods; they may also return copied methods. This
// behavior is desired and correct, but the naming can lead to confusion because in the Java
// language 'declared' excludes interface methods, which may be found by this.
-ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name,
+ const StringPiece& signature,
+ PointerSize pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
if (name == np_method->GetName() && np_method->GetSignature() == signature) {
@@ -482,8 +492,9 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name,
+ const Signature& signature,
+ PointerSize pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
if (name == np_method->GetName() && signature == np_method->GetSignature()) {
@@ -493,8 +504,9 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache,
+ uint32_t dex_method_idx,
+ PointerSize pointer_size) {
if (GetDexCache() == dex_cache) {
for (auto& method : GetDeclaredVirtualMethods(pointer_size)) {
if (method.GetDexMethodIndex() == dex_method_idx) {
@@ -505,7 +517,8 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethodByName(const StringPiece& name, size_t pointer_size) {
+ArtMethod* Class::FindDeclaredVirtualMethodByName(const StringPiece& name,
+ PointerSize pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
if (name == np_method->GetName()) {
@@ -516,7 +529,7 @@
}
ArtMethod* Class::FindVirtualMethod(
- const StringPiece& name, const StringPiece& signature, size_t pointer_size) {
+ const StringPiece& name, const StringPiece& signature, PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -527,7 +540,7 @@
}
ArtMethod* Class::FindVirtualMethod(
- const StringPiece& name, const Signature& signature, size_t pointer_size) {
+ const StringPiece& name, const Signature& signature, PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -538,7 +551,7 @@
}
ArtMethod* Class::FindVirtualMethod(
- const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) {
+ const DexCache* dex_cache, uint32_t dex_method_idx, PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
@@ -548,7 +561,7 @@
return nullptr;
}
-ArtMethod* Class::FindVirtualMethodForInterfaceSuper(ArtMethod* method, size_t pointer_size) {
+ArtMethod* Class::FindVirtualMethodForInterfaceSuper(ArtMethod* method, PointerSize pointer_size) {
DCHECK(method->GetDeclaringClass()->IsInterface());
DCHECK(IsInterface()) << "Should only be called on an interface class";
// Check if we have one defined on this interface first. This includes searching copied ones to
@@ -613,7 +626,7 @@
return abstract_methods.empty() ? nullptr : abstract_methods[0];
}
-ArtMethod* Class::FindClassInitializer(size_t pointer_size) {
+ArtMethod* Class::FindClassInitializer(PointerSize pointer_size) {
for (ArtMethod& method : GetDirectMethods(pointer_size)) {
if (method.IsClassInitializer()) {
DCHECK_STREQ(method.GetName(), "<clinit>");
@@ -803,7 +816,7 @@
return nullptr;
}
-void Class::SetSkipAccessChecksFlagOnAllMethods(size_t pointer_size) {
+void Class::SetSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size) {
DCHECK(IsVerified());
for (auto& m : GetMethods(pointer_size)) {
if (!m.IsNative() && m.IsInvokable()) {
@@ -917,7 +930,7 @@
return GetDexFile().GetInterfacesList(*class_def);
}
-void Class::PopulateEmbeddedVTable(size_t pointer_size) {
+void Class::PopulateEmbeddedVTable(PointerSize pointer_size) {
PointerArray* table = GetVTableDuringLinking();
CHECK(table != nullptr) << PrettyClass(this);
const size_t table_length = table->GetLength();
@@ -963,9 +976,12 @@
// The pre-fence visitor for Class::CopyOf().
class CopyClassVisitor {
public:
- CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, size_t new_length,
- size_t copy_bytes, ImTable* imt,
- size_t pointer_size)
+ CopyClassVisitor(Thread* self,
+ Handle<mirror::Class>* orig,
+ size_t new_length,
+ size_t copy_bytes,
+ ImTable* imt,
+ PointerSize pointer_size)
: self_(self), orig_(orig), new_length_(new_length),
copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) {
}
@@ -991,12 +1007,11 @@
const size_t new_length_;
const size_t copy_bytes_;
ImTable* imt_;
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
};
-Class* Class::CopyOf(Thread* self, int32_t new_length,
- ImTable* imt, size_t pointer_size) {
+Class* Class::CopyOf(Thread* self, int32_t new_length, ImTable* imt, PointerSize pointer_size) {
DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
// We may get copied by a compacting GC.
StackHandleScope<1> hs(self);
@@ -1022,14 +1037,14 @@
// TODO: Move this to java_lang_Class.cc?
ArtMethod* Class::GetDeclaredConstructor(
- Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, size_t pointer_size) {
+ Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size) {
for (auto& m : GetDirectMethods(pointer_size)) {
// Skip <clinit> which is a static constructor, as well as non constructors.
if (m.IsStatic() || !m.IsConstructor()) {
continue;
}
// May cause thread suspension and exceptions.
- if (m.GetInterfaceMethodIfProxy(sizeof(void*))->EqualParameters(args)) {
+ if (m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->EqualParameters(args)) {
return &m;
}
if (UNLIKELY(self->IsExceptionPending())) {
@@ -1053,7 +1068,7 @@
return (type_id == nullptr) ? DexFile::kDexNoIndex : dex_file.GetIndexForTypeId(*type_id);
}
-template <size_t kPointerSize, bool kTransactionActive>
+template <PointerSize kPointerSize, bool kTransactionActive>
mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
mirror::Class* klass,
mirror::String* name,
@@ -1124,31 +1139,31 @@
}
template
-mirror::Method* Class::GetDeclaredMethodInternal<4U, false>(
+mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k32, false>(
Thread* self,
mirror::Class* klass,
mirror::String* name,
mirror::ObjectArray<mirror::Class>* args);
template
-mirror::Method* Class::GetDeclaredMethodInternal<4U, true>(
+mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k32, true>(
Thread* self,
mirror::Class* klass,
mirror::String* name,
mirror::ObjectArray<mirror::Class>* args);
template
-mirror::Method* Class::GetDeclaredMethodInternal<8U, false>(
+mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k64, false>(
Thread* self,
mirror::Class* klass,
mirror::String* name,
mirror::ObjectArray<mirror::Class>* args);
template
-mirror::Method* Class::GetDeclaredMethodInternal<8U, true>(
+mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
Thread* self,
mirror::Class* klass,
mirror::String* name,
mirror::ObjectArray<mirror::Class>* args);
-template <size_t kPointerSize, bool kTransactionActive>
+template <PointerSize kPointerSize, bool kTransactionActive>
mirror::Constructor* Class::GetDeclaredConstructorInternal(
Thread* self,
mirror::Class* klass,
@@ -1162,19 +1177,23 @@
// mirror::Constructor::CreateFromArtMethod<kTransactionActive>(self, result)
-template mirror::Constructor* Class::GetDeclaredConstructorInternal<4U, false>(
+template
+mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k32, false>(
Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args);
-template mirror::Constructor* Class::GetDeclaredConstructorInternal<4U, true>(
+template
+mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k32, true>(
Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args);
-template mirror::Constructor* Class::GetDeclaredConstructorInternal<8U, false>(
+template
+mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k64, false>(
Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args);
-template mirror::Constructor* Class::GetDeclaredConstructorInternal<8U, true>(
+template
+mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k64, true>(
Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 3ba9e1a..5c490de 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_CLASS_H_
#define ART_RUNTIME_MIRROR_CLASS_H_
+#include "base/enums.h"
#include "base/iteration_range.h"
#include "dex_file.h"
#include "class_flags.h"
@@ -556,17 +557,17 @@
uint32_t num_32bit_static_fields,
uint32_t num_64bit_static_fields,
uint32_t num_ref_static_fields,
- size_t pointer_size);
+ PointerSize pointer_size);
// The size of java.lang.Class.class.
- static uint32_t ClassClassSize(size_t pointer_size) {
+ static uint32_t ClassClassSize(PointerSize pointer_size) {
// The number of vtable entries in java.lang.Class.
uint32_t vtable_entries = Object::kVTableLength + 72;
return ComputeClassSize(true, vtable_entries, 0, 0, 4, 1, 0, pointer_size);
}
// The size of a java.lang.Class representing a primitive such as int.class.
- static uint32_t PrimitiveClassSize(size_t pointer_size) {
+ static uint32_t PrimitiveClassSize(PointerSize pointer_size) {
return ComputeClassSize(false, 0, 0, 0, 0, 0, 0, pointer_size);
}
@@ -703,7 +704,7 @@
// Also updates the dex_cache_strings_ variable from new_dex_cache.
void SetDexCache(DexCache* new_dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(size_t pointer_size)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
@@ -713,7 +714,7 @@
return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
}
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetMethods(size_t pointer_size)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetMethods(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
void SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
@@ -727,65 +728,66 @@
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, size_t pointer_size)
+ ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Use only when we are allocating and populating the method arrays.
- ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, size_t pointer_size)
+ ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, size_t pointer_size)
+ ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of static, private, and constructor methods.
ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredMethods(
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- template <size_t kPointerSize, bool kTransactionActive>
+ template <PointerSize kPointerSize, bool kTransactionActive>
static Method* GetDeclaredMethodInternal(Thread* self,
mirror::Class* klass,
mirror::String* name,
mirror::ObjectArray<mirror::Class>* args)
SHARED_REQUIRES(Locks::mutator_lock_);
- template <size_t kPointerSize, bool kTransactionActive>
+ template <PointerSize kPointerSize, bool kTransactionActive>
static Constructor* GetDeclaredConstructorInternal(Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args)
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredVirtualMethods(
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetCopiedMethods(size_t pointer_size)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetCopiedMethods(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(size_t pointer_size)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of non-inherited virtual methods (sum of declared and copied methods).
@@ -800,10 +802,10 @@
ALWAYS_INLINE uint32_t NumMethods() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ArtMethod* GetVirtualMethod(size_t i, size_t pointer_size)
+ ArtMethod* GetVirtualMethod(size_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetVirtualMethodDuringLinking(size_t i, size_t pointer_size)
+ ArtMethod* GetVirtualMethodDuringLinking(size_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -822,9 +824,10 @@
return MemberOffset(sizeof(Class));
}
- static MemberOffset ImtPtrOffset(size_t pointer_size) {
+ static MemberOffset ImtPtrOffset(PointerSize pointer_size) {
return MemberOffset(
- RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t), pointer_size));
+ RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t),
+ static_cast<size_t>(pointer_size)));
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -841,124 +844,126 @@
bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
- static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size);
+ static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size);
int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size)
+ ArtMethod* GetVTableEntry(uint32_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
int32_t GetEmbeddedVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
- ImTable* GetImt(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ ImTable* GetImt(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- void SetImt(ImTable* imt, size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetImt(ImTable* imt, PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size)
+ ArtMethod* GetEmbeddedVTableEntry(uint32_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
+ void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size)
+ inline void SetEmbeddedVTableEntryUnchecked(uint32_t i,
+ ArtMethod* method,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- void PopulateEmbeddedVTable(size_t pointer_size)
+ void PopulateEmbeddedVTable(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class but potentially from a super class, return the
// specific implementation method for this class.
- ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size)
+ ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class' super class, return the specific implementation
// method for this class.
- ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size)
+ ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method from some implementor of this interface, return the specific implementation
// method for this class.
- ArtMethod* FindVirtualMethodForInterfaceSuper(ArtMethod* method, size_t pointer_size)
+ ArtMethod* FindVirtualMethodForInterfaceSuper(ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class, but potentially from a
// super class or interface, return the specific implementation
// method for this class.
- ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size)
+ ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE;
- ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, size_t pointer_size)
+ ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, size_t pointer_size)
+ ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, size_t pointer_size)
+ ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* FindClassInitializer(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
bool HasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccHasDefaultMethod) != 0;
@@ -1040,11 +1045,11 @@
// Get the offset of the first reference static field. Other reference static fields follow.
template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- MemberOffset GetFirstReferenceStaticFieldOffset(size_t pointer_size)
+ MemberOffset GetFirstReferenceStaticFieldOffset(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Get the offset of the first reference static field. Other reference static fields follow.
- MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size)
+ MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Gets the static fields of the class.
@@ -1154,11 +1159,11 @@
// VisitNativeRoots visits roots which are keyed off native pointers, such as ArtFields and
// ArtMethods.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
- void VisitNativeRoots(Visitor& visitor, size_t pointer_size)
+ void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// When class is verified, set the kAccSkipAccessChecks flag on each method.
- void SetSkipAccessChecksFlagOnAllMethods(size_t pointer_size)
+ void SetSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Get the descriptor of the class. In a few cases a std::string is required, rather than
@@ -1193,7 +1198,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// For proxy class only.
@@ -1217,7 +1222,7 @@
// May cause thread suspension due to EqualParameters.
ArtMethod* GetDeclaredConstructor(
- Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, size_t pointer_size)
+ Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
static int32_t GetInnerClassFlags(Handle<Class> h_this, int32_t default_value)
@@ -1244,27 +1249,28 @@
return GetClassLoader() == nullptr;
}
- static size_t ImTableEntrySize(size_t pointer_size) {
- return pointer_size;
+ static size_t ImTableEntrySize(PointerSize pointer_size) {
+ return static_cast<size_t>(pointer_size);
}
- static size_t VTableEntrySize(size_t pointer_size) {
- return pointer_size;
+ static size_t VTableEntrySize(PointerSize pointer_size) {
+ return static_cast<size_t>(pointer_size);
}
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSliceUnchecked(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSliceUnchecked(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSliceUnchecked(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSliceUnchecked(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSliceUnchecked(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSliceUnchecked(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSliceUnchecked(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSliceUnchecked(
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSliceUnchecked(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSliceUnchecked(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Fix up all of the native pointers in the class by running them through the visitor. Only sets
@@ -1274,7 +1280,7 @@
template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void FixupNativePointers(mirror::Class* dest, size_t pointer_size, const Visitor& visitor)
+ void FixupNativePointers(mirror::Class* dest, PointerSize pointer_size, const Visitor& visitor)
SHARED_REQUIRES(Locks::mutator_lock_);
private:
@@ -1318,8 +1324,9 @@
bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
// Check that the pointer size matches the one in the class linker.
- ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
- static MemberOffset EmbeddedVTableOffset(size_t pointer_size);
+ ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
+
+ static MemberOffset EmbeddedVTableOffset(PointerSize pointer_size);
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
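[Editor's note: the header above threads PointerSize through every method iterator. A self-contained sketch of the resulting call-site shape, with toy stand-ins — ArtMethodStub and the stubbed GetDirectMethods are illustrative, not ART's types.]

#include <cstddef>
#include <vector>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// Toy stand-in; the real GetDirectMethods() strides over a packed array
// whose element size depends on pointer_size.
struct ArtMethodStub { bool is_native; };

std::vector<ArtMethodStub>& GetDirectMethods(PointerSize pointer_size) {
  static std::vector<ArtMethodStub> methods = {{true}, {false}};
  (void)pointer_size;  // The stub ignores it; ART uses it for the stride.
  return methods;
}

// The call-site shape after the conversion: fetch the enum once and
// thread it through unchanged, with no size_t round-trips.
size_t CountNativeMethods(PointerSize pointer_size) {
  size_t count = 0;
  for (const ArtMethodStub& m : GetDirectMethods(pointer_size)) {
    count += m.is_native ? 1u : 0u;
  }
  return count;
}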
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 0b3461f..84469ea 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -22,6 +22,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "mirror/class.h"
#include "runtime.h"
@@ -29,7 +30,7 @@
namespace art {
namespace mirror {
-inline uint32_t DexCache::ClassSize(size_t pointer_size) {
+inline uint32_t DexCache::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = Object::kVTableLength + 5;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
@@ -60,7 +61,7 @@
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
-inline ArtField* DexCache::GetResolvedField(uint32_t field_idx, size_t ptr_size) {
+inline ArtField* DexCache::GetResolvedField(uint32_t field_idx, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
DCHECK_LT(field_idx, NumResolvedFields()); // NOTE: Unchecked, i.e. not throwing AIOOB.
ArtField* field = GetElementPtrSize(GetResolvedFields(), field_idx, ptr_size);
@@ -70,13 +71,13 @@
return field;
}
-inline void DexCache::SetResolvedField(uint32_t field_idx, ArtField* field, size_t ptr_size) {
+inline void DexCache::SetResolvedField(uint32_t field_idx, ArtField* field, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
DCHECK_LT(field_idx, NumResolvedFields()); // NOTE: Unchecked, i.e. not throwing AIOOB.
SetElementPtrSize(GetResolvedFields(), field_idx, field, ptr_size);
}
-inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, size_t ptr_size) {
+inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
DCHECK_LT(method_idx, NumResolvedMethods()); // NOTE: Unchecked, i.e. not throwing AIOOB.
ArtMethod* method = GetElementPtrSize<ArtMethod*>(GetResolvedMethods(), method_idx, ptr_size);
@@ -88,19 +89,20 @@
return method;
}
-inline void DexCache::SetResolvedMethod(uint32_t method_idx, ArtMethod* method, size_t ptr_size) {
+inline void DexCache::SetResolvedMethod(uint32_t method_idx,
+ ArtMethod* method,
+ PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
DCHECK_LT(method_idx, NumResolvedMethods()); // NOTE: Unchecked, i.e. not throwing AIOOB.
SetElementPtrSize(GetResolvedMethods(), method_idx, method, ptr_size);
}
template <typename PtrType>
-inline PtrType DexCache::GetElementPtrSize(PtrType* ptr_array, size_t idx, size_t ptr_size) {
- if (ptr_size == 8u) {
+inline PtrType DexCache::GetElementPtrSize(PtrType* ptr_array, size_t idx, PointerSize ptr_size) {
+ if (ptr_size == PointerSize::k64) {
uint64_t element = reinterpret_cast<const uint64_t*>(ptr_array)[idx];
return reinterpret_cast<PtrType>(dchecked_integral_cast<uintptr_t>(element));
} else {
- DCHECK_EQ(ptr_size, 4u);
uint32_t element = reinterpret_cast<const uint32_t*>(ptr_array)[idx];
return reinterpret_cast<PtrType>(dchecked_integral_cast<uintptr_t>(element));
}
@@ -110,12 +112,11 @@
inline void DexCache::SetElementPtrSize(PtrType* ptr_array,
size_t idx,
PtrType ptr,
- size_t ptr_size) {
- if (ptr_size == 8u) {
+ PointerSize ptr_size) {
+ if (ptr_size == PointerSize::k64) {
reinterpret_cast<uint64_t*>(ptr_array)[idx] =
dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr));
} else {
- DCHECK_EQ(ptr_size, 4u);
reinterpret_cast<uint32_t*>(ptr_array)[idx] =
dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
}
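[Editor's note: the dropped DCHECK_EQ(ptr_size, 4u) lines are the payoff of the enum — with only two enumerators, the else branch can no longer see any width but k32, so the runtime check became structurally redundant. A standalone sketch of the store half under that assumption, using only standard C++:]

#include <cstddef>
#include <cstdint>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// Store a native pointer into an array whose element width is chosen at
// runtime. An invalid width is unrepresentable in the enum, so the old
// DCHECK in the else branch has nothing left to check.
template <typename PtrType>
void SetElementPtrSize(void* ptr_array, size_t idx, PtrType ptr, PointerSize ptr_size) {
  if (ptr_size == PointerSize::k64) {
    reinterpret_cast<uint64_t*>(ptr_array)[idx] =
        static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr));
  } else {  // Necessarily PointerSize::k32.
    reinterpret_cast<uint32_t*>(ptr_array)[idx] =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
  }
}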
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 692c6cb..57066d8 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -41,7 +41,7 @@
uint32_t num_resolved_methods,
ArtField** resolved_fields,
uint32_t num_resolved_fields,
- size_t pointer_size) {
+ PointerSize pointer_size) {
CHECK(dex_file != nullptr);
CHECK(location != nullptr);
CHECK_EQ(num_strings != 0u, strings != nullptr);
@@ -67,7 +67,7 @@
}
}
-void DexCache::Fixup(ArtMethod* trampoline, size_t pointer_size) {
+void DexCache::Fixup(ArtMethod* trampoline, PointerSize pointer_size) {
// Fix up the resolved methods array to contain the trampoline for resolution.
CHECK(trampoline != nullptr);
CHECK(trampoline->IsRuntimeMethod());
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 7912510..d02a0d8 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -39,7 +39,7 @@
class MANAGED DexCache FINAL : public Object {
public:
// Size of java.lang.DexCache.class.
- static uint32_t ClassSize(size_t pointer_size);
+ static uint32_t ClassSize(PointerSize pointer_size);
// Size of an instance of java.lang.DexCache not including referenced values.
static constexpr uint32_t InstanceSize() {
@@ -56,9 +56,9 @@
uint32_t num_resolved_methods,
ArtField** resolved_fields,
uint32_t num_resolved_fields,
- size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- void Fixup(ArtMethod* trampoline, size_t pointer_size)
+ void Fixup(ArtMethod* trampoline, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
@@ -119,18 +119,20 @@
void SetResolvedType(uint32_t type_idx, Class* resolved) SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, size_t ptr_size)
+ ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved, size_t ptr_size)
+ ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx,
+ ArtMethod* resolved,
+ PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
- ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, size_t ptr_size)
+ ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
- ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size)
+ ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
GcRoot<String>* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -202,10 +204,10 @@
// so they need to be public.
template <typename PtrType>
- static PtrType GetElementPtrSize(PtrType* ptr_array, size_t idx, size_t ptr_size);
+ static PtrType GetElementPtrSize(PtrType* ptr_array, size_t idx, PointerSize ptr_size);
template <typename PtrType>
- static void SetElementPtrSize(PtrType* ptr_array, size_t idx, PtrType ptr, size_t ptr_size);
+ static void SetElementPtrSize(PtrType* ptr_array, size_t idx, PtrType ptr, PointerSize ptr_size);
private:
// Visit instance fields of the dex cache as well as its associated arrays.
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 4183476..8b0f8ce 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -27,9 +27,8 @@
namespace mirror {
-template <size_t kPointerSize, bool kTransactionActive>
-inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field,
- bool force_resolve) {
+template <PointerSize kPointerSize, bool kTransactionActive>
+inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field, bool force_resolve) {
StackHandleScope<2> hs(self);
// Try to resolve type before allocating since this is a thread suspension point.
Handle<mirror::Class> type = hs.NewHandle(field->GetType<true>());
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index ff6847c..65f6b16 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -68,7 +68,7 @@
}
}
mirror::DexCache* const dex_cache = declaring_class->GetDexCache();
- ArtField* const art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), sizeof(void*));
+ ArtField* const art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), kRuntimePointerSize);
CHECK(art_field != nullptr);
CHECK_EQ(declaring_class, art_field->GetDeclaringClass());
return art_field;
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index 2bd6132..93fd7f1 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_FIELD_H_
#include "accessible_object.h"
+#include "base/enums.h"
#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
@@ -92,7 +93,7 @@
// Slow, try to use only for PrettyField and such.
ArtField* GetArtField() SHARED_REQUIRES(Locks::mutator_lock_);
- template <size_t kPointerSize, bool kTransactionActive = false>
+ template <PointerSize kPointerSize, bool kTransactionActive = false>
static mirror::Field* CreateFromArtField(Thread* self, ArtField* field,
bool force_resolve)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 3cc70e1..ef16719 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -51,7 +51,7 @@
array_class_ = GcRoot<Class>(nullptr);
}
-template <size_t kPointerSize, bool kTransactionActive>
+template <PointerSize kPointerSize, bool kTransactionActive>
Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(!method->IsConstructor()) << PrettyMethod(method);
auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
@@ -62,10 +62,14 @@
return ret;
}
-template Method* Method::CreateFromArtMethod<4U, false>(Thread* self, ArtMethod* method);
-template Method* Method::CreateFromArtMethod<4U, true>(Thread* self, ArtMethod* method);
-template Method* Method::CreateFromArtMethod<8U, false>(Thread* self, ArtMethod* method);
-template Method* Method::CreateFromArtMethod<8U, true>(Thread* self, ArtMethod* method);
+template Method* Method::CreateFromArtMethod<PointerSize::k32, false>(Thread* self,
+ ArtMethod* method);
+template Method* Method::CreateFromArtMethod<PointerSize::k32, true>(Thread* self,
+ ArtMethod* method);
+template Method* Method::CreateFromArtMethod<PointerSize::k64, false>(Thread* self,
+ ArtMethod* method);
+template Method* Method::CreateFromArtMethod<PointerSize::k64, true>(Thread* self,
+ ArtMethod* method);
void Method::VisitRoots(RootVisitor* visitor) {
static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
@@ -99,7 +103,7 @@
array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
-template <size_t kPointerSize, bool kTransactionActive>
+template <PointerSize kPointerSize, bool kTransactionActive>
Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(method->IsConstructor()) << PrettyMethod(method);
auto* ret = down_cast<Constructor*>(StaticClass()->AllocObject(self));
@@ -110,10 +114,14 @@
return ret;
}
-template Constructor* Constructor::CreateFromArtMethod<4U, false>(Thread* self, ArtMethod* method);
-template Constructor* Constructor::CreateFromArtMethod<4U, true>(Thread* self, ArtMethod* method);
-template Constructor* Constructor::CreateFromArtMethod<8U, false>(Thread* self, ArtMethod* method);
-template Constructor* Constructor::CreateFromArtMethod<8U, true>(Thread* self, ArtMethod* method);
+template Constructor* Constructor::CreateFromArtMethod<PointerSize::k32, false>(
+ Thread* self, ArtMethod* method);
+template Constructor* Constructor::CreateFromArtMethod<PointerSize::k32, true>(
+ Thread* self, ArtMethod* method);
+template Constructor* Constructor::CreateFromArtMethod<PointerSize::k64, false>(
+ Thread* self, ArtMethod* method);
+template Constructor* Constructor::CreateFromArtMethod<PointerSize::k64, true>(
+ Thread* self, ArtMethod* method);
} // namespace mirror
} // namespace art
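[Editor's note: the instantiation rewrite above works because, since C++11, scoped enumerations are valid non-type template parameters. A minimal, runnable illustration of the same pattern, with toy names rather than ART's:]

#include <cstddef>
#include <iostream>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// A scoped enum works as a non-type template parameter exactly like the
// old size_t did, but rejects arbitrary integers at the call site.
template <PointerSize kPointerSize>
size_t EntryBytes(size_t count) {
  return count * static_cast<size_t>(kPointerSize);
}

// Explicit instantiations mirror the <PointerSize::k32, ...> forms above.
template size_t EntryBytes<PointerSize::k32>(size_t count);
template size_t EntryBytes<PointerSize::k64>(size_t count);

int main() {
  std::cout << EntryBytes<PointerSize::k64>(3) << "\n";  // Prints 24.
  return 0;
}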
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index ecd6a74..be51784 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -28,7 +28,7 @@
// C++ mirror of java.lang.reflect.Method.
class MANAGED Method : public AbstractMethod {
public:
- template <size_t kPointerSize, bool kTransactionActive>
+ template <PointerSize kPointerSize, bool kTransactionActive>
static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
@@ -60,7 +60,7 @@
// C++ mirror of java.lang.reflect.Constructor.
class MANAGED Constructor: public AbstractMethod {
public:
- template <size_t kPointerSize, bool kTransactionActive>
+ template <PointerSize kPointerSize, bool kTransactionActive>
static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index e1097fa..0592c6c 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -40,7 +40,7 @@
namespace art {
namespace mirror {
-inline uint32_t Object::ClassSize(size_t pointer_size) {
+inline uint32_t Object::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = kVTableLength;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index e174cbc..a4bdbad 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_OBJECT_H_
#include "base/casts.h"
+#include "base/enums.h"
#include "globals.h"
#include "object_reference.h"
#include "offsets.h"
@@ -74,7 +75,7 @@
static constexpr size_t kVTableLength = 11;
// The size of the java.lang.Class representing a java.lang.Object.
- static uint32_t ClassSize(size_t pointer_size);
+ static uint32_t ClassSize(PointerSize pointer_size);
// Size of an instance of java.lang.Object.
static constexpr uint32_t InstanceSize() {
@@ -473,7 +474,7 @@
void SetFieldPtr(MemberOffset field_offset, T new_value)
SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, new_value, sizeof(void*));
+ field_offset, new_value, kRuntimePointerSize);
}
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
@@ -485,11 +486,11 @@
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
- ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
- size_t pointer_size)
+ ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset,
+ T new_value,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
- if (pointer_size == 4) {
+ if (pointer_size == PointerSize::k32) {
intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
DCHECK_EQ(static_cast<int32_t>(ptr), ptr); // Check that we don't lose any non-zero bits.
SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
@@ -521,19 +522,19 @@
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr(MemberOffset field_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*));
+ return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, kRuntimePointerSize);
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr64(MemberOffset field_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, 8u);
+ return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset,
+ PointerSize::k64);
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
- ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
+ ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
- if (pointer_size == 4) {
+ if (pointer_size == PointerSize::k32) {
return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
} else {
int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
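[Editor's note: as in the hunk above, the un-suffixed accessors now forward kRuntimePointerSize where they previously passed sizeof(void*). A self-contained sketch of that forwarding shape — GetField32/GetField64 are toy stand-ins for the real field loads:]

#include <cstddef>
#include <cstdint>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };
constexpr PointerSize kRuntimePointerSize =
    sizeof(void*) == 8U ? PointerSize::k64 : PointerSize::k32;

uint32_t GetField32() { return 0u; }  // Toy stand-in for a 32-bit field load.
uint64_t GetField64() { return 0u; }  // Toy stand-in for a 64-bit field load.

// Runtime dispatch on the enum, as in GetFieldPtrWithSize above.
template <typename T>
T GetFieldPtrWithSize(PointerSize pointer_size) {
  if (pointer_size == PointerSize::k32) {
    return reinterpret_cast<T>(static_cast<uintptr_t>(GetField32()));
  }
  return reinterpret_cast<T>(static_cast<uintptr_t>(GetField64()));
}

// The un-suffixed accessor forwards the runtime's own pointer width.
template <typename T>
T GetFieldPtr() {
  return GetFieldPtrWithSize<T>(kRuntimePointerSize);
}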
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 4257396..a99d616 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -26,7 +26,7 @@
class MANAGED ObjectArray: public Array {
public:
// The size of Object[].class.
- static uint32_t ClassSize(size_t pointer_size) {
+ static uint32_t ClassSize(PointerSize pointer_size) {
return Array::ClassSize(pointer_size);
}
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index c1284a6..0034220 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -24,6 +24,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "asm_support.h"
+#include "base/enums.h"
#include "class-inl.h"
#include "class_linker.h"
#include "class_linker-inl.h"
@@ -78,9 +79,11 @@
EXPECT_EQ(kObjectReferenceSize, sizeof(HeapReference<Object>));
EXPECT_EQ(kObjectHeaderSize, sizeof(Object));
EXPECT_EQ(ART_METHOD_QUICK_CODE_OFFSET_32,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value());
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(PointerSize::k32).
+ Int32Value());
EXPECT_EQ(ART_METHOD_QUICK_CODE_OFFSET_64,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value());
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(PointerSize::k64).
+ Int32Value());
}
TEST_F(ObjectTest, IsInSamePackage) {
@@ -306,7 +309,7 @@
// Pretend we are trying to call 'new char[3]' from String.toCharArray.
ScopedObjectAccess soa(Thread::Current());
Class* java_util_Arrays = class_linker_->FindSystemClass(soa.Self(), "Ljava/util/Arrays;");
- ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V", sizeof(void*));
+ ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V", kRuntimePointerSize);
const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId("[I");
ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
@@ -363,7 +366,7 @@
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<ClassLoader*>(class_loader)));
Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
- ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*));
+ ArtMethod* clinit = klass->FindClassInitializer(kRuntimePointerSize);
const DexFile::TypeId* klass_type_id = dex_file->FindTypeId("LStaticsFromCode;");
ASSERT_TRUE(klass_type_id != nullptr);
@@ -499,22 +502,22 @@
Class* klass2 = linker->FindClass(soa.Self(), "LProtoCompare2;", class_loader_2);
ASSERT_TRUE(klass2 != nullptr);
- ArtMethod* m1_1 = klass1->GetVirtualMethod(0, sizeof(void*));
+ ArtMethod* m1_1 = klass1->GetVirtualMethod(0, kRuntimePointerSize);
EXPECT_STREQ(m1_1->GetName(), "m1");
- ArtMethod* m2_1 = klass1->GetVirtualMethod(1, sizeof(void*));
+ ArtMethod* m2_1 = klass1->GetVirtualMethod(1, kRuntimePointerSize);
EXPECT_STREQ(m2_1->GetName(), "m2");
- ArtMethod* m3_1 = klass1->GetVirtualMethod(2, sizeof(void*));
+ ArtMethod* m3_1 = klass1->GetVirtualMethod(2, kRuntimePointerSize);
EXPECT_STREQ(m3_1->GetName(), "m3");
- ArtMethod* m4_1 = klass1->GetVirtualMethod(3, sizeof(void*));
+ ArtMethod* m4_1 = klass1->GetVirtualMethod(3, kRuntimePointerSize);
EXPECT_STREQ(m4_1->GetName(), "m4");
- ArtMethod* m1_2 = klass2->GetVirtualMethod(0, sizeof(void*));
+ ArtMethod* m1_2 = klass2->GetVirtualMethod(0, kRuntimePointerSize);
EXPECT_STREQ(m1_2->GetName(), "m1");
- ArtMethod* m2_2 = klass2->GetVirtualMethod(1, sizeof(void*));
+ ArtMethod* m2_2 = klass2->GetVirtualMethod(1, kRuntimePointerSize);
EXPECT_STREQ(m2_2->GetName(), "m2");
- ArtMethod* m3_2 = klass2->GetVirtualMethod(2, sizeof(void*));
+ ArtMethod* m3_2 = klass2->GetVirtualMethod(2, kRuntimePointerSize);
EXPECT_STREQ(m3_2->GetName(), "m3");
- ArtMethod* m4_2 = klass2->GetVirtualMethod(3, sizeof(void*));
+ ArtMethod* m4_2 = klass2->GetVirtualMethod(3, kRuntimePointerSize);
EXPECT_STREQ(m4_2->GetName(), "m4");
}
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index 12bfe38..039989b 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -22,7 +22,7 @@
namespace art {
namespace mirror {
-inline uint32_t Reference::ClassSize(size_t pointer_size) {
+inline uint32_t Reference::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = Object::kVTableLength + 4;
return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
}
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index e8ad5fa..38c6616 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_REFERENCE_H_
#define ART_RUNTIME_MIRROR_REFERENCE_H_
+#include "base/enums.h"
#include "class.h"
#include "gc_root.h"
#include "object.h"
@@ -43,7 +44,7 @@
class MANAGED Reference : public Object {
public:
// Size of java.lang.ref.Reference.class.
- static uint32_t ClassSize(size_t pointer_size);
+ static uint32_t ClassSize(PointerSize pointer_size);
// Size of an instance of java.lang.ref.Reference.
static constexpr uint32_t InstanceSize() {
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 96f2098..d3660e5 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -33,7 +33,7 @@
namespace art {
namespace mirror {
-inline uint32_t String::ClassSize(size_t pointer_size) {
+inline uint32_t String::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = Object::kVTableLength + 57;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 2, pointer_size);
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index e2cfb8d..d492ba3 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -35,7 +35,7 @@
class MANAGED String FINAL : public Object {
public:
// Size of java.lang.String.class.
- static uint32_t ClassSize(size_t pointer_size);
+ static uint32_t ClassSize(PointerSize pointer_size);
// Size of an instance of java.lang.String not including its value array.
static constexpr uint32_t InstanceSize() {
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index f068b3e..0bccc8b 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -17,6 +17,7 @@
#include "throwable.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class-inl.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
@@ -106,7 +107,7 @@
if (depth == 0) {
result += "(Throwable with empty stack trace)";
} else {
- const size_t ptr_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ const PointerSize ptr_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (int32_t i = 0; i < depth; ++i) {
ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, ptr_size);
uintptr_t dex_pc = method_trace->GetElementPtrSize<uintptr_t>(i + depth, ptr_size);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index d987f65..45e49e2 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -29,6 +29,7 @@
#include "art_method-inl.h"
#include "arch/instruction_set.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_throws.h"
#include "debugger.h"
@@ -329,7 +330,7 @@
static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx,
bool is_static)
SHARED_REQUIRES(Locks::mutator_lock_) {
- ArtField* field = dex_cache->GetResolvedField(field_idx, sizeof(void*));
+ ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
if (field != nullptr) {
return;
}
@@ -350,14 +351,14 @@
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved field " << PrettyField(field);
- dex_cache->SetResolvedField(field_idx, field, sizeof(void*));
+ dex_cache->SetResolvedField(field_idx, field, kRuntimePointerSize);
}
// Based on ClassLinker::ResolveMethod.
static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx,
InvokeType invoke_type)
SHARED_REQUIRES(Locks::mutator_lock_) {
- ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, sizeof(void*));
+ ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, kRuntimePointerSize);
if (method != nullptr) {
return;
}
@@ -370,14 +371,14 @@
switch (invoke_type) {
case kDirect:
case kStatic:
- method = klass->FindDirectMethod(dex_cache.Get(), method_idx, sizeof(void*));
+ method = klass->FindDirectMethod(dex_cache.Get(), method_idx, kRuntimePointerSize);
break;
case kInterface:
- method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, sizeof(void*));
+ method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, kRuntimePointerSize);
break;
case kSuper:
case kVirtual:
- method = klass->FindVirtualMethod(dex_cache.Get(), method_idx, sizeof(void*));
+ method = klass->FindVirtualMethod(dex_cache.Get(), method_idx, kRuntimePointerSize);
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
@@ -387,7 +388,7 @@
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved method " << PrettyMethod(method);
- dex_cache->SetResolvedMethod(method_idx, method, sizeof(void*));
+ dex_cache->SetResolvedMethod(method_idx, method, kRuntimePointerSize);
}
struct DexCacheStats {
@@ -462,7 +463,7 @@
}
}
for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) {
- ArtMethod* method = dex_cache->GetResolvedMethod(j, sizeof(void*));
+ ArtMethod* method = dex_cache->GetResolvedMethod(j, kRuntimePointerSize);
if (method != nullptr) {
filled->num_methods++;
}
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 02a97f5..6d5e7c7 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -19,6 +19,7 @@
#include <iostream>
#include "art_field-inl.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
@@ -136,9 +137,9 @@
}
for (ArtField& field : ifields) {
if (!public_only || field.IsPublic()) {
- auto* reflect_field = mirror::Field::CreateFromArtField<sizeof(void*)>(self,
- &field,
- force_resolve);
+ auto* reflect_field = mirror::Field::CreateFromArtField<kRuntimePointerSize>(self,
+ &field,
+ force_resolve);
if (reflect_field == nullptr) {
if (kIsDebugBuild) {
self->AssertPendingException();
@@ -151,9 +152,9 @@
}
for (ArtField& field : sfields) {
if (!public_only || field.IsPublic()) {
- auto* reflect_field = mirror::Field::CreateFromArtField<sizeof(void*)>(self,
- &field,
- force_resolve);
+ auto* reflect_field = mirror::Field::CreateFromArtField<kRuntimePointerSize>(self,
+ &field,
+ force_resolve);
if (reflect_field == nullptr) {
if (kIsDebugBuild) {
self->AssertPendingException();
@@ -226,15 +227,11 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtField* art_field = FindFieldByName(self, name, c->GetIFieldsPtr());
if (art_field != nullptr) {
- return mirror::Field::CreateFromArtField<sizeof(void*)>(self,
- art_field,
- true);
+ return mirror::Field::CreateFromArtField<kRuntimePointerSize>(self, art_field, true);
}
art_field = FindFieldByName(self, name, c->GetSFieldsPtr());
if (art_field != nullptr) {
- return mirror::Field::CreateFromArtField<sizeof(void*)>(self,
- art_field,
- true);
+ return mirror::Field::CreateFromArtField<kRuntimePointerSize>(self, art_field, true);
}
return nullptr;
}
@@ -331,9 +328,10 @@
static jobject Class_getDeclaredConstructorInternal(
JNIEnv* env, jobject javaThis, jobjectArray args) {
ScopedFastNativeObjectAccess soa(env);
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
- mirror::Constructor* result = mirror::Class::GetDeclaredConstructorInternal<sizeof(void*), false>(
+ mirror::Constructor* result = mirror::Class::GetDeclaredConstructorInternal<kRuntimePointerSize,
+ false>(
soa.Self(),
DecodeClass(soa, javaThis),
soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
@@ -353,7 +351,7 @@
Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t constructor_count = 0;
// Two-pass approach for speed.
- for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
+ for (auto& m : h_klass->GetDirectMethods(kRuntimePointerSize)) {
constructor_count += MethodMatchesConstructor(&m, publicOnly != JNI_FALSE) ? 1u : 0u;
}
auto h_constructors = hs.NewHandle(mirror::ObjectArray<mirror::Constructor>::Alloc(
@@ -363,11 +361,11 @@
return nullptr;
}
constructor_count = 0;
- for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
+ for (auto& m : h_klass->GetDirectMethods(kRuntimePointerSize)) {
if (MethodMatchesConstructor(&m, publicOnly != JNI_FALSE)) {
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
- auto* constructor = mirror::Constructor::CreateFromArtMethod<sizeof(void*), false>(
+ auto* constructor = mirror::Constructor::CreateFromArtMethod<kRuntimePointerSize, false>(
soa.Self(), &m);
if (UNLIKELY(constructor == nullptr)) {
soa.Self()->AssertPendingOOMException();
@@ -382,9 +380,9 @@
static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
jobject name, jobjectArray args) {
ScopedFastNativeObjectAccess soa(env);
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
- mirror::Method* result = mirror::Class::GetDeclaredMethodInternal<sizeof(void*), false>(
+ mirror::Method* result = mirror::Class::GetDeclaredMethodInternal<kRuntimePointerSize, false>(
soa.Self(),
DecodeClass(soa, javaThis),
soa.Decode<mirror::String*>(name),
@@ -398,7 +396,7 @@
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t num_methods = 0;
- for (auto& m : klass->GetDeclaredMethods(sizeof(void*))) {
+ for (auto& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
auto modifiers = m.GetAccessFlags();
// Add non-constructor declared methods.
if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
@@ -409,13 +407,14 @@
auto ret = hs.NewHandle(mirror::ObjectArray<mirror::Method>::Alloc(
soa.Self(), mirror::Method::ArrayClass(), num_methods));
num_methods = 0;
- for (auto& m : klass->GetDeclaredMethods(sizeof(void*))) {
+ for (auto& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
auto modifiers = m.GetAccessFlags();
if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
(modifiers & kAccConstructor) == 0) {
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
- auto* method = mirror::Method::CreateFromArtMethod<sizeof(void*), false>(soa.Self(), &m);
+ auto* method =
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), &m);
if (method == nullptr) {
soa.Self()->AssertPendingException();
return nullptr;
@@ -627,7 +626,7 @@
auto* constructor = klass->GetDeclaredConstructor(
soa.Self(),
ScopedNullHandle<mirror::ObjectArray<mirror::Class>>(),
- sizeof(void*));
+ kRuntimePointerSize);
if (UNLIKELY(constructor == nullptr)) {
soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
"%s has no zero argument constructor",
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 54b8afd..dd46233 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -17,6 +17,7 @@
#include "java_lang_reflect_Constructor.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "jni_internal.h"
@@ -65,7 +66,7 @@
static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod)
- ->GetInterfaceMethodIfProxy(sizeof(void*));
+ ->GetInterfaceMethodIfProxy(kRuntimePointerSize);
mirror::ObjectArray<mirror::Class>* result_array =
method->GetDexFile()->GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 78999c2..c3f2a27 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -17,6 +17,7 @@
#include "java_lang_reflect_Method.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "jni_internal.h"
@@ -57,7 +58,7 @@
mirror::Class* klass = method->GetDeclaringClass();
int throws_index = -1;
size_t i = 0;
- for (const auto& m : klass->GetDeclaredVirtualMethods(sizeof(void*))) {
+ for (const auto& m : klass->GetDeclaredVirtualMethods(kRuntimePointerSize)) {
if (&m == method) {
throws_index = i;
break;
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 61a1085..155c008 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -21,6 +21,7 @@
#include "nativebridge/native_bridge.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "dex_file-inl.h"
@@ -45,7 +46,7 @@
mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
uint32_t native_method_count = 0;
- for (auto& m : c->GetMethods(sizeof(void*))) {
+ for (auto& m : c->GetMethods(kRuntimePointerSize)) {
native_method_count += m.IsNative() ? 1u : 0u;
}
return native_method_count;
@@ -60,7 +61,7 @@
mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
uint32_t count = 0;
- for (auto& m : c->GetMethods(sizeof(void*))) {
+ for (auto& m : c->GetMethods(kRuntimePointerSize)) {
if (m.IsNative()) {
if (count < method_count) {
methods[count].name = m.GetName();
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 61dc287..68610a7 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -34,6 +34,7 @@
#include "art_method-inl.h"
#include "base/bit_vector.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
@@ -260,7 +261,7 @@
return false;
}
- size_t pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
+ PointerSize pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
uint8_t* dex_cache_arrays = bss_begin_;
uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
oat_dex_files_storage_.reserve(dex_file_count);
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 9d5063f..aaf5c0c 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -19,6 +19,7 @@
#include <ostream>
+#include "base/enums.h"
#include "globals.h"
namespace art {
@@ -51,12 +52,15 @@
};
// Offsets relative to the current running thread.
-template<size_t pointer_size>
+template<PointerSize pointer_size>
class ThreadOffset : public Offset {
public:
explicit ThreadOffset(size_t val) : Offset(val) {}
};
+using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
+using ThreadOffset64 = ThreadOffset<PointerSize::k64>;
+
// Offsets relative to an object.
class MemberOffset : public Offset {
public:
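[Editor's note: a sketch of why the new ThreadOffset32/ThreadOffset64 aliases are more than shorthand — templating the offset on PointerSize makes the two widths distinct types, so mixing them fails to compile. Toy reimplementation, assuming Offset's shape from its use above:]

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// Each pointer width gets its own offset type; passing a ThreadOffset32
// where a ThreadOffset64 is expected is now a compile-time error.
template <PointerSize pointer_size>
class ThreadOffset {
 public:
  explicit ThreadOffset(size_t val) : val_(val) {}
  size_t Value() const { return val_; }
 private:
  size_t val_;
};

using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
using ThreadOffset64 = ThreadOffset<PointerSize::k64>;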
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 1ce7e48..82e57b4 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -18,6 +18,7 @@
#include <vector>
#include "art_field-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_compiler_test.h"
#include "mirror/field-inl.h"
@@ -60,31 +61,31 @@
jsize array_index = 0;
// Fill the method array
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
ArtMethod* method = javaLangObject->FindDeclaredVirtualMethod(
- "equals", "(Ljava/lang/Object;)Z", sizeof(void*));
+ "equals", "(Ljava/lang/Object;)Z", kRuntimePointerSize);
CHECK(method != nullptr);
DCHECK(!Runtime::Current()->IsActiveTransaction());
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod<sizeof(void*), false>(soa.Self(), method)));
- method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I", sizeof(void*));
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), method)));
+ method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I", kRuntimePointerSize);
CHECK(method != nullptr);
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod<sizeof(void*), false>(soa.Self(), method)));
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), method)));
method = javaLangObject->FindDeclaredVirtualMethod(
- "toString", "()Ljava/lang/String;", sizeof(void*));
+ "toString", "()Ljava/lang/String;", kRuntimePointerSize);
CHECK(method != nullptr);
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod<sizeof(void*), false>(soa.Self(), method)));
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), method)));
// Now add all interfaces' virtual methods.
for (mirror::Class* interface : interfaces) {
- for (auto& m : interface->GetDeclaredVirtualMethods(sizeof(void*))) {
+ for (auto& m : interface->GetDeclaredVirtualMethods(kRuntimePointerSize)) {
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod<sizeof(void*), false>(soa.Self(), &m)));
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), &m)));
}
}
CHECK_EQ(array_index, methods_count);
@@ -228,19 +229,19 @@
EXPECT_EQ(static_fields1->At(0).GetDeclaringClass(), proxyClass1.Get());
EXPECT_EQ(static_fields1->At(1).GetDeclaringClass(), proxyClass1.Get());
- ASSERT_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ ASSERT_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
ASSERT_FALSE(Runtime::Current()->IsActiveTransaction());
Handle<mirror::Field> field00 =
- hs.NewHandle(mirror::Field::CreateFromArtField<sizeof(void*), false>(
+ hs.NewHandle(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
soa.Self(), &static_fields0->At(0), true));
Handle<mirror::Field> field01 =
- hs.NewHandle(mirror::Field::CreateFromArtField<sizeof(void*), false>(
+ hs.NewHandle(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
soa.Self(), &static_fields0->At(1), true));
Handle<mirror::Field> field10 =
- hs.NewHandle(mirror::Field::CreateFromArtField<sizeof(void*), false>(
+ hs.NewHandle(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
soa.Self(), &static_fields1->At(0), true));
Handle<mirror::Field> field11 =
- hs.NewHandle(mirror::Field::CreateFromArtField<sizeof(void*), false>(
+ hs.NewHandle(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
soa.Self(), &static_fields1->At(1), true));
EXPECT_EQ(field00->GetArtField(), &static_fields0->At(0));
EXPECT_EQ(field01->GetArtField(), &static_fields0->At(1));
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 1dea562..a6e3693 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
@@ -145,7 +146,7 @@
DCHECK_EQ(invoke_direct->VRegC_35c(),
method->GetCodeItem()->registers_size_ - method->GetCodeItem()->ins_size_);
uint32_t method_index = invoke_direct->VRegB_35c();
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
ArtMethod* target_method =
method->GetDexCache()->GetResolvedMethod(method_index, pointer_size);
if (kIsDebugBuild && target_method != nullptr) {
@@ -214,7 +215,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsInstructionIPut(new_iput->Opcode()));
uint32_t field_index = new_iput->VRegC_22c();
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::DexCache* dex_cache = method->GetDexCache();
ArtField* field = dex_cache->GetResolvedField(field_index, pointer_size);
if (UNLIKELY(field == nullptr)) {
@@ -732,7 +733,7 @@
return false;
}
mirror::DexCache* dex_cache = method->GetDexCache();
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
ArtField* field = dex_cache->GetResolvedField(field_idx, pointer_size);
if (field == nullptr || field->IsStatic()) {
return false;
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index e9dd7aa..46d9e7f 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -18,6 +18,7 @@
#include "arch/context.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -509,11 +510,7 @@
} else {
// PC needs to be of the quick-to-interpreter bridge.
int32_t offset;
- #ifdef __LP64__
- offset = GetThreadOffset<8>(kQuickQuickToInterpreterBridge).Int32Value();
- #else
- offset = GetThreadOffset<4>(kQuickQuickToInterpreterBridge).Int32Value();
- #endif
+ offset = GetThreadOffset<kRuntimePointerSize>(kQuickQuickToInterpreterBridge).Int32Value();
handler_quick_frame_pc_ = *reinterpret_cast<uintptr_t*>(
reinterpret_cast<uint8_t*>(self_) + offset);
}
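
Note on the quick_exception_handler.cc change: the __LP64__ dispatch collapses because base/enums.h pins the enum's underlying values to the pointer byte widths and derives a compile-time constant for the current build. A sketch of the assumed definitions (not the verbatim header):

#include <cstddef>

// Assumed shape of base/enums.h for this change (sketch, not verbatim).
enum class PointerSize : size_t {
  k32 = 4,
  k64 = 8,
};

// Selected at compile time, replacing the former per-arch #ifdef.
static constexpr PointerSize kRuntimePointerSize =
    sizeof(void*) == 8U ? PointerSize::k64 : PointerSize::k32;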
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 28c27cd..8a531d9 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
@@ -222,7 +223,7 @@
for (size_t i = 1, args_offset = 0; i < shorty_len_; ++i, ++args_offset) {
mirror::Object* arg = args->Get(args_offset);
if (((shorty_[i] == 'L') && (arg != nullptr)) || ((arg == nullptr && shorty_[i] != 'L'))) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* dst_class =
m->GetClassFromTypeIndex(classes->GetTypeItem(args_offset).type_idx_,
true /* resolve */,
@@ -358,7 +359,7 @@
}
// TODO: If args contain object references, it may cause problems.
Thread* const self = Thread::Current();
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (uint32_t i = 0; i < num_params; i++) {
uint16_t type_idx = params->GetTypeItem(i).type_idx_;
mirror::Class* param_type = m->GetClassFromTypeIndex(type_idx,
@@ -424,7 +425,7 @@
static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) {
- return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, sizeof(void*));
+ return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, kRuntimePointerSize);
}
@@ -434,7 +435,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t* args = arg_array->GetArray();
if (UNLIKELY(soa.Env()->check_jni)) {
- CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(sizeof(void*)), args);
+ CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(kRuntimePointerSize), args);
}
method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
}
@@ -458,7 +459,8 @@
}
mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
- const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
+ const char* shorty =
+ method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromVarArgs(soa, receiver, args);
@@ -488,7 +490,8 @@
}
mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
- const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
+ const char* shorty =
+ method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromJValues(soa, receiver, args);
@@ -519,7 +522,8 @@
receiver = nullptr;
}
uint32_t shorty_len = 0;
- const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
+ const char* shorty =
+ method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromJValues(soa, receiver, args);
@@ -550,7 +554,8 @@
receiver = nullptr;
}
uint32_t shorty_len = 0;
- const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
+ const char* shorty =
+ method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromVarArgs(soa, receiver, args);
@@ -602,13 +607,13 @@
}
// Find the actual implementation of the virtual method.
- m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m, sizeof(void*));
+ m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m, kRuntimePointerSize);
}
}
// Get our arrays of arguments and their types, and check they're the same size.
auto* objects = soa.Decode<mirror::ObjectArray<mirror::Object>*>(javaArgs);
- auto* np_method = m->GetInterfaceMethodIfProxy(sizeof(void*));
+ auto* np_method = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
const DexFile::TypeList* classes = np_method->GetParameterTypeList();
uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
uint32_t arg_count = (objects != nullptr) ? objects->GetLength() : 0;
@@ -775,8 +780,9 @@
UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str()).c_str());
} else {
- ThrowNullPointerException(StringPrintf("Expected to unbox a '%s' primitive type but was returned null",
- PrettyDescriptor(dst_class).c_str()).c_str());
+ ThrowNullPointerException(
+ StringPrintf("Expected to unbox a '%s' primitive type but was returned null",
+ PrettyDescriptor(dst_class).c_str()).c_str());
}
return false;
}
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index a098bf0..016f3c7 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -21,6 +21,7 @@
#include "ScopedLocalRef.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "common_compiler_test.h"
#include "scoped_thread_state_change.h"
@@ -107,8 +108,8 @@
class_loader);
CHECK(c != nullptr);
- *method = is_static ? c->FindDirectMethod(method_name, method_signature, sizeof(void*))
- : c->FindVirtualMethod(method_name, method_signature, sizeof(void*));
+ *method = is_static ? c->FindDirectMethod(method_name, method_signature, kRuntimePointerSize)
+ : c->FindVirtualMethod(method_name, method_signature, kRuntimePointerSize);
CHECK(*method != nullptr);
if (is_static) {
@@ -517,7 +518,9 @@
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
ASSERT_TRUE(klass != nullptr);
- ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V", sizeof(void*));
+ ArtMethod* method = klass->FindDirectMethod("main",
+ "([Ljava/lang/String;)V",
+ kRuntimePointerSize);
ASSERT_TRUE(method != nullptr);
// Start runtime.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 079c079..9f0ef7c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -55,6 +55,7 @@
#include "atomic.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
@@ -1508,7 +1509,7 @@
// Visiting the roots of these ArtMethods is not currently required since all the GcRoots are
// null.
BufferedRootVisitor<16> buffered_visitor(visitor, RootInfo(kRootVMInternal));
- const size_t pointer_size = GetClassLinker()->GetImagePointerSize();
+ const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
if (HasResolutionMethod()) {
resolution_method_->VisitRoots(buffered_visitor, pointer_size);
}
@@ -1592,7 +1593,7 @@
ClassLinker* const class_linker = GetClassLinker();
ArtMethod* method = class_linker->CreateRuntimeMethod(linear_alloc);
// When compiling, the code pointer will get set later when the image is loaded.
- const size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
+ const PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
if (IsAotCompiler()) {
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
@@ -1614,7 +1615,7 @@
auto* method = GetClassLinker()->CreateRuntimeMethod(GetLinearAlloc());
// When compiling, the code pointer will get set later when the image is loaded.
if (IsAotCompiler()) {
- size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
@@ -1624,7 +1625,7 @@
ArtMethod* Runtime::CreateCalleeSaveMethod() {
auto* method = GetClassLinker()->CreateRuntimeMethod(GetLinearAlloc());
- size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
DCHECK_NE(instruction_set_, kNone);
DCHECK(method->IsRuntimeMethod());
@@ -1919,7 +1920,7 @@
void Runtime::FixupConflictTables() {
// We can only do this after the class linker is created.
- const size_t pointer_size = GetClassLinker()->GetImagePointerSize();
+ const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
imt_unimplemented_method_->SetImtConflictTable(
ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 1d913f2..dc5cada 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -18,6 +18,7 @@
#include "arch/context.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
@@ -167,7 +168,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* StackVisitor::GetThisObject() const {
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
ArtMethod* m = GetMethod();
if (m->IsStatic()) {
return nullptr;
@@ -748,7 +749,8 @@
// The only remaining case is if the method is native and uses the generic JNI stub.
DCHECK(method->IsNative());
ClassLinker* class_linker = runtime->GetClassLinker();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method, sizeof(void*));
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
+ kRuntimePointerSize);
DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << PrettyMethod(method);
// Generic JNI frame.
uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
@@ -908,7 +910,7 @@
int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills,
size_t frame_size, int reg, InstructionSet isa) {
- size_t pointer_size = InstructionSetPointerSize(isa);
+ PointerSize pointer_size = InstructionSetPointerSize(isa);
if (kIsDebugBuild) {
auto* runtime = Runtime::Current();
if (runtime != nullptr) {
@@ -931,7 +933,8 @@
* Special temporaries may have custom locations and the logic above deals with that.
* However, non-special temporaries are placed relative to the outs.
*/
- int temps_start = code_item->outs_size_ * sizeof(uint32_t) + pointer_size /* art method */;
+ int temps_start = code_item->outs_size_ * sizeof(uint32_t)
+ + static_cast<size_t>(pointer_size) /* art method */;
int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
return temps_start + relative_offset;
} else if (reg < num_regs) {
@@ -939,7 +942,8 @@
return locals_start + (reg * sizeof(uint32_t));
} else {
// Handle ins.
- return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + pointer_size /* art method */;
+ return frame_size + ((reg - num_regs) * sizeof(uint32_t))
+ + static_cast<size_t>(pointer_size) /* art method */;
}
}
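
Note on the stack.cc change: the explicit static_cast<size_t> casts above are forced by the enum class, since scoped enums never convert implicitly; every place that mixes a PointerSize into byte arithmetic must spell the conversion out. The pattern, reduced to a sketch (helper name is illustrative):

#include <cstddef>
#include <cstdint>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// First non-special temp sits above the outs plus the ArtMethod* slot.
size_t FirstTempOffset(uint16_t outs_size, PointerSize pointer_size) {
  return outs_size * sizeof(uint32_t) + static_cast<size_t>(pointer_size);
}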
diff --git a/runtime/stack.h b/runtime/stack.h
index c594ec6..cf33ae1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -734,7 +734,7 @@
static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
// According to the stack model, the first out is above the Method reference.
- return InstructionSetPointerSize(isa) + out_num * sizeof(uint32_t);
+ return static_cast<size_t>(InstructionSetPointerSize(isa)) + out_num * sizeof(uint32_t);
}
bool IsInInlinedFrame() const {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 9cce171..76f3161 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1383,7 +1383,7 @@
if (m->IsRuntimeMethod()) {
return true;
}
- m = m->GetInterfaceMethodIfProxy(sizeof(void*));
+ m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
const int kMaxRepetition = 3;
mirror::Class* c = m->GetDeclaringClass();
mirror::DexCache* dex_cache = c->GetDexCache();
@@ -2111,7 +2111,7 @@
// the i'th frame.
mirror::ObjectArray<mirror::Object>* trace_;
// For cross compilation.
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor);
};
@@ -2198,9 +2198,9 @@
mirror::PointerArray* const method_trace =
down_cast<mirror::PointerArray*>(decoded_traces->Get(0));
// Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
- ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, sizeof(void*));
+ ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
- i + method_trace->GetLength() / 2, sizeof(void*));
+ i + method_trace->GetLength() / 2, kRuntimePointerSize);
int32_t line_number;
StackHandleScope<3> hs(soa.Self());
auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
@@ -2231,7 +2231,7 @@
}
}
}
- const char* method_name = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
+ const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
CHECK(method_name != nullptr);
Handle<mirror::String> method_name_object(
hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
@@ -2408,10 +2408,12 @@
}
// Explicitly instantiate 32 and 64bit thread offset dumping support.
-template void Thread::DumpThreadOffset<4>(std::ostream& os, uint32_t offset);
-template void Thread::DumpThreadOffset<8>(std::ostream& os, uint32_t offset);
+template
+void Thread::DumpThreadOffset<PointerSize::k32>(std::ostream& os, uint32_t offset);
+template
+void Thread::DumpThreadOffset<PointerSize::k64>(std::ostream& os, uint32_t offset);
-template<size_t ptr_size>
+template<PointerSize ptr_size>
void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
#define DO_THREAD_OFFSET(x, y) \
if (offset == (x).Uint32Value()) { \
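
Note on the thread.cc change: the reworked instantiations rely on scoped-enum values being valid non-type template arguments, which is what lets the explicit instantiations name PointerSize::k32/k64 directly. A self-contained sketch of the idiom (function body illustrative):

#include <cstddef>
#include <cstdint>
#include <ostream>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

template <PointerSize kPtrSize>
void DumpOffset(std::ostream& os, uint32_t offset) {
  os << offset << " (" << static_cast<size_t>(kPtrSize) << "-byte pointers)\n";
}

// Explicit instantiation for both pointer widths, as in thread.cc above.
template void DumpOffset<PointerSize::k32>(std::ostream& os, uint32_t offset);
template void DumpOffset<PointerSize::k64>(std::ostream& os, uint32_t offset);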
diff --git a/runtime/thread.h b/runtime/thread.h
index 9a4eb97..840b781 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -28,6 +28,7 @@
#include "arch/context.h"
#include "arch/instruction_set.h"
#include "atomic.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/jni/jni_entrypoints.h"
@@ -179,7 +180,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
// Translates 172 to pAllocArrayFromCode and so on.
- template<size_t size_of_pointers>
+ template<PointerSize size_of_pointers>
static void DumpThreadOffset(std::ostream& os, uint32_t offset);
// Dumps a one-line summary of thread state (used for operator<<).
@@ -532,21 +533,21 @@
// Offsets of various members of native Thread class, used by compiled code.
//
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThinLockIdOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadFlagsOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> IsGcMarkingOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
@@ -557,121 +558,125 @@
void DeoptimizeWithDeoptimizationException(JValue* result) SHARED_REQUIRES(Locks::mutator_lock_);
private:
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
size_t scale;
size_t shrink;
- if (pointer_size == sizeof(void*)) {
+ if (pointer_size == kRuntimePointerSize) {
scale = 1;
shrink = 1;
- } else if (pointer_size > sizeof(void*)) {
- scale = pointer_size / sizeof(void*);
+ } else if (pointer_size > kRuntimePointerSize) {
+ scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize);
shrink = 1;
} else {
- DCHECK_GT(sizeof(void*), pointer_size);
+ DCHECK_GT(kRuntimePointerSize, pointer_size);
scale = 1;
- shrink = sizeof(void*) / pointer_size;
+ shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size);
}
return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
}
public:
static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
- size_t pointer_size) {
- DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
- if (pointer_size == 4) {
- return QuickEntryPointOffset<4>(quick_entrypoint_offset).Uint32Value();
+ PointerSize pointer_size) {
+ if (pointer_size == PointerSize::k32) {
+ return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
+ Uint32Value();
} else {
- return QuickEntryPointOffset<8>(quick_entrypoint_offset).Uint32Value();
+ return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
+ Uint32Value();
}
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> SelfOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ExceptionOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> PeerOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> CardTableOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_pos));
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_pos));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_end));
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_end));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_objects));
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_objects));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> RosAllocRunsOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
rosalloc_runs));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_alloc_stack_top));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_alloc_stack_end));
@@ -713,19 +718,19 @@
return tlsPtr_.stack_end == tlsPtr_.stack_begin;
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> StackEndOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> JniEnvOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
@@ -752,7 +757,7 @@
return tlsPtr_.managed_stack.PopShadowFrame();
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> TopShadowFrameOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
@@ -794,7 +799,7 @@
return handle_scope;
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> TopHandleScopeOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
top_handle_scope));
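
Note on ThreadOffsetFromTlsPtr: the scale/shrink arithmetic handles cross compilation; when the target pointer width differs from the host's, each pointer-sized TLS slot is rescaled before the offset is computed. A worked sketch for a 64-bit host targeting 32 bits (C++14, host width assumed):

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// Mirrors the scale/shrink logic above for an assumed 64-bit host (sketch).
constexpr size_t TargetTlsOffset(size_t base, size_t tls_ptr_offset, PointerSize target) {
  constexpr size_t host = 8;  // sizeof(void*) on the assumed host
  const size_t tgt = static_cast<size_t>(target);
  const size_t scale = (tgt > host) ? tgt / host : 1;
  const size_t shrink = (tgt < host) ? host / tgt : 1;
  return base + (tls_ptr_offset * scale) / shrink;
}

// Two 8-byte host slots in = two 4-byte target slots in.
static_assert(TargetTlsOffset(0, 16, PointerSize::k32) == 8, "shrink by 2");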
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 1e15960..56a26de 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -21,6 +21,7 @@
#include "art_method-inl.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
@@ -854,7 +855,7 @@
}
std::string Trace::GetMethodLine(ArtMethod* method) {
- method = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
return StringPrintf("%#x\t%s\t%s\t%s\t%s\n", (EncodeTraceMethod(method) << TraceActionBits),
PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(),
method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile());
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 6922564..7733a51 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -27,7 +27,7 @@
namespace art {
-inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size,
+inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size,
const DexFile::Header& header)
: pointer_size_(pointer_size),
/* types_offset_ is always 0u, so it's constexpr */
@@ -39,10 +39,9 @@
RoundUp(strings_offset_ + StringsSize(header.string_ids_size_), FieldsAlignment())),
size_(
RoundUp(fields_offset_ + FieldsSize(header.field_ids_size_), Alignment())) {
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
}
-inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
+inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size, const DexFile* dex_file)
: DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) {
}
@@ -50,19 +49,24 @@
// GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment.
static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4");
static_assert(alignof(GcRoot<mirror::String>) == 4, "Expecting alignof(GcRoot<>) == 4");
- DCHECK(pointer_size_ == 4u || pointer_size_ == 8u);
// Pointer alignment is the same as pointer size.
- return pointer_size_;
+ return static_cast<size_t>(pointer_size_);
+}
+
+template <typename T>
+static constexpr PointerSize GcRootAsPointerSize() {
+ return ConvertToPointerSize(sizeof(GcRoot<T>));
}
inline size_t DexCacheArraysLayout::TypeOffset(uint32_t type_idx) const {
- return types_offset_ + ElementOffset(sizeof(GcRoot<mirror::Class>), type_idx);
+ return types_offset_ + ElementOffset(GcRootAsPointerSize<mirror::Class>(), type_idx);
}
inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
// App image patching relies on having enough room for a forwarding pointer in the types array.
// See FixupArtMethodArrayVisitor and ClassLinker::AddImageSpace.
- return std::max(ArraySize(sizeof(GcRoot<mirror::Class>), num_elements), pointer_size_);
+ return std::max(ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements),
+ static_cast<size_t>(pointer_size_));
}
inline size_t DexCacheArraysLayout::TypesAlignment() const {
@@ -75,19 +79,19 @@
inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const {
// App image patching relies on having enough room for a forwarding pointer in the methods array.
- return std::max(ArraySize(pointer_size_, num_elements), pointer_size_);
+ return std::max(ArraySize(pointer_size_, num_elements), static_cast<size_t>(pointer_size_));
}
inline size_t DexCacheArraysLayout::MethodsAlignment() const {
- return pointer_size_;
+ return static_cast<size_t>(pointer_size_);
}
inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const {
- return strings_offset_ + ElementOffset(sizeof(GcRoot<mirror::String>), string_idx);
+ return strings_offset_ + ElementOffset(GcRootAsPointerSize<mirror::String>(), string_idx);
}
inline size_t DexCacheArraysLayout::StringsSize(size_t num_elements) const {
- return ArraySize(sizeof(GcRoot<mirror::String>), num_elements);
+ return ArraySize(GcRootAsPointerSize<mirror::String>(), num_elements);
}
inline size_t DexCacheArraysLayout::StringsAlignment() const {
@@ -103,15 +107,15 @@
}
inline size_t DexCacheArraysLayout::FieldsAlignment() const {
- return pointer_size_;
+ return static_cast<size_t>(pointer_size_);
}
-inline size_t DexCacheArraysLayout::ElementOffset(size_t element_size, uint32_t idx) {
- return element_size * idx;
+inline size_t DexCacheArraysLayout::ElementOffset(PointerSize element_size, uint32_t idx) {
+ return static_cast<size_t>(element_size) * idx;
}
-inline size_t DexCacheArraysLayout::ArraySize(size_t element_size, uint32_t num_elements) {
- return element_size * num_elements;
+inline size_t DexCacheArraysLayout::ArraySize(PointerSize element_size, uint32_t num_elements) {
+ return static_cast<size_t>(element_size) * num_elements;
}
} // namespace art
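
Note on GcRootAsPointerSize: it leans on ConvertToPointerSize from base/enums.h; the assumed contract is a compile-time mapping from a byte count (4 or 8) to the enum, roughly as below. This is a sketch of the assumed contract only; the real helper likely also rejects sizes other than 4 and 8:

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

constexpr PointerSize ConvertToPointerSize(size_t size) {
  return size == 4u ? PointerSize::k32 : PointerSize::k64;  // assumes size is 4 or 8
}

static_assert(ConvertToPointerSize(4u) == PointerSize::k32, "GcRoot<> is 4 bytes");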
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index cd84460..f2437fa 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -31,7 +31,7 @@
// Construct an invalid layout.
DexCacheArraysLayout()
: /* types_offset_ is always 0u */
- pointer_size_(0u),
+ pointer_size_(kRuntimePointerSize),
methods_offset_(0u),
strings_offset_(0u),
fields_offset_(0u),
@@ -39,10 +39,10 @@
}
// Construct a layout for a particular dex file header.
- DexCacheArraysLayout(size_t pointer_size, const DexFile::Header& header);
+ DexCacheArraysLayout(PointerSize pointer_size, const DexFile::Header& header);
// Construct a layout for a particular dex file.
- DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file);
+ DexCacheArraysLayout(PointerSize pointer_size, const DexFile* dex_file);
bool Valid() const {
return Size() != 0u;
@@ -96,17 +96,17 @@
private:
static constexpr size_t types_offset_ = 0u;
- const size_t pointer_size_; // Must be first for construction initialization order.
+ const PointerSize pointer_size_; // Must be first for construction initialization order.
const size_t methods_offset_;
const size_t strings_offset_;
const size_t fields_offset_;
const size_t size_;
- static size_t Alignment(size_t pointer_size);
+ static size_t Alignment(PointerSize pointer_size);
- static size_t ElementOffset(size_t element_size, uint32_t idx);
+ static size_t ElementOffset(PointerSize element_size, uint32_t idx);
- static size_t ArraySize(size_t element_size, uint32_t num_elements);
+ static size_t ArraySize(PointerSize element_size, uint32_t num_elements);
};
} // namespace art
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index f00edff..55b6e01 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -16,6 +16,7 @@
#include "utils.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "mirror/array.h"
@@ -187,17 +188,17 @@
ASSERT_TRUE(c != nullptr);
ArtMethod* m;
- m = c->FindVirtualMethod("charAt", "(I)C", sizeof(void*));
+ m = c->FindVirtualMethod("charAt", "(I)C", kRuntimePointerSize);
ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_charAt", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_charAt__I", JniLongName(m));
- m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I", sizeof(void*));
+ m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I", kRuntimePointerSize);
ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_indexOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_indexOf__Ljava_lang_String_2I", JniLongName(m));
- m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;", sizeof(void*));
+ m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;", kRuntimePointerSize);
ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_copyValueOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_copyValueOf___3CII", JniLongName(m));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f2ae85a..8eeeef6 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -20,6 +20,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
@@ -2839,7 +2840,7 @@
ArtMethod* called_method = VerifyInvocationArgs(inst, type, is_range);
const RegType* return_type = nullptr;
if (called_method != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* return_type_class = called_method->GetReturnType(can_load_classes_,
pointer_size);
if (return_type_class != nullptr) {
@@ -2882,7 +2883,7 @@
} else {
is_constructor = called_method->IsConstructor();
return_type_descriptor = called_method->GetReturnTypeDescriptor();
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* return_type_class = called_method->GetReturnType(can_load_classes_,
pointer_size);
if (return_type_class != nullptr) {
@@ -4969,7 +4970,7 @@
const RegType& MethodVerifier::GetMethodReturnType() {
if (return_type_ == nullptr) {
if (mirror_method_ != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* return_type_class = mirror_method_->GetReturnType(can_load_classes_,
pointer_size);
if (return_type_class != nullptr) {
diff --git a/test/497-inlining-and-class-loader/clear_dex_cache.cc b/test/497-inlining-and-class-loader/clear_dex_cache.cc
index 50d1a63..1597c4a 100644
--- a/test/497-inlining-and-class-loader/clear_dex_cache.cc
+++ b/test/497-inlining-and-class-loader/clear_dex_cache.cc
@@ -15,6 +15,7 @@
*/
#include "art_method-inl.h"
+#include "base/enums.h"
#include "jni.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
@@ -44,8 +45,8 @@
CHECK(array != nullptr);
mirror::PointerArray* pointer_array = soa.Decode<mirror::PointerArray*>(array);
for (size_t i = 0; i != num_methods; ++i) {
- ArtMethod* method = mirror::DexCache::GetElementPtrSize(methods, i, sizeof(void*));
- pointer_array->SetElementPtrSize(i, method, sizeof(void*));
+ ArtMethod* method = mirror::DexCache::GetElementPtrSize(methods, i, kRuntimePointerSize);
+ pointer_array->SetElementPtrSize(i, method, kRuntimePointerSize);
}
return array;
}
@@ -61,8 +62,8 @@
CHECK_EQ(methods != nullptr, old != nullptr);
CHECK_EQ(num_methods, static_cast<size_t>(old->GetLength()));
for (size_t i = 0; i != num_methods; ++i) {
- ArtMethod* method = old->GetElementPtrSize<ArtMethod*>(i, sizeof(void*));
- mirror::DexCache::SetElementPtrSize(methods, i, method, sizeof(void*));
+ ArtMethod* method = old->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
+ mirror::DexCache::SetElementPtrSize(methods, i, method, kRuntimePointerSize);
}
}
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 9f4c6c9..89293cc 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -15,6 +15,7 @@
*/
#include "art_method.h"
+#include "base/enums.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/profiling_info.h"
@@ -29,7 +30,7 @@
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
jit::Jit* jit = Runtime::Current()->GetJit();
jit::JitCodeCache* code_cache = jit->GetCodeCache();
- ArtMethod* method = klass->FindDeclaredDirectMethodByName(method_name, sizeof(void*));
+ ArtMethod* method = klass->FindDeclaredDirectMethodByName(method_name, kRuntimePointerSize);
OatQuickMethodHeader* header = nullptr;
// Infinite loop... Test harness will have its own timeout.
@@ -53,7 +54,7 @@
static void allocate_profiling_info(jclass cls, const char* method_name) {
ScopedObjectAccess soa(Thread::Current());
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
- ArtMethod* method = klass->FindDeclaredDirectMethodByName(method_name, sizeof(void*));
+ ArtMethod* method = klass->FindDeclaredDirectMethodByName(method_name, kRuntimePointerSize);
ProfilingInfo::Create(soa.Self(), method, /* retry_allocation */ true);
}
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index e70a95c..806e130 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -16,6 +16,7 @@
#include "jni.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "dex_file-inl.h"
#include "jit/jit.h"
@@ -135,7 +136,7 @@
CHECK(chars.c_str() != nullptr);
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
- ArtMethod* method = klass->FindDeclaredDirectMethodByName(chars.c_str(), sizeof(void*));
+ ArtMethod* method = klass->FindDeclaredDirectMethodByName(chars.c_str(), kRuntimePointerSize);
jit::JitCodeCache* code_cache = jit->GetCodeCache();
OatQuickMethodHeader* header = nullptr;
diff --git a/tools/cpp-define-generator/offset_dexcache.def b/tools/cpp-define-generator/offset_dexcache.def
index ec4b248..3b26518 100644
--- a/tools/cpp-define-generator/offset_dexcache.def
+++ b/tools/cpp-define-generator/offset_dexcache.def
@@ -18,11 +18,12 @@
#if defined(DEFINE_INCLUDE_DEPENDENCIES)
#include "art_method.h" // art::ArtMethod
+#include "base/enums.h" // PointerSize
#endif
#define DEFINE_ART_METHOD_OFFSET(field_name, method_name) \
- DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_32, int32_t, art::ArtMethod::method_name##Offset(4).Int32Value()) \
- DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_64, int32_t, art::ArtMethod::method_name##Offset(8).Int32Value())
+ DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_32, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k32).Int32Value()) \
+ DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_64, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k64).Int32Value())
// New macro suffix Method Name (of the Offset method)
DEFINE_ART_METHOD_OFFSET(DEX_CACHE_METHODS, DexCacheResolvedMethods)
diff --git a/tools/cpp-define-generator/offset_thread.def b/tools/cpp-define-generator/offset_thread.def
index 71648e6..6f94d38 100644
--- a/tools/cpp-define-generator/offset_thread.def
+++ b/tools/cpp-define-generator/offset_thread.def
@@ -17,13 +17,14 @@
// Offsets within ShadowFrame.
#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "base/enums.h" // PointerSize
#include "stack.h" // art::ShadowFrame
#endif
#include "common.def" // DEFINE_OFFSET_EXPR
#define DEFINE_THREAD_OFFSET(field_name, method_name) \
- DEFINE_OFFSET_EXPR(Thread, field_name, int32_t, art::Thread::method_name##Offset<sizeof(void*)>().Int32Value())
+ DEFINE_OFFSET_EXPR(Thread, field_name, int32_t, art::Thread::method_name##Offset<art::kRuntimePointerSize>().Int32Value())
// New macro suffix Method Name (of the Offset method)
DEFINE_THREAD_OFFSET(FLAGS, ThreadFlags)