Merge "Fix race in CommitCodeInternal and cleanup"
diff --git a/Android.mk b/Android.mk
index 1c94629..19c65a1 100644
--- a/Android.mk
+++ b/Android.mk
@@ -98,6 +98,7 @@
 include $(art_path)/build/Android.gtest.mk
 include $(art_path)/test/Android.run-test.mk
 
+# Make sure /system is writable on the device.
 TEST_ART_ADB_ROOT_AND_REMOUNT := \
     (adb root && \
      adb wait-for-device remount && \
@@ -122,8 +123,10 @@
 	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
 	adb sync system && adb sync data
 else
+# TEST_ART_ADB_ROOT_AND_REMOUNT is not needed here, as we are only
+# pushing things to the chroot dir, which is expected to be under
+# /data on the device.
 test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
-	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
 	adb wait-for-device
 	adb push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)/
 	adb push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index a52e163..a33d537 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -59,7 +59,7 @@
   template <typename T>
   bool UsuallyEquals(const T& expected, const T& actual,
                      typename std::enable_if<
-                         detail::SupportsEqualityOperator<T>::value>::type* = 0) {
+                         detail::SupportsEqualityOperator<T>::value>::type* = nullptr) {
     return expected == actual;
   }
 
@@ -73,8 +73,8 @@
   template <typename T, typename ... Ignore>
   bool UsuallyEquals(const T& expected, const T& actual,
                      const Ignore& ... more ATTRIBUTE_UNUSED,
-                     typename std::enable_if<std::is_pod<T>::value>::type* = 0,
-                     typename std::enable_if<!detail::SupportsEqualityOperator<T>::value>::type* = 0
+                     typename std::enable_if<std::is_pod<T>::value>::type* = nullptr,
+                     typename std::enable_if<!detail::SupportsEqualityOperator<T>::value>::type* = nullptr
                      ) {
     return memcmp(std::addressof(expected), std::addressof(actual), sizeof(T)) == 0;
   }
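The `0` → `nullptr` changes are purely cosmetic: the unnamed enable_if pointer parameters exist only to steer overload resolution. A minimal standalone sketch of the idiom, substituting std::is_arithmetic for this file's detail::SupportsEqualityOperator:

    #include <cstring>
    #include <type_traits>

    // Chosen when the predicate holds; the defaulted, unnamed pointer parameter
    // never appears at call sites and only selects the overload.
    template <typename T>
    bool Eq(const T& a, const T& b,
            typename std::enable_if<std::is_arithmetic<T>::value>::type* = nullptr) {
      return a == b;
    }

    // Fallback for non-arithmetic, trivially copyable types: bitwise comparison.
    template <typename T>
    bool Eq(const T& a, const T& b,
            typename std::enable_if<!std::is_arithmetic<T>::value>::type* = nullptr) {
      return std::memcmp(&a, &b, sizeof(T)) == 0;
    }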
diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h
index 65c1114..d011e7f 100644
--- a/cmdline/detail/cmdline_parse_argument_detail.h
+++ b/cmdline/detail/cmdline_parse_argument_detail.h
@@ -90,7 +90,7 @@
 struct CmdlineParserArgumentInfo {
   // This version will only be used if TArg is arithmetic and thus has the <= operators.
   template <typename T = TArg>  // Necessary to get SFINAE to kick in.
-  bool CheckRange(const TArg& value, typename EnableIfNumeric<T>::type* = 0) {
+  bool CheckRange(const TArg& value, typename EnableIfNumeric<T>::type* = nullptr) {
     if (has_range_) {
       return min_ <= value && value <= max_;
     }
@@ -99,7 +99,7 @@
 
   // This version will be used at other times when TArg is not arithmetic.
   template <typename T = TArg>
-  bool CheckRange(const TArg&, typename DisableIfNumeric<T>::type* = 0) {
+  bool CheckRange(const TArg&, typename DisableIfNumeric<T>::type* = nullptr) {
     assert(!has_range_);
     return true;
   }
diff --git a/cmdline/detail/cmdline_parser_detail.h b/cmdline/detail/cmdline_parser_detail.h
index 4c26ba3..2078d7a 100644
--- a/cmdline/detail/cmdline_parser_detail.h
+++ b/cmdline/detail/cmdline_parser_detail.h
@@ -90,7 +90,7 @@
 template <typename T>
 std::string ToStringAny(const T& value,
                         typename std::enable_if<
-                            SupportsInsertionOperator<T>::value>::type* = 0) {
+                            SupportsInsertionOperator<T>::value>::type* = nullptr) {
   std::stringstream stream;
   stream << value;
   return stream.str();
@@ -99,7 +99,7 @@
 template <typename T>
 std::string ToStringAny(const std::vector<T> value,
                         typename std::enable_if<
-                            SupportsInsertionOperator<T>::value>::type* = 0) {
+                            SupportsInsertionOperator<T>::value>::type* = nullptr) {
   std::stringstream stream;
   stream << "vector{";
 
@@ -118,7 +118,7 @@
 template <typename T>
 std::string ToStringAny(const T&,
                         typename std::enable_if<
-                            !SupportsInsertionOperator<T>::value>::type* = 0
+                            !SupportsInsertionOperator<T>::value>::type* = nullptr
 ) {
   return std::string("(unknown type [no operator<< implemented] for )");
 }
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 62d547d..8cc6cf1 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -116,9 +116,6 @@
   return true;
 }
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wframe-larger-than="
-
 bool CompilerOptions::ParseCompilerOptions(const std::vector<std::string>& options,
                                            bool ignore_unrecognized,
                                            std::string* error_msg) {
@@ -133,8 +130,6 @@
   return ReadCompilerOptions(args, this, error_msg);
 }
 
-#pragma GCC diagnostic pop
-
 bool CompilerOptions::IsImageClass(const char* descriptor) const {
   // Historical note: We used to hold the set indirectly and there was a distinction between an
 // empty set and a null, null meaning to include all classes. However, the distinction has been
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index b0e0337..fd17364 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -81,7 +81,9 @@
     stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset);
     stack_maps.EndStackMapEntry();
     stack_maps.EndMethod();
-    const size_t stack_maps_size = stack_maps.PrepareForFillIn();
+    ScopedArenaVector<uint8_t> stack_map = stack_maps.Encode();
+
+    const size_t stack_maps_size = stack_map.size();
     const size_t header_size = sizeof(OatQuickMethodHeader);
     const size_t code_alignment = GetInstructionSetAlignment(kRuntimeISA);
 
@@ -90,9 +92,8 @@
     uint8_t* code_ptr =
       AlignUp(&fake_header_code_and_maps_[stack_maps_size + header_size], code_alignment);
 
-    MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size);
-    stack_maps.FillInCodeInfo(stack_maps_region);
-    OatQuickMethodHeader method_header(code_ptr - stack_maps_region.begin(), code_size);
+    memcpy(&fake_header_code_and_maps_[0], stack_map.data(), stack_maps_size);
+    OatQuickMethodHeader method_header(code_ptr - fake_header_code_and_maps_.data(), code_size);
     static_assert(std::is_trivially_copyable<OatQuickMethodHeader>::value, "Cannot use memcpy");
     memcpy(code_ptr - header_size, &method_header, header_size);
     memcpy(code_ptr, fake_code_.data(), fake_code_.size());
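The test now follows the new StackMapStream protocol: Encode() returns an owned buffer whose size() replaces the old PrepareForFillIn()/FillInCodeInfo() two-step. Condensed from the hunk above:

    // Old protocol: query the size, wrap a caller-owned region, fill it in.
    //   const size_t stack_maps_size = stack_maps.PrepareForFillIn();
    //   MemoryRegion region(&fake_header_code_and_maps_[0], stack_maps_size);
    //   stack_maps.FillInCodeInfo(region);
    // New protocol: encode once, then copy wherever the caller needs it.
    ScopedArenaVector<uint8_t> stack_map = stack_maps.Encode();
    memcpy(&fake_header_code_and_maps_[0], stack_map.data(), stack_map.size());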
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b0a05da..a13efca 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -737,17 +737,15 @@
 
 void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
   DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
+  DCHECK(!cls->MustGenerateClinitCheck());
   LocationSummary* locations = cls->GetLocations();
   MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
   if (cls->NeedsAccessCheck()) {
-    CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
-    InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
-  } else if (cls->MustGenerateClinitCheck()) {
-    CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
-    InvokeRuntime(kQuickInitializeStaticStorage, cls, cls->GetDexPc());
+    CheckEntrypointTypes<kQuickResolveTypeAndVerifyAccess, void*, uint32_t>();
+    InvokeRuntime(kQuickResolveTypeAndVerifyAccess, cls, cls->GetDexPc());
   } else {
-    CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
-    InvokeRuntime(kQuickInitializeType, cls, cls->GetDexPc());
+    CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+    InvokeRuntime(kQuickResolveType, cls, cls->GetDexPc());
   }
 }
 
@@ -963,12 +961,6 @@
 
 CodeGenerator::~CodeGenerator() {}
 
-void CodeGenerator::ComputeStackMapSize(size_t* stack_map_size) {
-  DCHECK(stack_map_size != nullptr);
-  StackMapStream* stack_map_stream = GetStackMapStream();
-  *stack_map_size = stack_map_stream->PrepareForFillIn();
-}
-
 size_t CodeGenerator::GetNumberOfJitRoots() const {
   DCHECK(code_generation_data_ != nullptr);
   return code_generation_data_->GetNumberOfJitRoots();
@@ -1035,13 +1027,12 @@
   }
 }
 
-void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
-                                   const DexFile::CodeItem* code_item_for_osr_check) {
-  StackMapStream* stack_map_stream = GetStackMapStream();
-  stack_map_stream->FillInCodeInfo(stack_map_region);
-  if (kIsDebugBuild && code_item_for_osr_check != nullptr) {
-    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), *code_item_for_osr_check);
+ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const DexFile::CodeItem* code_item) {
+  ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
+  if (kIsDebugBuild && code_item != nullptr) {
+    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
   }
+  return stack_map;
 }
 
 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 3d58d29..e77d621 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -350,9 +350,7 @@
 
   void AddSlowPath(SlowPathCode* slow_path);
 
-  void BuildStackMaps(MemoryRegion stack_map_region,
-                      const DexFile::CodeItem* code_item_for_osr_check);
-  void ComputeStackMapSize(size_t* stack_map_size);
+  ScopedArenaVector<uint8_t> BuildStackMaps(const DexFile::CodeItem* code_item_for_osr_check);
   size_t GetNumberOfJitRoots() const;
 
   // Fills the `literals` array with literals collected during code generation.
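With ComputeStackMapSize() gone, callers get both the data and its size from the returned vector; a sketch of the post-change caller (the storage helper here is hypothetical, real callers manage their own buffers):

    ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);
    uint8_t* storage = ReserveStackMapSpace(stack_map.size());  // hypothetical helper
    memcpy(storage, stack_map.data(), stack_map.size());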
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 760b1dd..723446b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -89,15 +89,10 @@
 
 // Reference load (except object array loads) is using LDR Wt, [Xn, #offset] which can handle
 // offset < 16KiB. For offsets >= 16KiB, the load shall be emitted as two or more instructions.
-// For the Baker read barrier implementation using link-generated thunks we need to split
+// For the Baker read barrier implementation using link-time generated thunks we need to split
 // the offset explicitly.
 constexpr uint32_t kReferenceLoadMinFarOffset = 16 * KB;
 
-// Flags controlling the use of link-time generated thunks for Baker read barriers.
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForFields = true;
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForArrays = true;
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForGcRoots = true;
-
 // Some instructions have special requirements for a temporary, for example
 // LoadClass/kBssEntry and LoadString/kBssEntry for Baker read barrier require
 // temp that's not an R0 (to avoid an extra move) and Baker read barrier field
@@ -164,6 +159,16 @@
   return ARM64ReturnLocation(return_type);
 }
 
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+  InvokeRuntimeCallingConvention calling_convention;
+  RegisterSet caller_saves = RegisterSet::Empty();
+  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
+  DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(),
+            RegisterFrom(calling_convention.GetReturnLocation(DataType::Type::kReference),
+                         DataType::Type::kReference).GetCode());
+  return caller_saves;
+}
+
 // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
 #define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->  // NOLINT
 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, x).Int32Value()
@@ -307,35 +312,41 @@
 
 class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  LoadClassSlowPathARM64(HLoadClass* cls,
-                         HInstruction* at,
-                         uint32_t dex_pc,
-                         bool do_clinit)
-      : SlowPathCodeARM64(at),
-        cls_(cls),
-        dex_pc_(dex_pc),
-        do_clinit_(do_clinit) {
+  LoadClassSlowPathARM64(HLoadClass* cls, HInstruction* at)
+      : SlowPathCodeARM64(at), cls_(cls) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
-    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+    const uint32_t dex_pc = instruction_->GetDexPc();
+    bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+    bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
 
+    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    dex::TypeIndex type_index = cls_->GetTypeIndex();
-    __ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
-    QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
-                                                : kQuickInitializeType;
-    arm64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
-    if (do_clinit_) {
-      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+    if (must_resolve_type) {
+      DCHECK(IsSameDexFile(cls_->GetDexFile(), arm64_codegen->GetGraph()->GetDexFile()));
+      dex::TypeIndex type_index = cls_->GetTypeIndex();
+      __ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
+      arm64_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+      // If we also must_do_clinit, the resolved type is now in the correct register.
     } else {
-      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+      DCHECK(must_do_clinit);
+      Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+      arm64_codegen->MoveLocation(LocationFrom(calling_convention.GetRegisterAt(0)),
+                                  source,
+                                  cls_->GetType());
+    }
+    if (must_do_clinit) {
+      arm64_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
     }
 
     // Move the class to the desired location.
@@ -354,12 +365,6 @@
   // The class this slow path will load.
   HLoadClass* const cls_;
 
-  // The dex PC of `at_`.
-  const uint32_t dex_pc_;
-
-  // Whether to initialize the class.
-  const bool do_clinit_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
 };
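The reworked slow path derives its behavior from the instruction instead of constructor flags; its control flow, condensed to pseudo-C++ from EmitNativeCode() above:

    bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
    bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
    if (must_resolve_type) {
      // arg0 = type index; kQuickResolveType leaves the resolved class in the
      // return register, which doubles as arg0 for a following clinit call.
    } else {  // must_do_clinit necessarily holds here.
      // Already resolved: move the class from `out` (HLoadClass) or input 0
      // (HClinitCheck) into arg0.
    }
    if (must_do_clinit) {
      // kQuickInitializeStaticStorage now takes the resolved mirror::Class*.
    }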
 
@@ -670,50 +675,6 @@
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathBaseARM64);
 };
 
-// Slow path marking an object reference `ref` during a read
-// barrier. The field `obj.field` in the object `obj` holding this
-// reference does not get updated by this slow path after marking.
-//
-// This means that after the execution of this slow path, `ref` will
-// always be up-to-date, but `obj.field` may not; i.e., after the
-// flip, `ref` will be a to-space reference, but `obj.field` will
-// probably still be a from-space reference (unless it gets updated by
-// another thread, or if another thread installed another object
-// reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class ReadBarrierMarkSlowPathARM64 : public ReadBarrierMarkSlowPathBaseARM64 {
- public:
-  ReadBarrierMarkSlowPathARM64(HInstruction* instruction,
-                               Location ref,
-                               Location entrypoint = Location::NoLocation())
-      : ReadBarrierMarkSlowPathBaseARM64(instruction, ref, entrypoint) {
-    DCHECK(kEmitCompilerReadBarrier);
-  }
-
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathARM64"; }
-
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
-    LocationSummary* locations = instruction_->GetLocations();
-    DCHECK(locations->CanCall());
-    DCHECK(ref_.IsRegister()) << ref_;
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
-    DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
-        << "Unexpected instruction in read barrier marking slow path: "
-        << instruction_->DebugName();
-
-    __ Bind(GetEntryLabel());
-    GenerateReadBarrierMarkRuntimeCall(codegen);
-    __ B(GetExitLabel());
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM64);
-};
-
 // Slow path loading `obj`'s lock word, loading a reference from
 // object `*(obj + offset + (index << scale_factor))` into `ref`, and
 // marking `ref` if `obj` is gray according to the lock word (Baker
@@ -1403,7 +1364,9 @@
       jit_string_patches_(StringReferenceValueComparator(),
                           graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       jit_class_patches_(TypeReferenceValueComparator(),
-                         graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
+                         graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+      jit_baker_read_barrier_slow_paths_(std::less<uint32_t>(),
+                                         graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
   // Save the link register (containing the return address) to mimic Quick.
   AddAllocatedRegister(LocationFrom(lr));
 }
@@ -1418,6 +1381,16 @@
 
 void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
   EmitJumpTables();
+
+  // Emit JIT baker read barrier slow paths.
+  DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+  for (auto& entry : jit_baker_read_barrier_slow_paths_) {
+    uint32_t encoded_data = entry.first;
+    vixl::aarch64::Label* slow_path_entry = &entry.second.label;
+    __ Bind(slow_path_entry);
+    CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+  }
+
   // Ensure we emit the literal pool.
   __ FinalizeCode();
 
@@ -2302,17 +2275,16 @@
                                                            : LocationSummary::kNoCall);
   if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-    // We need a temporary register for the read barrier marking slow
-    // path in CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier.
-    if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
-        !Runtime::Current()->UseJitCompilation() &&
-        !field_info.IsVolatile()) {
-      // If link-time thunks for the Baker read barrier are enabled, for AOT
-      // non-volatile loads we need a temporary only if the offset is too big.
+    if (!field_info.IsVolatile()) {
+      // We need a temporary register for the read barrier load in
+      // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier()
+      // only if the offset is too big.
       if (field_info.GetFieldOffset().Uint32Value() >= kReferenceLoadMinFarOffset) {
         locations->AddTemp(FixedTempLocation());
       }
     } else {
+      // Volatile fields need a temporary register for the read barrier marking slow
+      // path in CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier().
       locations->AddTemp(Location::RequiresRegister());
     }
   }
@@ -2776,14 +2748,11 @@
                                                            : LocationSummary::kNoCall);
   if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-    // We need a temporary register for the read barrier marking slow
-    // path in CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier.
-    if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
-        !Runtime::Current()->UseJitCompilation() &&
-        instruction->GetIndex()->IsConstant()) {
+    if (instruction->GetIndex()->IsConstant()) {
       // Array loads with constant index are treated as field loads.
-      // If link-time thunks for the Baker read barrier are enabled, for AOT
-      // constant index loads we need a temporary only if the offset is too big.
+      // We need a temporary register for the read barrier load in
+      // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier()
+      // only if the offset is too big.
       uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction);
       uint32_t index = instruction->GetIndex()->AsIntConstant()->GetValue();
       offset += index << DataType::SizeShift(DataType::Type::kReference);
@@ -2791,6 +2760,8 @@
         locations->AddTemp(FixedTempLocation());
       }
     } else {
+      // We need a non-scratch temporary for the array data pointer in
+      // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier().
       locations->AddTemp(Location::RequiresRegister());
     }
   }
@@ -2846,7 +2817,7 @@
     } else {
       Register temp = WRegisterFrom(locations->GetTemp(0));
       codegen_->GenerateArrayLoadWithBakerReadBarrier(
-          instruction, out, obj.W(), offset, index, temp, /* needs_null_check */ false);
+          out, obj.W(), offset, index, temp, /* needs_null_check */ false);
     }
   } else {
     // General case.
@@ -3178,12 +3149,14 @@
   if (check->HasUses()) {
     locations->SetOut(Location::SameAsFirstInput());
   }
+  // Rely on the type initialization to save everything we need.
+  locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
 }
 
 void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
-  SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
-      check->GetLoadClass(), check, check->GetDexPc(), true);
+  SlowPathCodeARM64* slow_path =
+      new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(check->GetLoadClass(), check);
   codegen_->AddSlowPath(slow_path);
   GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
 }
@@ -4734,9 +4707,18 @@
   return NewPcRelativePatch(&dex_file, string_index.index_, adrp_label, &string_bss_entry_patches_);
 }
 
-vixl::aarch64::Label* CodeGeneratorARM64::NewBakerReadBarrierPatch(uint32_t custom_data) {
-  baker_read_barrier_patches_.emplace_back(custom_data);
-  return &baker_read_barrier_patches_.back().label;
+void CodeGeneratorARM64::EmitBakerReadBarrierCbnz(uint32_t custom_data) {
+  ExactAssemblyScope guard(GetVIXLAssembler(), 1 * vixl::aarch64::kInstructionSize);
+  if (Runtime::Current()->UseJitCompilation()) {
+    auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
+    vixl::aarch64::Label* slow_path_entry = &it->second.label;
+    __ cbnz(mr, slow_path_entry);
+  } else {
+    baker_read_barrier_patches_.emplace_back(custom_data);
+    vixl::aarch64::Label* cbnz_label = &baker_read_barrier_patches_.back().label;
+    __ bind(cbnz_label);
+    __ cbnz(mr, static_cast<int64_t>(0));  // Placeholder, patched at link-time.
+  }
 }
 
 vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativePatch(
@@ -5053,13 +5035,7 @@
   if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
     if (!kUseReadBarrier || kUseBakerReadBarrier) {
       // Rely on the type resolution or initialization and marking to save everything we need.
-      RegisterSet caller_saves = RegisterSet::Empty();
-      InvokeRuntimeCallingConvention calling_convention;
-      caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
-      DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(),
-                RegisterFrom(calling_convention.GetReturnLocation(DataType::Type::kReference),
-                             DataType::Type::kReference).GetCode());
-      locations->SetCustomSlowPathCallerSaves(caller_saves);
+      locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
     } else {
       // For non-Baker read barrier we have a temp-clobbering call.
     }
@@ -5171,8 +5147,8 @@
   bool do_clinit = cls->MustGenerateClinitCheck();
   if (generate_null_check || do_clinit) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
-        cls, cls, cls->GetDexPc(), do_clinit);
+    SlowPathCodeARM64* slow_path =
+        new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(cls, cls);
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
       __ Cbz(out, slow_path->GetEntryLabel());
@@ -5257,13 +5233,7 @@
     if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
       if (!kUseReadBarrier || kUseBakerReadBarrier) {
         // Rely on the pResolveString and marking to save everything we need.
-        RegisterSet caller_saves = RegisterSet::Empty();
-        InvokeRuntimeCallingConvention calling_convention;
-        caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
-        DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(),
-                  RegisterFrom(calling_convention.GetReturnLocation(DataType::Type::kReference),
-                               DataType::Type::kReference).GetCode());
-        locations->SetCustomSlowPathCallerSaves(caller_saves);
+        locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
       } else {
         // For non-Baker read barrier we have a temp-clobbering call.
       }
@@ -6255,76 +6225,39 @@
     if (kUseBakerReadBarrier) {
       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
       // Baker's read barriers are used.
-      if (kBakerReadBarrierLinkTimeThunksEnableForGcRoots &&
-          !Runtime::Current()->UseJitCompilation()) {
-        // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
-        // the Marking Register) to decide whether we need to enter
-        // the slow path to mark the GC root.
-        //
-        // We use link-time generated thunks for the slow path. That thunk
-        // checks the reference and jumps to the entrypoint if needed.
-        //
-        //     lr = &return_address;
-        //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
-        //     if (mr) {  // Thread::Current()->GetIsGcMarking()
-        //       goto gc_root_thunk<root_reg>(lr)
-        //     }
-        //   return_address:
 
-        UseScratchRegisterScope temps(GetVIXLAssembler());
-        DCHECK(temps.IsAvailable(ip0));
-        DCHECK(temps.IsAvailable(ip1));
-        temps.Exclude(ip0, ip1);
-        uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode());
-        vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
+      // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
+      // the Marking Register) to decide whether we need to enter
+      // the slow path to mark the GC root.
+      //
+      // We use shared thunks for the slow path; shared within the method
+      // for JIT, across methods for AOT. That thunk checks the reference
+      // and jumps to the entrypoint if needed.
+      //
+      //     lr = &return_address;
+      //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
+      //     if (mr) {  // Thread::Current()->GetIsGcMarking()
+      //       goto gc_root_thunk<root_reg>(lr)
+      //     }
+      //   return_address:
 
-        EmissionCheckScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize);
-        vixl::aarch64::Label return_address;
-        __ adr(lr, &return_address);
-        if (fixup_label != nullptr) {
-          __ Bind(fixup_label);
-        }
-        static_assert(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET == -8,
-                      "GC root LDR must be 2 instruction (8B) before the return address label.");
-        __ ldr(root_reg, MemOperand(obj.X(), offset));
-        __ Bind(cbnz_label);
-        __ cbnz(mr, static_cast<int64_t>(0));  // Placeholder, patched at link-time.
-        __ Bind(&return_address);
-      } else {
-        // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
-        // the Marking Register) to decide whether we need to enter
-        // the slow path to mark the GC root.
-        //
-        //   GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
-        //   if (mr) {  // Thread::Current()->GetIsGcMarking()
-        //     // Slow path.
-        //     entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
-        //     root = entrypoint(root);  // root = ReadBarrier::Mark(root);  // Entry point call.
-        //   }
+      UseScratchRegisterScope temps(GetVIXLAssembler());
+      DCHECK(temps.IsAvailable(ip0));
+      DCHECK(temps.IsAvailable(ip1));
+      temps.Exclude(ip0, ip1);
+      uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode());
 
-        // Slow path marking the GC root `root`. The entrypoint will
-        // be loaded by the slow path code.
-        SlowPathCodeARM64* slow_path =
-            new (GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
-        AddSlowPath(slow_path);
-
-        // /* GcRoot<mirror::Object> */ root = *(obj + offset)
-        if (fixup_label == nullptr) {
-          __ Ldr(root_reg, MemOperand(obj, offset));
-        } else {
-          EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj);
-        }
-        static_assert(
-            sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
-            "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
-            "have different sizes.");
-        static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
-                      "art::mirror::CompressedReference<mirror::Object> and int32_t "
-                      "have different sizes.");
-
-        __ Cbnz(mr, slow_path->GetEntryLabel());
-        __ Bind(slow_path->GetExitLabel());
+      ExactAssemblyScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize);
+      vixl::aarch64::Label return_address;
+      __ adr(lr, &return_address);
+      if (fixup_label != nullptr) {
+        __ bind(fixup_label);
       }
+      static_assert(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET == -8,
+                    "GC root LDR must be 2 instructions (8B) before the return address label.");
+      __ ldr(root_reg, MemOperand(obj.X(), offset));
+      EmitBakerReadBarrierCbnz(custom_data);
+      __ bind(&return_address);
     } else {
       // GC root loaded through a slow path for read barriers other
       // than Baker's.
@@ -6361,18 +6294,17 @@
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
 
-  if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
-      !use_load_acquire &&
-      !Runtime::Current()->UseJitCompilation()) {
+  if (!use_load_acquire) {
     // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
     // Marking Register) to decide whether we need to enter the slow
     // path to mark the reference. Then, in the slow path, check the
     // gray bit in the lock word of the reference's holder (`obj`) to
     // decide whether to mark `ref` or not.
     //
-    // We use link-time generated thunks for the slow path. That thunk checks
-    // the holder and jumps to the entrypoint if needed. If the holder is not
-    // gray, it creates a fake dependency and returns to the LDR instruction.
+    // We use shared thunks for the slow path; shared within the method
+    // for JIT, across methods for AOT. That thunk checks the holder
+    // and jumps to the entrypoint if needed. If the holder is not gray,
+    // it creates a fake dependency and returns to the LDR instruction.
     //
     //     lr = &gray_return_address;
     //     if (mr) {  // Thread::Current()->GetIsGcMarking()
@@ -6398,15 +6330,13 @@
     DCHECK(temps.IsAvailable(ip1));
     temps.Exclude(ip0, ip1);
     uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode());
-    vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
 
     {
-      EmissionCheckScope guard(GetVIXLAssembler(),
+      ExactAssemblyScope guard(GetVIXLAssembler(),
                                (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
       vixl::aarch64::Label return_address;
       __ adr(lr, &return_address);
-      __ Bind(cbnz_label);
-      __ cbnz(mr, static_cast<int64_t>(0));  // Placeholder, patched at link-time.
+      EmitBakerReadBarrierCbnz(custom_data);
       static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
                     "Field LDR must be 1 instruction (4B) before the return address label; "
                     " 2 instructions (8B) for heap poisoning.");
@@ -6415,8 +6345,12 @@
       if (needs_null_check) {
         MaybeRecordImplicitNullCheck(instruction);
       }
-      GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
-      __ Bind(&return_address);
+      // Unpoison the reference explicitly if needed. MaybeUnpoisonHeapReference() uses
+      // macro instructions disallowed in ExactAssemblyScope.
+      if (kPoisonHeapReferences) {
+        __ neg(ref_reg, Operand(ref_reg));
+      }
+      __ bind(&return_address);
     }
     MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
     return;
@@ -6437,8 +6371,7 @@
                                             use_load_acquire);
 }
 
-void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
-                                                               Location ref,
+void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
                                                                Register obj,
                                                                uint32_t data_offset,
                                                                Location index,
@@ -6452,74 +6385,57 @@
       "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
   size_t scale_factor = DataType::SizeShift(DataType::Type::kReference);
 
-  if (kBakerReadBarrierLinkTimeThunksEnableForArrays &&
-      !Runtime::Current()->UseJitCompilation()) {
-    // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
-    // Marking Register) to decide whether we need to enter the slow
-    // path to mark the reference. Then, in the slow path, check the
-    // gray bit in the lock word of the reference's holder (`obj`) to
-    // decide whether to mark `ref` or not.
-    //
-    // We use link-time generated thunks for the slow path. That thunk checks
-    // the holder and jumps to the entrypoint if needed. If the holder is not
-    // gray, it creates a fake dependency and returns to the LDR instruction.
-    //
-    //     lr = &gray_return_address;
-    //     if (mr) {  // Thread::Current()->GetIsGcMarking()
-    //       goto array_thunk<base_reg>(lr)
-    //     }
-    //   not_gray_return_address:
-    //     // Original reference load. If the offset is too large to fit
-    //     // into LDR, we use an adjusted base register here.
-    //     HeapReference<mirror::Object> reference = data[index];
-    //   gray_return_address:
+  // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
+  // Marking Register) to decide whether we need to enter the slow
+  // path to mark the reference. Then, in the slow path, check the
+  // gray bit in the lock word of the reference's holder (`obj`) to
+  // decide whether to mark `ref` or not.
+  //
+  // We use shared thunks for the slow path; shared within the method
+  // for JIT, across methods for AOT. That thunk checks the holder
+  // and jumps to the entrypoint if needed. If the holder is not gray,
+  // it creates a fake dependency and returns to the LDR instruction.
+  //
+  //     lr = &gray_return_address;
+  //     if (mr) {  // Thread::Current()->GetIsGcMarking()
+  //       goto array_thunk<base_reg>(lr)
+  //     }
+  //   not_gray_return_address:
+  //     // Original reference load. If the offset is too large to fit
+  //     // into LDR, we use an adjusted base register here.
+  //     HeapReference<mirror::Object> reference = data[index];
+  //   gray_return_address:
 
-    DCHECK(index.IsValid());
-    Register index_reg = RegisterFrom(index, DataType::Type::kInt32);
-    Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
+  DCHECK(index.IsValid());
+  Register index_reg = RegisterFrom(index, DataType::Type::kInt32);
+  Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
 
-    UseScratchRegisterScope temps(GetVIXLAssembler());
-    DCHECK(temps.IsAvailable(ip0));
-    DCHECK(temps.IsAvailable(ip1));
-    temps.Exclude(ip0, ip1);
-    uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
-    vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  DCHECK(temps.IsAvailable(ip0));
+  DCHECK(temps.IsAvailable(ip1));
+  temps.Exclude(ip0, ip1);
+  uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
 
-    __ Add(temp.X(), obj.X(), Operand(data_offset));
-    {
-      EmissionCheckScope guard(GetVIXLAssembler(),
-                               (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
-      vixl::aarch64::Label return_address;
-      __ adr(lr, &return_address);
-      __ Bind(cbnz_label);
-      __ cbnz(mr, static_cast<int64_t>(0));  // Placeholder, patched at link-time.
-      static_assert(BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
-                    "Array LDR must be 1 instruction (4B) before the return address label; "
-                    " 2 instructions (8B) for heap poisoning.");
-      __ ldr(ref_reg, MemOperand(temp.X(), index_reg.X(), LSL, scale_factor));
-      DCHECK(!needs_null_check);  // The thunk cannot handle the null check.
-      GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
-      __ Bind(&return_address);
+  __ Add(temp.X(), obj.X(), Operand(data_offset));
+  {
+    ExactAssemblyScope guard(GetVIXLAssembler(),
+                             (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
+    vixl::aarch64::Label return_address;
+    __ adr(lr, &return_address);
+    EmitBakerReadBarrierCbnz(custom_data);
+    static_assert(BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
+                  "Array LDR must be 1 instruction (4B) before the return address label; "
+                  " 2 instructions (8B) for heap poisoning.");
+    __ ldr(ref_reg, MemOperand(temp.X(), index_reg.X(), LSL, scale_factor));
+    DCHECK(!needs_null_check);  // The thunk cannot handle the null check.
+    // Unpoison the reference explicitly if needed. MaybeUnpoisonHeapReference() uses
+    // macro instructions disallowed in ExactAssemblyScope.
+    if (kPoisonHeapReferences) {
+      __ neg(ref_reg, Operand(ref_reg));
     }
-    MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
-    return;
+    __ bind(&return_address);
   }
-
-  // Array cells are never volatile variables, therefore array loads
-  // never use Load-Acquire instructions on ARM64.
-  const bool use_load_acquire = false;
-
-  // /* HeapReference<Object> */ ref =
-  //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
-  GenerateReferenceLoadWithBakerReadBarrier(instruction,
-                                            ref,
-                                            obj,
-                                            data_offset,
-                                            index,
-                                            scale_factor,
-                                            temp,
-                                            needs_null_check,
-                                            use_load_acquire);
+  MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
 }
 
 void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -6988,7 +6904,12 @@
       UNREACHABLE();
   }
 
-  if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+  // For JIT, the slow path is considered part of the compiled method,
+  // so JIT should pass null as `debug_name`. Tests may not have a runtime.
+  DCHECK(Runtime::Current() == nullptr ||
+         !Runtime::Current()->UseJitCompilation() ||
+         debug_name == nullptr);
+  if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
     std::ostringstream oss;
     oss << "BakerReadBarrierThunk";
     switch (kind) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 93bab31..5aeb0b4 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -619,9 +619,9 @@
                                                dex::StringIndex string_index,
                                                vixl::aarch64::Label* adrp_label = nullptr);
 
-  // Add a new baker read barrier patch and return the label to be bound
-  // before the CBNZ instruction.
-  vixl::aarch64::Label* NewBakerReadBarrierPatch(uint32_t custom_data);
+  // Emit the CBNZ instruction for baker read barrier and record
+  // the associated patch for AOT or slow path for JIT.
+  void EmitBakerReadBarrierCbnz(uint32_t custom_data);
 
   vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
   vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
@@ -672,8 +672,7 @@
                                              bool use_load_acquire);
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference array load when Baker's read barriers are used.
-  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
-                                             Location ref,
+  void GenerateArrayLoadWithBakerReadBarrier(Location ref,
                                              vixl::aarch64::Register obj,
                                              uint32_t data_offset,
                                              Location index,
@@ -928,6 +927,19 @@
   // Patches for class literals in JIT compiled code.
   TypeToLiteralMap jit_class_patches_;
 
+  // Baker read barrier slow paths, mapping custom data (uint32_t) to label.
+  // Wrap the label to work around vixl::aarch64::Label being non-copyable
+  // and non-moveable and as such unusable in ArenaSafeMap<>.
+  struct LabelWrapper {
+    LabelWrapper(const LabelWrapper& src)
+        : label() {
+      DCHECK(!src.label.IsLinked() && !src.label.IsBound());
+    }
+    LabelWrapper() = default;
+    vixl::aarch64::Label label;
+  };
+  ArenaSafeMap<uint32_t, LabelWrapper> jit_baker_read_barrier_slow_paths_;
+
   friend class linker::Arm64RelativePatcherTest;
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
 };
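The FindOrAdd() call in EmitBakerReadBarrierCbnz() is why the wrapper must be copyable: ArenaSafeMap may copy the mapped value on insertion, and a default-constructed label is only a faithful copy while the source is neither linked nor bound, hence the DCHECK. Usage, condensed from the .cc above:

    auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
    vixl::aarch64::Label* slow_path_entry = &it->second.label;
    __ cbnz(mr, slow_path_entry);  // Branch to the method-local shared slow path.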
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 6d6d1a2..3e63c26 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -85,15 +85,10 @@
 
 // Reference load (except object array loads) is using LDR Rt, [Rn, #offset] which can handle
 // offset < 4KiB. For offsets >= 4KiB, the load shall be emitted as two or more instructions.
-// For the Baker read barrier implementation using link-generated thunks we need to split
+// For the Baker read barrier implementation using link-time generated thunks we need to split
 // the offset explicitly.
 constexpr uint32_t kReferenceLoadMinFarOffset = 4 * KB;
 
-// Flags controlling the use of link-time generated thunks for Baker read barriers.
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForFields = true;
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForArrays = true;
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForGcRoots = true;
-
 // Using a base helps identify when we hit Marking Register check breakpoints.
 constexpr int kMarkingRegisterCheckBreakCodeBaseCode = 0x10;
 
@@ -108,14 +103,6 @@
 // Marker for code that is yet to be, and must be, implemented.
 #define TODO_VIXL32(level) LOG(level) << __PRETTY_FUNCTION__ << " unimplemented "
 
-static inline void EmitPlaceholderBne(CodeGeneratorARMVIXL* codegen, vixl32::Label* patch_label) {
-  ExactAssemblyScope eas(codegen->GetVIXLAssembler(), kMaxInstructionSizeInBytes);
-  __ bind(patch_label);
-  vixl32::Label placeholder_label;
-  __ b(ne, EncodingSize(Wide), &placeholder_label);  // Placeholder, patched at link-time.
-  __ bind(&placeholder_label);
-}
-
 static inline bool CanEmitNarrowLdr(vixl32::Register rt, vixl32::Register rn, uint32_t offset) {
   return rt.IsLow() && rn.IsLow() && offset < 32u;
 }
@@ -150,6 +137,15 @@
   int32_t adr_location_;
 };
 
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  RegisterSet caller_saves = RegisterSet::Empty();
+  caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
+  // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
+  // that the kPrimNot result register is the same as the first argument register.
+  return caller_saves;
+}
+
 // SaveLiveRegisters and RestoreLiveRegisters from SlowPathCodeARM operate on sets of S registers,
 // for each live D register they treat the two corresponding S registers as live ones.
 //
@@ -509,29 +505,39 @@
 
 class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
  public:
-  LoadClassSlowPathARMVIXL(HLoadClass* cls, HInstruction* at, uint32_t dex_pc, bool do_clinit)
-      : SlowPathCodeARMVIXL(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+  LoadClassSlowPathARMVIXL(HLoadClass* cls, HInstruction* at)
+      : SlowPathCodeARMVIXL(at), cls_(cls) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
+    const uint32_t dex_pc = instruction_->GetDexPc();
+    bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+    bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
 
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConventionARMVIXL calling_convention;
-    dex::TypeIndex type_index = cls_->GetTypeIndex();
-    __ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
-    QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
-                                                : kQuickInitializeType;
-    arm_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
-    if (do_clinit_) {
-      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+    if (must_resolve_type) {
+      DCHECK(IsSameDexFile(cls_->GetDexFile(), arm_codegen->GetGraph()->GetDexFile()));
+      dex::TypeIndex type_index = cls_->GetTypeIndex();
+      __ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
+      arm_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+      // If we also must_do_clinit, the resolved type is now in the correct register.
     } else {
-      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+      DCHECK(must_do_clinit);
+      Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+      arm_codegen->Move32(LocationFrom(calling_convention.GetRegisterAt(0)), source);
+    }
+    if (must_do_clinit) {
+      arm_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
     }
 
     // Move the class to the desired location.
@@ -549,12 +555,6 @@
   // The class this slow path will load.
   HLoadClass* const cls_;
 
-  // The dex PC of `at_`.
-  const uint32_t dex_pc_;
-
-  // Whether to initialize the class.
-  const bool do_clinit_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARMVIXL);
 };
 
@@ -783,50 +783,6 @@
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathBaseARMVIXL);
 };
 
-// Slow path marking an object reference `ref` during a read
-// barrier. The field `obj.field` in the object `obj` holding this
-// reference does not get updated by this slow path after marking.
-//
-// This means that after the execution of this slow path, `ref` will
-// always be up-to-date, but `obj.field` may not; i.e., after the
-// flip, `ref` will be a to-space reference, but `obj.field` will
-// probably still be a from-space reference (unless it gets updated by
-// another thread, or if another thread installed another object
-// reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class ReadBarrierMarkSlowPathARMVIXL : public ReadBarrierMarkSlowPathBaseARMVIXL {
- public:
-  ReadBarrierMarkSlowPathARMVIXL(HInstruction* instruction,
-                                 Location ref,
-                                 Location entrypoint = Location::NoLocation())
-      : ReadBarrierMarkSlowPathBaseARMVIXL(instruction, ref, entrypoint) {
-    DCHECK(kEmitCompilerReadBarrier);
-  }
-
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathARMVIXL"; }
-
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
-    LocationSummary* locations = instruction_->GetLocations();
-    DCHECK(locations->CanCall());
-    DCHECK(ref_.IsRegister()) << ref_;
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
-    DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
-        << "Unexpected instruction in read barrier marking slow path: "
-        << instruction_->DebugName();
-
-    __ Bind(GetEntryLabel());
-    GenerateReadBarrierMarkRuntimeCall(codegen);
-    __ B(GetExitLabel());
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARMVIXL);
-};
-
 // Slow path loading `obj`'s lock word, loading a reference from
 // object `*(obj + offset + (index << scale_factor))` into `ref`, and
 // marking `ref` if `obj` is gray according to the lock word (Baker
@@ -2352,7 +2308,9 @@
       jit_string_patches_(StringReferenceValueComparator(),
                           graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       jit_class_patches_(TypeReferenceValueComparator(),
-                         graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
+                         graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+      jit_baker_read_barrier_slow_paths_(std::less<uint32_t>(),
+                                         graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
   // Always save the LR register to mimic Quick.
   AddAllocatedRegister(Location::RegisterLocation(LR));
   // Give D30 and D31 as scratch register to VIXL. The register allocator only works on
@@ -2408,6 +2366,16 @@
 
 void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
   FixJumpTables();
+
+  // Emit JIT baker read barrier slow paths.
+  DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+  for (auto& entry : jit_baker_read_barrier_slow_paths_) {
+    uint32_t encoded_data = entry.first;
+    vixl::aarch32::Label* slow_path_entry = &entry.second.label;
+    __ Bind(slow_path_entry);
+    CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+  }
+
   GetAssembler()->FinalizeCode();
   CodeGenerator::Finalize(allocator);
 
@@ -5947,16 +5915,10 @@
     locations->AddTemp(Location::RequiresRegister());
     locations->AddTemp(Location::RequiresRegister());
   } else if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
-    // We need a temporary register for the read barrier marking slow
-    // path in CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier.
-    if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
-        !Runtime::Current()->UseJitCompilation()) {
-      // If link-time thunks for the Baker read barrier are enabled, for AOT
-      // loads we need a temporary only if the offset is too big.
-      if (field_info.GetFieldOffset().Uint32Value() >= kReferenceLoadMinFarOffset) {
-        locations->AddTemp(Location::RequiresRegister());
-      }
-    } else {
+    // We need a temporary register for the read barrier load in
+    // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier()
+    // only if the offset is too big.
+    if (field_info.GetFieldOffset().Uint32Value() >= kReferenceLoadMinFarOffset) {
       locations->AddTemp(Location::RequiresRegister());
     }
   }
@@ -6371,12 +6333,11 @@
         object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
   }
   if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
-    if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
-        !Runtime::Current()->UseJitCompilation() &&
-        instruction->GetIndex()->IsConstant()) {
+    if (instruction->GetIndex()->IsConstant()) {
       // Array loads with constant index are treated as field loads.
-      // If link-time thunks for the Baker read barrier are enabled, for AOT
-      // constant index loads we need a temporary only if the offset is too big.
+      // We need a temporary register for the read barrier load in
+      // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier()
+      // only if the offset is too big.
       uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction);
       uint32_t index = instruction->GetIndex()->AsIntConstant()->GetValue();
       offset += index << DataType::SizeShift(DataType::Type::kReference);
@@ -6384,9 +6345,8 @@
         locations->AddTemp(Location::RequiresRegister());
       }
     } else {
-      // If using introspection, we need a non-scratch temporary for the array data pointer.
-      // Otherwise, we need a temporary register for the read barrier marking slow
-      // path in CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier.
+      // We need a non-scratch temporary for the array data pointer in
+      // CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier().
       locations->AddTemp(Location::RequiresRegister());
     }
   } else if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
@@ -6516,7 +6476,7 @@
         } else {
           Location temp = locations->GetTemp(0);
           codegen_->GenerateArrayLoadWithBakerReadBarrier(
-              instruction, out_loc, obj, data_offset, index, temp, /* needs_null_check */ false);
+              out_loc, obj, data_offset, index, temp, /* needs_null_check */ false);
         }
       } else {
         vixl32::Register out = OutputRegister(instruction);
@@ -7412,12 +7372,7 @@
   if (load_kind == HLoadClass::LoadKind::kBssEntry) {
     if (!kUseReadBarrier || kUseBakerReadBarrier) {
       // Rely on the type resolution or initialization and marking to save everything we need.
-      RegisterSet caller_saves = RegisterSet::Empty();
-      InvokeRuntimeCallingConventionARMVIXL calling_convention;
-      caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
-      // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
-      // that the the kPrimNot result register is the same as the first argument register.
-      locations->SetCustomSlowPathCallerSaves(caller_saves);
+      locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
     } else {
       // For non-Baker read barrier we have a temp-clobbering call.
     }
@@ -7504,8 +7459,7 @@
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
     LoadClassSlowPathARMVIXL* slow_path =
-        new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(
-            cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+        new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(cls, cls);
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
       __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
@@ -7546,15 +7500,14 @@
   if (check->HasUses()) {
     locations->SetOut(Location::SameAsFirstInput());
   }
+  // Rely on the type initialization to save everything we need.
+  locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
   LoadClassSlowPathARMVIXL* slow_path =
-      new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
-                                                                    check,
-                                                                    check->GetDexPc(),
-                                                                    /* do_clinit */ true);
+      new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(), check);
   codegen_->AddSlowPath(slow_path);
   GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
 }
@@ -7668,12 +7621,7 @@
     if (load_kind == HLoadString::LoadKind::kBssEntry) {
       if (!kUseReadBarrier || kUseBakerReadBarrier) {
         // Rely on the pResolveString and marking to save everything we need, including temps.
-        RegisterSet caller_saves = RegisterSet::Empty();
-        InvokeRuntimeCallingConventionARMVIXL calling_convention;
-        caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
-        // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
-        // that the the kPrimNot result register is the same as the first argument register.
-        locations->SetCustomSlowPathCallerSaves(caller_saves);
+        locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
       } else {
         // For non-Baker read barrier we have a temp-clobbering call.
       }
@@ -8792,73 +8740,41 @@
     if (kUseBakerReadBarrier) {
       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
       // Baker's read barrier are used.
-      if (kBakerReadBarrierLinkTimeThunksEnableForGcRoots &&
-          !Runtime::Current()->UseJitCompilation()) {
-        // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
-        // the Marking Register) to decide whether we need to enter
-        // the slow path to mark the GC root.
-        //
-        // We use link-time generated thunks for the slow path. That thunk
-        // checks the reference and jumps to the entrypoint if needed.
-        //
-        //     lr = &return_address;
-        //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
-        //     if (mr) {  // Thread::Current()->GetIsGcMarking()
-        //       goto gc_root_thunk<root_reg>(lr)
-        //     }
-        //   return_address:
 
-        UseScratchRegisterScope temps(GetVIXLAssembler());
-        temps.Exclude(ip);
-        bool narrow = CanEmitNarrowLdr(root_reg, obj, offset);
-        uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode(), narrow);
-        vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
+      // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
+      // the Marking Register) to decide whether we need to enter
+      // the slow path to mark the GC root.
+      //
+      // We use shared thunks for the slow path; shared within the method
+      // for JIT, across methods for AOT. That thunk checks the reference
+      // and jumps to the entrypoint if needed.
+      //
+      //     lr = &return_address;
+      //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
+      //     if (mr) {  // Thread::Current()->GetIsGcMarking()
+      //       goto gc_root_thunk<root_reg>(lr)
+      //     }
+      //   return_address:
 
-        vixl::EmissionCheckScope guard(GetVIXLAssembler(), 4 * vixl32::kMaxInstructionSizeInBytes);
-        vixl32::Label return_address;
-        EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
-        __ cmp(mr, Operand(0));
-        // Currently the offset is always within range. If that changes,
-        // we shall have to split the load the same way as for fields.
-        DCHECK_LT(offset, kReferenceLoadMinFarOffset);
-        ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
-        __ ldr(EncodingSize(narrow ? Narrow : Wide), root_reg, MemOperand(obj, offset));
-        EmitPlaceholderBne(this, bne_label);
-        __ Bind(&return_address);
-        DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
-                  narrow ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET
-                         : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_OFFSET);
-      } else {
-        // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
-        // the Marking Register) to decide whether we need to enter
-        // the slow path to mark the GC root.
-        //
-        //   GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
-        //   if (mr) {  // Thread::Current()->GetIsGcMarking()
-        //     // Slow path.
-        //     entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
-        //     root = entrypoint(root);  // root = ReadBarrier::Mark(root);  // Entry point call.
-        //   }
+      UseScratchRegisterScope temps(GetVIXLAssembler());
+      temps.Exclude(ip);
+      bool narrow = CanEmitNarrowLdr(root_reg, obj, offset);
+      uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode(), narrow);
 
-        // Slow path marking the GC root `root`. The entrypoint will
-        // be loaded by the slow path code.
-        SlowPathCodeARMVIXL* slow_path =
-            new (GetScopedAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
-        AddSlowPath(slow_path);
-
-        // /* GcRoot<mirror::Object> */ root = *(obj + offset)
-        GetAssembler()->LoadFromOffset(kLoadWord, root_reg, obj, offset);
-        static_assert(
-            sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
-            "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
-            "have different sizes.");
-        static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
-                      "art::mirror::CompressedReference<mirror::Object> and int32_t "
-                      "have different sizes.");
-
-        __ CompareAndBranchIfNonZero(mr, slow_path->GetEntryLabel());
-        __ Bind(slow_path->GetExitLabel());
-      }
+      vixl::EmissionCheckScope guard(GetVIXLAssembler(), 4 * vixl32::kMaxInstructionSizeInBytes);
+      vixl32::Label return_address;
+      EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
+      __ cmp(mr, Operand(0));
+      // Currently the offset is always within range. If that changes,
+      // we shall have to split the load the same way as for fields.
+      DCHECK_LT(offset, kReferenceLoadMinFarOffset);
+      ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
+      __ ldr(EncodingSize(narrow ? Narrow : Wide), root_reg, MemOperand(obj, offset));
+      EmitBakerReadBarrierBne(custom_data);
+      __ Bind(&return_address);
+      DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
+                narrow ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET
+                       : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_OFFSET);
     } else {
       // GC root loaded through a slow path for read barriers other
       // than Baker's.
@@ -8886,87 +8802,76 @@
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
 
-  if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
-      !Runtime::Current()->UseJitCompilation()) {
-    // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
-    // Marking Register) to decide whether we need to enter the slow
-    // path to mark the reference. Then, in the slow path, check the
-    // gray bit in the lock word of the reference's holder (`obj`) to
-    // decide whether to mark `ref` or not.
-    //
-    // We use link-time generated thunks for the slow path. That thunk checks
-    // the holder and jumps to the entrypoint if needed. If the holder is not
-    // gray, it creates a fake dependency and returns to the LDR instruction.
-    //
-    //     lr = &gray_return_address;
-    //     if (mr) {  // Thread::Current()->GetIsGcMarking()
-    //       goto field_thunk<holder_reg, base_reg>(lr)
-    //     }
-    //   not_gray_return_address:
-    //     // Original reference load. If the offset is too large to fit
-    //     // into LDR, we use an adjusted base register here.
-    //     HeapReference<mirror::Object> reference = *(obj+offset);
-    //   gray_return_address:
+  // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
+  // Marking Register) to decide whether we need to enter the slow
+  // path to mark the reference. Then, in the slow path, check the
+  // gray bit in the lock word of the reference's holder (`obj`) to
+  // decide whether to mark `ref` or not.
+  //
+  // We use shared thunks for the slow path; shared within the method
+  // for JIT, across methods for AOT. That thunk checks the holder
+  // and jumps to the entrypoint if needed. If the holder is not gray,
+  // it creates a fake dependency and returns to the LDR instruction.
+  //
+  //     lr = &gray_return_address;
+  //     if (mr) {  // Thread::Current()->GetIsGcMarking()
+  //       goto field_thunk<holder_reg, base_reg>(lr)
+  //     }
+  //   not_gray_return_address:
+  //     // Original reference load. If the offset is too large to fit
+  //     // into LDR, we use an adjusted base register here.
+  //     HeapReference<mirror::Object> reference = *(obj+offset);
+  //   gray_return_address:
 
-    DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
-    vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
-    bool narrow = CanEmitNarrowLdr(ref_reg, obj, offset);
-    vixl32::Register base = obj;
-    if (offset >= kReferenceLoadMinFarOffset) {
-      base = RegisterFrom(temp);
-      static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2.");
-      __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u)));
-      offset &= (kReferenceLoadMinFarOffset - 1u);
-      // Use narrow LDR only for small offsets. Generating narrow encoding LDR for the large
-      // offsets with `(offset & (kReferenceLoadMinFarOffset - 1u)) < 32u` would most likely
-      // increase the overall code size when taking the generated thunks into account.
-      DCHECK(!narrow);
-    }
-    UseScratchRegisterScope temps(GetVIXLAssembler());
-    temps.Exclude(ip);
-    uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode(), narrow);
-    vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
-
-    {
-      vixl::EmissionCheckScope guard(
-          GetVIXLAssembler(),
-          (kPoisonHeapReferences ? 5u : 4u) * vixl32::kMaxInstructionSizeInBytes);
-      vixl32::Label return_address;
-      EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
-      __ cmp(mr, Operand(0));
-      EmitPlaceholderBne(this, bne_label);
-      ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
-      __ ldr(EncodingSize(narrow ? Narrow : Wide), ref_reg, MemOperand(base, offset));
-      if (needs_null_check) {
-        MaybeRecordImplicitNullCheck(instruction);
-      }
-      // Note: We need a specific width for the unpoisoning NEG.
-      if (kPoisonHeapReferences) {
-        if (narrow) {
-          // The only 16-bit encoding is T1 which sets flags outside IT block (i.e. RSBS, not RSB).
-          __ rsbs(EncodingSize(Narrow), ref_reg, ref_reg, Operand(0));
-        } else {
-          __ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0));
-        }
-      }
-      __ Bind(&return_address);
-      DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
-                narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
-                       : BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
-    }
-    MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
-    return;
+  DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
+  vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
+  bool narrow = CanEmitNarrowLdr(ref_reg, obj, offset);
+  vixl32::Register base = obj;
+  if (offset >= kReferenceLoadMinFarOffset) {
+    base = RegisterFrom(temp);
+    static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2.");
+    __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u)));
+    offset &= (kReferenceLoadMinFarOffset - 1u);
+    // Use narrow LDR only for small offsets. Generating narrow encoding LDR for the large
+    // offsets with `(offset & (kReferenceLoadMinFarOffset - 1u)) < 32u` would most likely
+    // increase the overall code size when taking the generated thunks into account.
+    DCHECK(!narrow);
   }
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  temps.Exclude(ip);
+  uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode(), narrow);
 
-  // /* HeapReference<Object> */ ref = *(obj + offset)
-  Location no_index = Location::NoLocation();
-  ScaleFactor no_scale_factor = TIMES_1;
-  GenerateReferenceLoadWithBakerReadBarrier(
-      instruction, ref, obj, offset, no_index, no_scale_factor, temp, needs_null_check);
+  {
+    vixl::EmissionCheckScope guard(
+        GetVIXLAssembler(),
+        (kPoisonHeapReferences ? 5u : 4u) * vixl32::kMaxInstructionSizeInBytes);
+    vixl32::Label return_address;
+    EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
+    __ cmp(mr, Operand(0));
+    EmitBakerReadBarrierBne(custom_data);
+    ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
+    __ ldr(EncodingSize(narrow ? Narrow : Wide), ref_reg, MemOperand(base, offset));
+    if (needs_null_check) {
+      MaybeRecordImplicitNullCheck(instruction);
+    }
+    // Note: We need a specific width for the unpoisoning NEG.
+    if (kPoisonHeapReferences) {
+      if (narrow) {
+        // The only 16-bit encoding is T1 which sets flags outside IT block (i.e. RSBS, not RSB).
+        __ rsbs(EncodingSize(Narrow), ref_reg, ref_reg, Operand(0));
+      } else {
+        __ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0));
+      }
+    }
+    __ Bind(&return_address);
+    DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
+              narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
+                     : BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
+  }
+  MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
 }
 
-void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
-                                                                 Location ref,
+void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref,
                                                                  vixl32::Register obj,
                                                                  uint32_t data_offset,
                                                                  Location index,
@@ -8980,66 +8885,57 @@
       "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
   ScaleFactor scale_factor = TIMES_4;
 
-  if (kBakerReadBarrierLinkTimeThunksEnableForArrays &&
-      !Runtime::Current()->UseJitCompilation()) {
-    // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
-    // Marking Register) to decide whether we need to enter the slow
-    // path to mark the reference. Then, in the slow path, check the
-    // gray bit in the lock word of the reference's holder (`obj`) to
-    // decide whether to mark `ref` or not.
-    //
-    // We use link-time generated thunks for the slow path. That thunk checks
-    // the holder and jumps to the entrypoint if needed. If the holder is not
-    // gray, it creates a fake dependency and returns to the LDR instruction.
-    //
-    //     lr = &gray_return_address;
-    //     if (mr) {  // Thread::Current()->GetIsGcMarking()
-    //       goto array_thunk<base_reg>(lr)
-    //     }
-    //   not_gray_return_address:
-    //     // Original reference load. If the offset is too large to fit
-    //     // into LDR, we use an adjusted base register here.
-    //     HeapReference<mirror::Object> reference = data[index];
-    //   gray_return_address:
+  // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
+  // Marking Register) to decide whether we need to enter the slow
+  // path to mark the reference. Then, in the slow path, check the
+  // gray bit in the lock word of the reference's holder (`obj`) to
+  // decide whether to mark `ref` or not.
+  //
+  // We use shared thunks for the slow path; shared within the method
+  // for JIT, across methods for AOT. That thunk checks the holder
+  // and jumps to the entrypoint if needed. If the holder is not gray,
+  // it creates a fake dependency and returns to the LDR instruction.
+  //
+  //     lr = &gray_return_address;
+  //     if (mr) {  // Thread::Current()->GetIsGcMarking()
+  //       goto array_thunk<base_reg>(lr)
+  //     }
+  //   not_gray_return_address:
+  //     // Original reference load. If the offset is too large to fit
+  //     // into LDR, we use an adjusted base register here.
+  //     HeapReference<mirror::Object> reference = data[index];
+  //   gray_return_address:
 
-    DCHECK(index.IsValid());
-    vixl32::Register index_reg = RegisterFrom(index, DataType::Type::kInt32);
-    vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
-    vixl32::Register data_reg = RegisterFrom(temp, DataType::Type::kInt32);  // Raw pointer.
+  DCHECK(index.IsValid());
+  vixl32::Register index_reg = RegisterFrom(index, DataType::Type::kInt32);
+  vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
+  vixl32::Register data_reg = RegisterFrom(temp, DataType::Type::kInt32);  // Raw pointer.
 
-    UseScratchRegisterScope temps(GetVIXLAssembler());
-    temps.Exclude(ip);
-    uint32_t custom_data = EncodeBakerReadBarrierArrayData(data_reg.GetCode());
-    vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  temps.Exclude(ip);
+  uint32_t custom_data = EncodeBakerReadBarrierArrayData(data_reg.GetCode());
 
-    __ Add(data_reg, obj, Operand(data_offset));
-    {
-      vixl::EmissionCheckScope guard(
-          GetVIXLAssembler(),
-          (kPoisonHeapReferences ? 5u : 4u) * vixl32::kMaxInstructionSizeInBytes);
-      vixl32::Label return_address;
-      EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
-      __ cmp(mr, Operand(0));
-      EmitPlaceholderBne(this, bne_label);
-      ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
-      __ ldr(ref_reg, MemOperand(data_reg, index_reg, vixl32::LSL, scale_factor));
-      DCHECK(!needs_null_check);  // The thunk cannot handle the null check.
-      // Note: We need a Wide NEG for the unpoisoning.
-      if (kPoisonHeapReferences) {
-        __ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0));
-      }
-      __ Bind(&return_address);
-      DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
-                BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
+  __ Add(data_reg, obj, Operand(data_offset));
+  {
+    vixl::EmissionCheckScope guard(
+        GetVIXLAssembler(),
+        (kPoisonHeapReferences ? 5u : 4u) * vixl32::kMaxInstructionSizeInBytes);
+    vixl32::Label return_address;
+    EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
+    __ cmp(mr, Operand(0));
+    EmitBakerReadBarrierBne(custom_data);
+    ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
+    __ ldr(ref_reg, MemOperand(data_reg, index_reg, vixl32::LSL, scale_factor));
+    DCHECK(!needs_null_check);  // The thunk cannot handle the null check.
+    // Note: We need a Wide NEG for the unpoisoning.
+    if (kPoisonHeapReferences) {
+      __ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0));
     }
-    MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
-    return;
+    __ Bind(&return_address);
+    DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
+              BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
   }
-
-  // /* HeapReference<Object> */ ref =
-  //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
-  GenerateReferenceLoadWithBakerReadBarrier(
-      instruction, ref, obj, data_offset, index, scale_factor, temp, needs_null_check);
+  MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
 }
 
 void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -9491,9 +9387,20 @@
   return &patches->back();
 }
 
-vixl32::Label* CodeGeneratorARMVIXL::NewBakerReadBarrierPatch(uint32_t custom_data) {
-  baker_read_barrier_patches_.emplace_back(custom_data);
-  return &baker_read_barrier_patches_.back().label;
+void CodeGeneratorARMVIXL::EmitBakerReadBarrierBne(uint32_t custom_data) {
+  ExactAssemblyScope eas(GetVIXLAssembler(), 1 * k32BitT32InstructionSizeInBytes);
+  if (Runtime::Current()->UseJitCompilation()) {
+    auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
+    vixl::aarch32::Label* slow_path_entry = &it->second.label;
+    __ b(ne, EncodingSize(Wide), slow_path_entry);
+  } else {
+    baker_read_barrier_patches_.emplace_back(custom_data);
+    vixl::aarch32::Label* patch_label = &baker_read_barrier_patches_.back().label;
+    __ bind(patch_label);
+    vixl32::Label placeholder_label;
+    __ b(ne, EncodingSize(Wide), &placeholder_label);  // Placeholder, patched at link-time.
+    __ bind(&placeholder_label);
+  }
 }
 
 VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateBootImageAddressLiteral(uint32_t address) {
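
For JIT, the BNE recorded above is resolved within the method itself: every use of the same custom_data branches to one shared label, and Finalize() later binds each label and compiles the thunk once per distinct encoding (the `__ Bind(slow_path_entry)` / CompileBakerReadBarrierThunk() loop at the top of this diff). A sketch of that deduplication, with std::map standing in for ArenaSafeMap<> and a plain struct for the label:

    #include <cstdint>
    #include <map>
    #include <vector>

    // Sketch only: one shared slow-path label per custom_data value within
    // a JIT-compiled method. `Label` stands in for vixl::aarch32::Label.
    struct Label { bool bound = false; };

    std::map<uint32_t, Label> jit_slow_paths;

    Label* GetOrCreateSlowPathEntry(uint32_t custom_data) {
      // Like ArenaSafeMap<>::FindOrAdd(): repeated encodings reuse the entry.
      return &jit_slow_paths[custom_data];
    }

    void FinalizeSlowPaths(std::vector<uint32_t>* emitted) {
      // At Finalize() time, each distinct encoding gets exactly one thunk body.
      for (auto& entry : jit_slow_paths) {
        entry.second.bound = true;        // __ Bind(slow_path_entry);
        emitted->push_back(entry.first);  // CompileBakerReadBarrierThunk(..., entry.first, ...);
      }
    }
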
@@ -10085,7 +9992,12 @@
       UNREACHABLE();
   }
 
-  if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+  // For JIT, the slow path is considered part of the compiled method,
+  // so JIT should pass null as `debug_name`. Tests may not have a runtime.
+  DCHECK(Runtime::Current() == nullptr ||
+         !Runtime::Current()->UseJitCompilation() ||
+         debug_name == nullptr);
+  if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
     std::ostringstream oss;
     oss << "BakerReadBarrierThunk";
     switch (kind) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index fc8cf98..0106236 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -589,9 +589,9 @@
   PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
                                               dex::StringIndex string_index);
 
-  // Add a new baker read barrier patch and return the label to be bound
-  // before the BNE instruction.
-  vixl::aarch32::Label* NewBakerReadBarrierPatch(uint32_t custom_data);
+  // Emit the BNE instruction for the Baker read barrier and record
+  // the associated patch (for AOT) or the slow path entry (for JIT).
+  void EmitBakerReadBarrierBne(uint32_t custom_data);
 
   VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
   VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
@@ -632,8 +632,7 @@
                                              bool needs_null_check);
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference array load when Baker's read barriers are used.
-  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
-                                             Location ref,
+  void GenerateArrayLoadWithBakerReadBarrier(Location ref,
                                              vixl::aarch32::Register obj,
                                              uint32_t data_offset,
                                              Location index,
@@ -916,6 +915,19 @@
   // Patches for class literals in JIT compiled code.
   TypeToLiteralMap jit_class_patches_;
 
+  // Baker read barrier slow paths, mapping custom data (uint32_t) to label.
+  // Wrap the label to work around vixl::aarch32::Label being non-copyable
+  // and non-moveable and as such unusable in ArenaSafeMap<>.
+  struct LabelWrapper {
+    LabelWrapper(const LabelWrapper& src)
+        : label() {
+      DCHECK(!src.label.IsReferenced() && !src.label.IsBound());
+    }
+    LabelWrapper() = default;
+    vixl::aarch32::Label label;
+  };
+  ArenaSafeMap<uint32_t, LabelWrapper> jit_baker_read_barrier_slow_paths_;
+
   friend class linker::Thumb2RelativePatcherTest;
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
 };
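
The copy constructor above starts from a fresh label and merely asserts that the source was never referenced or bound; that is sufficient because ArenaSafeMap<> only copies the value on insertion, before any branch targets the label. A compilable sketch of the same workaround, with a stand-in for the non-copyable vixl label:

    #include <cassert>
    #include <map>

    // Stand-in for vixl::aarch32::Label: non-copyable and non-moveable.
    class NonCopyableLabel {
     public:
      NonCopyableLabel() = default;
      NonCopyableLabel(const NonCopyableLabel&) = delete;
      NonCopyableLabel& operator=(const NonCopyableLabel&) = delete;
      bool IsReferenced() const { return referenced_; }
      bool IsBound() const { return bound_; }
      void Reference() { referenced_ = true; }
     private:
      bool referenced_ = false;
      bool bound_ = false;
    };

    // Same trick as the patch: a copyable wrapper that insists the wrapped
    // label is still unused whenever a copy happens, which is exactly the
    // situation during map insertion.
    struct LabelWrapper {
      LabelWrapper() = default;
      LabelWrapper(const LabelWrapper& src) : label() {
        assert(!src.label.IsReferenced() && !src.label.IsBound());
      }
      NonCopyableLabel label;
    };

    int main() {
      std::map<unsigned, LabelWrapper> slow_paths;
      // Insertion copies the (still unused) wrapper; the stored label is
      // then referenced in place and never copied again.
      auto it = slow_paths.insert({0x1234u, LabelWrapper()}).first;
      it->second.label.Reference();
      return 0;
    }
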
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 4aed2c0..0ed5756 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -160,6 +160,14 @@
   return MipsReturnLocation(type);
 }
 
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+  InvokeRuntimeCallingConvention calling_convention;
+  RegisterSet caller_saves = RegisterSet::Empty();
+  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  // The reference is returned in the same register. This differs from the standard return location.
+  return caller_saves;
+}
+
 // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
 #define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->  // NOLINT
 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
@@ -222,35 +230,41 @@
 
 class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
  public:
-  LoadClassSlowPathMIPS(HLoadClass* cls,
-                        HInstruction* at,
-                        uint32_t dex_pc,
-                        bool do_clinit)
-      : SlowPathCodeMIPS(at),
-        cls_(cls),
-        dex_pc_(dex_pc),
-        do_clinit_(do_clinit) {
+  LoadClassSlowPathMIPS(HLoadClass* cls, HInstruction* at)
+      : SlowPathCodeMIPS(at), cls_(cls) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
+    const uint32_t dex_pc = instruction_->GetDexPc();
+    bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+    bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
+
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    InvokeRuntimeCallingConvention calling_convention;
-    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
 
-    dex::TypeIndex type_index = cls_->GetTypeIndex();
-    __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
-    QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
-                                                : kQuickInitializeType;
-    mips_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
-    if (do_clinit_) {
-      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+    InvokeRuntimeCallingConvention calling_convention;
+    if (must_resolve_type) {
+      DCHECK(IsSameDexFile(cls_->GetDexFile(), mips_codegen->GetGraph()->GetDexFile()));
+      dex::TypeIndex type_index = cls_->GetTypeIndex();
+      __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
+      mips_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+      // If we also must_do_clinit, the resolved type is now in the correct register.
     } else {
-      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+      DCHECK(must_do_clinit);
+      Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+      mips_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+                                 source,
+                                 cls_->GetType());
+    }
+    if (must_do_clinit) {
+      mips_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
     }
 
     // Move the class to the desired location.
@@ -272,12 +286,6 @@
   // The class this slow path will load.
   HLoadClass* const cls_;
 
-  // The dex PC of `at_`.
-  const uint32_t dex_pc_;
-
-  // Whether to initialize the class.
-  const bool do_clinit_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
 };
 
@@ -3594,15 +3602,14 @@
   if (check->HasUses()) {
     locations->SetOut(Location::SameAsFirstInput());
   }
+  // Rely on the type initialization to save everything we need.
+  locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
 }
 
 void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
-  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
-      check->GetLoadClass(),
-      check,
-      check->GetDexPc(),
-      true);
+  SlowPathCodeMIPS* slow_path =
+      new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(check->GetLoadClass(), check);
   codegen_->AddSlowPath(slow_path);
   GenerateClassInitializationCheck(slow_path,
                                    check->GetLocations()->InAt(0).AsRegister<Register>());
@@ -8137,10 +8144,7 @@
   if (load_kind == HLoadClass::LoadKind::kBssEntry) {
     if (!kUseReadBarrier || kUseBakerReadBarrier) {
       // Rely on the type resolution or initialization and marking to save everything we need.
-      RegisterSet caller_saves = RegisterSet::Empty();
-      InvokeRuntimeCallingConvention calling_convention;
-      caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-      locations->SetCustomSlowPathCallerSaves(caller_saves);
+      locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
     } else {
       // For non-Baker read barriers we have a temp-clobbering call.
     }
@@ -8277,8 +8281,8 @@
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
-        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+    SlowPathCodeMIPS* slow_path =
+        new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(cls, cls);
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
       __ Beqz(out, slow_path->GetEntryLabel());
@@ -8371,10 +8375,7 @@
     if (load_kind == HLoadString::LoadKind::kBssEntry) {
       if (!kUseReadBarrier || kUseBakerReadBarrier) {
         // Rely on the pResolveString and marking to save everything we need.
-        RegisterSet caller_saves = RegisterSet::Empty();
-        InvokeRuntimeCallingConvention calling_convention;
-        caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-        locations->SetCustomSlowPathCallerSaves(caller_saves);
+        locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
       } else {
         // For non-Baker read barriers we have a temp-clobbering call.
       }
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7516913..2b6928e 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -112,6 +112,14 @@
   return Mips64ReturnLocation(type);
 }
 
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+  InvokeRuntimeCallingConvention calling_convention;
+  RegisterSet caller_saves = RegisterSet::Empty();
+  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  // The reference is returned in the same register. This differs from the standard return location.
+  return caller_saves;
+}
+
 // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
 #define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->  // NOLINT
 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
@@ -175,35 +183,41 @@
 
 class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  LoadClassSlowPathMIPS64(HLoadClass* cls,
-                          HInstruction* at,
-                          uint32_t dex_pc,
-                          bool do_clinit)
-      : SlowPathCodeMIPS64(at),
-        cls_(cls),
-        dex_pc_(dex_pc),
-        do_clinit_(do_clinit) {
+  LoadClassSlowPathMIPS64(HLoadClass* cls, HInstruction* at)
+      : SlowPathCodeMIPS64(at), cls_(cls) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
+    const uint32_t dex_pc = instruction_->GetDexPc();
+    bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+    bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
+
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    InvokeRuntimeCallingConvention calling_convention;
-    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
 
-    dex::TypeIndex type_index = cls_->GetTypeIndex();
-    __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
-    QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
-                                                : kQuickInitializeType;
-    mips64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
-    if (do_clinit_) {
-      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+    InvokeRuntimeCallingConvention calling_convention;
+    if (must_resolve_type) {
+      DCHECK(IsSameDexFile(cls_->GetDexFile(), mips64_codegen->GetGraph()->GetDexFile()));
+      dex::TypeIndex type_index = cls_->GetTypeIndex();
+      __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
+      mips64_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+      // If we also must_do_clinit, the resolved type is now in the correct register.
     } else {
-      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+      DCHECK(must_do_clinit);
+      Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+      mips64_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+                                   source,
+                                   cls_->GetType());
+    }
+    if (must_do_clinit) {
+      mips64_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
     }
 
     // Move the class to the desired location.
@@ -225,12 +239,6 @@
   // The class this slow path will load.
   HLoadClass* const cls_;
 
-  // The dex PC of `at_`.
-  const uint32_t dex_pc_;
-
-  // Whether to initialize the class.
-  const bool do_clinit_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
 };
 
@@ -3149,15 +3157,14 @@
   if (check->HasUses()) {
     locations->SetOut(Location::SameAsFirstInput());
   }
+  // Rely on the type initialization to save everything we need.
+  locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
 }
 
 void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
-  SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
-      check->GetLoadClass(),
-      check,
-      check->GetDexPc(),
-      true);
+  SlowPathCodeMIPS64* slow_path =
+      new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(check->GetLoadClass(), check);
   codegen_->AddSlowPath(slow_path);
   GenerateClassInitializationCheck(slow_path,
                                    check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
@@ -6209,10 +6216,7 @@
   if (load_kind == HLoadClass::LoadKind::kBssEntry) {
     if (!kUseReadBarrier || kUseBakerReadBarrier) {
       // Rely on the type resolution or initialization and marking to save everything we need.
-      RegisterSet caller_saves = RegisterSet::Empty();
-      InvokeRuntimeCallingConvention calling_convention;
-      caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-      locations->SetCustomSlowPathCallerSaves(caller_saves);
+      locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
     } else {
       // For non-Baker read barriers we have a temp-clobbering call.
     }
@@ -6315,8 +6319,8 @@
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
-        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+    SlowPathCodeMIPS64* slow_path =
+        new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(cls, cls);
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
       __ Beqzc(out, slow_path->GetEntryLabel());
@@ -6384,10 +6388,7 @@
     if (load_kind == HLoadString::LoadKind::kBssEntry) {
       if (!kUseReadBarrier || kUseBakerReadBarrier) {
         // Rely on the pResolveString and marking to save everything we need.
-        RegisterSet caller_saves = RegisterSet::Empty();
-        InvokeRuntimeCallingConvention calling_convention;
-        caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-        locations->SetCustomSlowPathCallerSaves(caller_saves);
+        locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
       } else {
         // For non-Baker read barriers we have a temp-clobbering call.
       }
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 30436ee..a835aed 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -55,6 +55,15 @@
 static constexpr int64_t kDoubleNaN = INT64_C(0x7FF8000000000000);
 static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000);
 
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+  InvokeRuntimeCallingConvention calling_convention;
+  RegisterSet caller_saves = RegisterSet::Empty();
+  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
+  // that the kPrimNot result register is the same as the first argument register.
+  return caller_saves;
+}
+
 // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
 #define __ down_cast<X86Assembler*>(codegen->GetAssembler())->  // NOLINT
 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, x).Int32Value()
@@ -255,36 +264,42 @@
 
 class LoadClassSlowPathX86 : public SlowPathCode {
  public:
-  LoadClassSlowPathX86(HLoadClass* cls,
-                       HInstruction* at,
-                       uint32_t dex_pc,
-                       bool do_clinit)
-      : SlowPathCode(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+  LoadClassSlowPathX86(HLoadClass* cls, HInstruction* at)
+      : SlowPathCode(at), cls_(cls) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location out = locations->Out();
+    const uint32_t dex_pc = instruction_->GetDexPc();
+    bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+    bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
+
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    dex::TypeIndex type_index = cls_->GetTypeIndex();
-    __ movl(calling_convention.GetRegisterAt(0), Immediate(type_index.index_));
-    x86_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage
-                                          : kQuickInitializeType,
-                               instruction_,
-                               dex_pc_,
-                               this);
-    if (do_clinit_) {
-      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+    if (must_resolve_type) {
+      DCHECK(IsSameDexFile(cls_->GetDexFile(), x86_codegen->GetGraph()->GetDexFile()));
+      dex::TypeIndex type_index = cls_->GetTypeIndex();
+      __ movl(calling_convention.GetRegisterAt(0), Immediate(type_index.index_));
+      x86_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+      // If we also must_do_clinit, the resolved type is now in the correct register.
     } else {
-      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+      DCHECK(must_do_clinit);
+      Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+      x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), source);
+    }
+    if (must_do_clinit) {
+      x86_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
     }
 
     // Move the class to the desired location.
-    Location out = locations->Out();
     if (out.IsValid()) {
       DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
       x86_codegen->Move32(out, Location::RegisterLocation(EAX));
@@ -299,12 +314,6 @@
   // The class this slow path will load.
   HLoadClass* const cls_;
 
-  // The dex PC of `at_`.
-  const uint32_t dex_pc_;
-
-  // Whether to initialize the class.
-  const bool do_clinit_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86);
 };
 
@@ -6489,10 +6498,7 @@
   if (load_kind == HLoadClass::LoadKind::kBssEntry) {
     if (!kUseReadBarrier || kUseBakerReadBarrier) {
       // Rely on the type resolution and/or initialization to save everything.
-      RegisterSet caller_saves = RegisterSet::Empty();
-      InvokeRuntimeCallingConvention calling_convention;
-      caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-      locations->SetCustomSlowPathCallerSaves(caller_saves);
+      locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
     } else {
       // For non-Baker read barrier we have a temp-clobbering call.
     }
@@ -6588,8 +6594,7 @@
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(
-        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+    SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(cls, cls);
     codegen_->AddSlowPath(slow_path);
 
     if (generate_null_check) {
@@ -6632,12 +6637,14 @@
   if (check->HasUses()) {
     locations->SetOut(Location::SameAsFirstInput());
   }
+  // Rely on the type initialization to save everything we need.
+  locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
 }
 
 void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class to not be null.
-  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(
-      check->GetLoadClass(), check, check->GetDexPc(), true);
+  SlowPathCode* slow_path =
+      new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(check->GetLoadClass(), check);
   codegen_->AddSlowPath(slow_path);
   GenerateClassInitializationCheck(slow_path,
                                    check->GetLocations()->InAt(0).AsRegister<Register>());
@@ -6711,10 +6718,7 @@
     if (load_kind == HLoadString::LoadKind::kBssEntry) {
       if (!kUseReadBarrier || kUseBakerReadBarrier) {
         // Rely on the pResolveString to save everything.
-        RegisterSet caller_saves = RegisterSet::Empty();
-        InvokeRuntimeCallingConvention calling_convention;
-        caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-        locations->SetCustomSlowPathCallerSaves(caller_saves);
+        locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
       } else {
         // For non-Baker read barrier we have a temp-clobbering call.
       }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0d7837e..dee891b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -56,6 +56,13 @@
 
 static constexpr int kC2ConditionMask = 0x400;
 
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+  // Custom calling convention: RAX serves as both input and output.
+  RegisterSet caller_saves = RegisterSet::Empty();
+  caller_saves.Add(Location::RegisterLocation(RAX));
+  return caller_saves;
+}
+
 // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
 #define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())->  // NOLINT
 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, x).Int32Value()
@@ -239,34 +246,41 @@
 
 class LoadClassSlowPathX86_64 : public SlowPathCode {
  public:
-  LoadClassSlowPathX86_64(HLoadClass* cls,
-                          HInstruction* at,
-                          uint32_t dex_pc,
-                          bool do_clinit)
-      : SlowPathCode(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+  LoadClassSlowPathX86_64(HLoadClass* cls, HInstruction* at)
+      : SlowPathCode(at), cls_(cls) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location out = locations->Out();
+    const uint32_t dex_pc = instruction_->GetDexPc();
+    bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+    bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
+
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-
     SaveLiveRegisters(codegen, locations);
 
     // Custom calling convention: RAX serves as both input and output.
-    __ movl(CpuRegister(RAX), Immediate(cls_->GetTypeIndex().index_));
-    x86_64_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage : kQuickInitializeType,
-                                  instruction_,
-                                  dex_pc_,
-                                  this);
-    if (do_clinit_) {
-      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+    if (must_resolve_type) {
+      DCHECK(IsSameDexFile(cls_->GetDexFile(), x86_64_codegen->GetGraph()->GetDexFile()));
+      dex::TypeIndex type_index = cls_->GetTypeIndex();
+      __ movl(CpuRegister(RAX), Immediate(type_index.index_));
+      x86_64_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+      // If we also must_do_clinit, the resolved type is now in the correct register.
     } else {
-      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+      DCHECK(must_do_clinit);
+      Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+      x86_64_codegen->Move(Location::RegisterLocation(RAX), source);
+    }
+    if (must_do_clinit) {
+      x86_64_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
     }
 
-    Location out = locations->Out();
     // Move the class to the desired location.
     if (out.IsValid()) {
       DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -283,12 +297,6 @@
   // The class this slow path will load.
   HLoadClass* const cls_;
 
-  // The dex PC of `at_`.
-  const uint32_t dex_pc_;
-
-  // Whether to initialize the class.
-  const bool do_clinit_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86_64);
 };
 
@@ -5831,10 +5839,7 @@
   if (load_kind == HLoadClass::LoadKind::kBssEntry) {
     if (!kUseReadBarrier || kUseBakerReadBarrier) {
       // Rely on the type resolution and/or initialization to save everything.
-      // Custom calling convention: RAX serves as both input and output.
-      RegisterSet caller_saves = RegisterSet::Empty();
-      caller_saves.Add(Location::RegisterLocation(RAX));
-      locations->SetCustomSlowPathCallerSaves(caller_saves);
+      locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
     } else {
       // For non-Baker read barrier we have a temp-clobbering call.
     }
@@ -5927,8 +5932,8 @@
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(
-        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+    SlowPathCode* slow_path =
+        new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(cls, cls);
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
       __ testl(out, out);
@@ -5949,6 +5954,8 @@
   if (check->HasUses()) {
     locations->SetOut(Location::SameAsFirstInput());
   }
+  // Rely on the type initialization to save everything we need.
+  locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
 }
 
 void LocationsBuilderX86_64::VisitLoadMethodHandle(HLoadMethodHandle* load) {
@@ -5973,8 +5980,8 @@
 
 void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class to not be null.
-  SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(
-      check->GetLoadClass(), check, check->GetDexPc(), true);
+  SlowPathCode* slow_path =
+      new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(check->GetLoadClass(), check);
   codegen_->AddSlowPath(slow_path);
   GenerateClassInitializationCheck(slow_path,
                                    check->GetLocations()->InAt(0).AsRegister<CpuRegister>());
@@ -6008,10 +6015,7 @@
     if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
       if (!kUseReadBarrier || kUseBakerReadBarrier) {
         // Rely on the pResolveString to save everything.
-        // Custom calling convention: RAX serves as both input and output.
-        RegisterSet caller_saves = RegisterSet::Empty();
-        caller_saves.Add(Location::RegisterLocation(RAX));
-        locations->SetCustomSlowPathCallerSaves(caller_saves);
+        locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
       } else {
         // For non-Baker read barrier we have a temp-clobbering call.
       }
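
With this change every backend derives the slow-path behavior from the instruction itself instead of carrying dex_pc_ and do_clinit_ fields, and the kBssEntry case resolves the type via kQuickResolveType before (optionally) calling kQuickInitializeStaticStorage on the resolved class. A backend-neutral sketch of that shared decision logic; the names below are stand-ins, not ART's API:

    #include <cassert>

    // Sketch only: the decision logic now shared by all five backends.
    enum class Entrypoint { kResolveType, kInitializeStaticStorage };

    struct SlowPathRequest {
      bool is_load_class;              // instruction_->IsLoadClass() (else ClinitCheck).
      bool must_resolve_on_slow_path;  // cls_->MustResolveTypeOnSlowPath(), i.e. kBssEntry.
      bool must_generate_clinit;       // cls_->MustGenerateClinitCheck().
    };

    template <typename InvokeFn>
    void EmitLoadClassSlowPath(const SlowPathRequest& req, InvokeFn&& invoke) {
      bool must_resolve_type = req.is_load_class && req.must_resolve_on_slow_path;
      bool must_do_clinit = !req.is_load_class || req.must_generate_clinit;
      if (must_resolve_type) {
        // Pass the type index; kQuickResolveType leaves the resolved class
        // in the same register that the initialization entrypoint reads.
        invoke(Entrypoint::kResolveType);
      } else {
        assert(must_do_clinit);  // Otherwise the slow path has nothing to do.
        // Move the already-resolved class into the calling-convention register.
      }
      if (must_do_clinit) {
        // Now takes a mirror::Class* instead of a raw type index.
        invoke(Entrypoint::kInitializeStaticStorage);
      }
    }

    // Usage: EmitLoadClassSlowPath(req, [&](Entrypoint ep) { /* InvokeRuntime */ });
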
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 8f822cc..79a7e2c 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1301,6 +1301,15 @@
     ++it;
     if (dominator->StrictlyDominates(user)) {
       user->ReplaceInput(replacement, index);
+    } else if (user->IsPhi() && !user->AsPhi()->IsCatchPhi()) {
+      // If the input flows from a block dominated by `dominator`, we can replace it.
+      // We do not perform this for catch phis as we don't have control flow support
+      // for their inputs.
+      const ArenaVector<HBasicBlock*>& predecessors = user->GetBlock()->GetPredecessors();
+      HBasicBlock* predecessor = predecessors[index];
+      if (dominator->GetBlock()->Dominates(predecessor)) {
+        user->ReplaceInput(replacement, index);
+      }
     }
   }
 }
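
The new phi case checks dominance against the predecessor that feeds the input, because a phi's i-th input is live on the edge from its i-th predecessor rather than in the phi's own block; catch phis are skipped since, as the comment says, there is no control-flow support for their inputs. A minimal sketch of that rule with stand-in block types:

    #include <cstddef>
    #include <vector>

    // Sketch only: minimal stand-ins for HBasicBlock to show the rule used
    // in the phi handling above.
    struct Block {
      const Block* idom = nullptr;             // Immediate dominator.
      std::vector<const Block*> predecessors;  // One per incoming edge.

      bool DominatedBy(const Block* dom) const {
        for (const Block* b = this; b != nullptr; b = b->idom) {
          if (b == dom) return true;
        }
        return false;
      }
    };

    // A phi's i-th input flows in along the edge from its i-th predecessor,
    // so that predecessor (not the phi's own block) must be dominated.
    bool CanReplacePhiInput(const Block& dominator_block,
                            const Block& phi_block,
                            size_t input_index) {
      return phi_block.predecessors[input_index]->DominatedBy(&dominator_block);
    }
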
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 16a7417..8b9e1da 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -6284,6 +6284,13 @@
   bool IsInBootImage() const { return GetPackedFlag<kFlagIsInBootImage>(); }
   bool MustGenerateClinitCheck() const { return GetPackedFlag<kFlagGenerateClInitCheck>(); }
 
+  bool MustResolveTypeOnSlowPath() const {
+    // Check that this instruction has a slow path.
+    DCHECK(GetLoadKind() != LoadKind::kRuntimeCall);  // kRuntimeCall calls on main path.
+    DCHECK(GetLoadKind() == LoadKind::kBssEntry || MustGenerateClinitCheck());
+    return GetLoadKind() == LoadKind::kBssEntry;
+  }
+
   void MarkInBootImage() {
     SetPackedFlag<kFlagIsInBootImage>(true);
   }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d96746f..c40cbcf 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -711,15 +711,7 @@
                                          CodeGenerator* codegen,
                                          const DexFile::CodeItem* code_item_for_osr_check) const {
   ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
-  ArenaVector<uint8_t> stack_map(allocator->Adapter(kArenaAllocStackMaps));
-  ArenaVector<uint8_t> method_info(allocator->Adapter(kArenaAllocStackMaps));
-  size_t stack_map_size = 0;
-  size_t method_info_size = 0;
-  codegen->ComputeStackMapSize(&stack_map_size);
-  stack_map.resize(stack_map_size);
-  method_info.resize(method_info_size);
-  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()),
-                          code_item_for_osr_check);
+  ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);
 
   CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
       GetCompilerDriver(),
@@ -1097,19 +1089,19 @@
   return compiled_method;
 }
 
-static void CreateJniStackMap(ArenaStack* arena_stack,
-                              const JniCompiledMethod& jni_compiled_method,
-                              /* out */ ArenaVector<uint8_t>* stack_map) {
-  ScopedArenaAllocator allocator(arena_stack);
-  StackMapStream stack_map_stream(&allocator, jni_compiled_method.GetInstructionSet());
-  stack_map_stream.BeginMethod(
+static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
+                                                    const JniCompiledMethod& jni_compiled_method) {
+  // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
+  // to stay clear of the frame size limit.
+  std::unique_ptr<StackMapStream> stack_map_stream(
+      new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
+  stack_map_stream->BeginMethod(
       jni_compiled_method.GetFrameSize(),
       jni_compiled_method.GetCoreSpillMask(),
       jni_compiled_method.GetFpSpillMask(),
       /* num_dex_registers */ 0);
-  stack_map_stream.EndMethod();
-  stack_map->resize(stack_map_stream.PrepareForFillIn());
-  stack_map_stream.FillInCodeInfo(MemoryRegion(stack_map->data(), stack_map->size()));
+  stack_map_stream->EndMethod();
+  return stack_map_stream->Encode();
 }
 
 CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
@@ -1163,8 +1155,9 @@
       compiler_options, access_flags, method_idx, dex_file);
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
 
-  ArenaVector<uint8_t> stack_map(allocator.Adapter(kArenaAllocStackMaps));
-  CreateJniStackMap(&arena_stack, jni_compiled_method, &stack_map);
+  ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
+  ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
+                                                           jni_compiled_method);
   return CompiledMethod::SwapAllocCompiledMethod(
       GetCompilerDriver(),
       jni_compiled_method.GetInstructionSet(),
@@ -1229,11 +1222,11 @@
     ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
     ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
         allocator.Adapter(kArenaAllocCHA));
-    ArenaVector<uint8_t> stack_map(allocator.Adapter(kArenaAllocStackMaps));
     ArenaStack arena_stack(runtime->GetJitArenaPool());
     // StackMapStream is large and does not fit into this frame, so we need a helper method.
-    // TODO: Try to avoid the extra memory copy that results from this.
-    CreateJniStackMap(&arena_stack, jni_compiled_method, &stack_map);
+    ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
+    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
+                                                             jni_compiled_method);
     uint8_t* stack_map_data = nullptr;
     uint8_t* roots_data = nullptr;
     uint32_t data_size = code_cache->ReserveData(self,
@@ -1326,8 +1319,7 @@
     }
   }
 
-  size_t stack_map_size = 0;
-  codegen->ComputeStackMapSize(&stack_map_size);
+  ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
   size_t number_of_roots = codegen->GetNumberOfJitRoots();
   // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
   // will be visible by the GC between EmitLiterals and CommitCode. Once CommitCode is
@@ -1345,7 +1337,7 @@
   uint8_t* stack_map_data = nullptr;
   uint8_t* roots_data = nullptr;
   uint32_t data_size = code_cache->ReserveData(self,
-                                               stack_map_size,
+                                               stack_map.size(),
                                                number_of_roots,
                                                method,
                                                &stack_map_data,
@@ -1354,7 +1346,7 @@
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
     return false;
   }
-  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), code_item);
+  memcpy(stack_map_data, stack_map.data(), stack_map.size());
   codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
 
   const void* code = code_cache->CommitCode(
@@ -1395,7 +1387,7 @@
     info.code_address = code_address;
     info.code_size = code_allocator.GetMemory().size();
     info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
-    info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
+    info.code_info = stack_map.size() == 0 ? nullptr : stack_map_data;
     info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
     GenerateJitDebugInfo(method, info);
   }
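
The JIT path above now has an encode-then-copy shape: the stack map is fully encoded into an arena-backed vector first, the reservation in the code cache uses the vector's actual size, and the bytes are then copied into place; the separate ComputeStackMapSize/BuildStackMaps two-step is gone. A simplified standalone sketch of that ordering (std::vector stands in for ScopedArenaVector and for the region returned by ReserveData):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Stand-in for codegen->BuildStackMaps(): returns the fully encoded bytes.
    std::vector<uint8_t> BuildStackMaps() {
      return {0x01, 0x02, 0x03, 0x04};
    }

    int main() {
      std::vector<uint8_t> stack_map = BuildStackMaps();  // 1. encode first
      std::vector<uint8_t> data(stack_map.size());        // 2. reserve with the real size
      std::memcpy(data.data(), stack_map.data(), stack_map.size());  // 3. copy into place
      return data == stack_map ? 0 : 1;
    }
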
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 831bccc..060613d 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -150,7 +150,9 @@
     if (can_merge_with_load_class && !load_class->HasUses()) {
       load_class->GetBlock()->RemoveInstruction(load_class);
     }
-  } else if (can_merge_with_load_class && !load_class->NeedsAccessCheck()) {
+  } else if (can_merge_with_load_class &&
+             load_class->GetLoadKind() != HLoadClass::LoadKind::kRuntimeCall) {
+    DCHECK(!load_class->NeedsAccessCheck());
     // Pass the initialization duty to the `HLoadClass` instruction,
     // and remove the instruction from the graph.
     DCHECK(load_class->HasEnvironment());
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 588ea03..1aa16f4 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -545,60 +545,67 @@
 void HScheduler::Schedule(HGraph* graph) {
   // We run lsa here instead of in a separate pass to better control whether we
   // should run the analysis or not.
+  const HeapLocationCollector* heap_location_collector = nullptr;
   LoadStoreAnalysis lsa(graph);
   if (!only_optimize_loop_blocks_ || graph->HasLoops()) {
     lsa.Run();
-    scheduling_graph_.SetHeapLocationCollector(lsa.GetHeapLocationCollector());
+    heap_location_collector = &lsa.GetHeapLocationCollector();
   }
 
   for (HBasicBlock* block : graph->GetReversePostOrder()) {
     if (IsSchedulable(block)) {
-      Schedule(block);
+      Schedule(block, heap_location_collector);
     }
   }
 }
 
-void HScheduler::Schedule(HBasicBlock* block) {
-  ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator_->Adapter(kArenaAllocScheduler));
+void HScheduler::Schedule(HBasicBlock* block,
+                          const HeapLocationCollector* heap_location_collector) {
+  ScopedArenaAllocator allocator(block->GetGraph()->GetArenaStack());
+  ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator.Adapter(kArenaAllocScheduler));
 
   // Build the scheduling graph.
-  scheduling_graph_.Clear();
+  SchedulingGraph scheduling_graph(this, &allocator, heap_location_collector);
   for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
     HInstruction* instruction = it.Current();
     CHECK_EQ(instruction->GetBlock(), block)
         << instruction->DebugName()
         << " is in block " << instruction->GetBlock()->GetBlockId()
         << ", and expected in block " << block->GetBlockId();
-    SchedulingNode* node = scheduling_graph_.AddNode(instruction, IsSchedulingBarrier(instruction));
+    SchedulingNode* node = scheduling_graph.AddNode(instruction, IsSchedulingBarrier(instruction));
     CalculateLatency(node);
     scheduling_nodes.push_back(node);
   }
 
-  if (scheduling_graph_.Size() <= 1) {
-    scheduling_graph_.Clear();
+  if (scheduling_graph.Size() <= 1) {
     return;
   }
 
   cursor_ = block->GetLastInstruction();
 
+  // The list of candidates for scheduling. A node becomes a candidate when all
+  // its predecessors have been scheduled.
+  ScopedArenaVector<SchedulingNode*> candidates(allocator.Adapter(kArenaAllocScheduler));
+
   // Find the initial candidates for scheduling.
-  candidates_.clear();
   for (SchedulingNode* node : scheduling_nodes) {
     if (!node->HasUnscheduledSuccessors()) {
       node->MaybeUpdateCriticalPath(node->GetLatency());
-      candidates_.push_back(node);
+      candidates.push_back(node);
     }
   }
 
-  ScopedArenaVector<SchedulingNode*> initial_candidates(allocator_->Adapter(kArenaAllocScheduler));
+  ScopedArenaVector<SchedulingNode*> initial_candidates(allocator.Adapter(kArenaAllocScheduler));
   if (kDumpDotSchedulingGraphs) {
     // Remember the list of initial candidates for debug output purposes.
-    initial_candidates.assign(candidates_.begin(), candidates_.end());
+    initial_candidates.assign(candidates.begin(), candidates.end());
   }
 
   // Schedule all nodes.
-  while (!candidates_.empty()) {
-    Schedule(selector_->PopHighestPriorityNode(&candidates_, scheduling_graph_));
+  selector_->Reset();
+  while (!candidates.empty()) {
+    SchedulingNode* node = selector_->PopHighestPriorityNode(&candidates, scheduling_graph);
+    Schedule(node, &candidates);
   }
 
   if (kDumpDotSchedulingGraphs) {
@@ -607,11 +614,12 @@
     std::stringstream description;
     description << graph->GetDexFile().PrettyMethod(graph->GetMethodIdx())
         << " B" << block->GetBlockId();
-    scheduling_graph_.DumpAsDotGraph(description.str(), initial_candidates);
+    scheduling_graph.DumpAsDotGraph(description.str(), initial_candidates);
   }
 }
 
-void HScheduler::Schedule(SchedulingNode* scheduling_node) {
+void HScheduler::Schedule(SchedulingNode* scheduling_node,
+                          /*inout*/ ScopedArenaVector<SchedulingNode*>* candidates) {
   // Check whether any of the node's predecessors will be valid candidates after
   // this node is scheduled.
   uint32_t path_to_node = scheduling_node->GetCriticalPath();
@@ -620,7 +628,7 @@
         path_to_node + predecessor->GetInternalLatency() + predecessor->GetLatency());
     predecessor->DecrementNumberOfUnscheduledSuccessors();
     if (!predecessor->HasUnscheduledSuccessors()) {
-      candidates_.push_back(predecessor);
+      candidates->push_back(predecessor);
     }
   }
   for (SchedulingNode* predecessor : scheduling_node->GetOtherPredecessors()) {
@@ -630,7 +638,7 @@
     // correctness. So we do not use them to compute the critical path.
     predecessor->DecrementNumberOfUnscheduledSuccessors();
     if (!predecessor->HasUnscheduledSuccessors()) {
-      candidates_.push_back(predecessor);
+      candidates->push_back(predecessor);
     }
   }
 
@@ -779,7 +787,6 @@
 #if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
   // Phase-local allocator that allocates scheduler internal data structures like
   // scheduling nodes, internal nodes map, dependencies, etc.
-  ScopedArenaAllocator allocator(graph_->GetArenaStack());
   CriticalPathSchedulingNodeSelector critical_path_selector;
   RandomSchedulingNodeSelector random_selector;
   SchedulingNodeSelector* selector = schedule_randomly
@@ -795,7 +802,7 @@
   switch (instruction_set_) {
 #ifdef ART_ENABLE_CODEGEN_arm64
     case InstructionSet::kArm64: {
-      arm64::HSchedulerARM64 scheduler(&allocator, selector);
+      arm64::HSchedulerARM64 scheduler(selector);
       scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
       scheduler.Schedule(graph_);
       break;
@@ -805,7 +812,7 @@
     case InstructionSet::kThumb2:
     case InstructionSet::kArm: {
       arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
-      arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
+      arm::HSchedulerARM scheduler(selector, &arm_latency_visitor);
       scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
       scheduler.Schedule(graph_);
       break;
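
Two invariants fall out of the scheduler changes above: the scheduling graph and candidate list are now local to each block (allocated from a per-block ScopedArenaAllocator), and any stateful selector must be Reset() before each block so it does not keep pointers into the previous block's freed graph. A standalone sketch of that Reset() contract (Node and Selector are made-up types):

    #include <vector>

    struct Node { int priority; };

    struct Selector {
      const Node* prev_select = nullptr;
      void Reset() { prev_select = nullptr; }
      const Node* Pop(std::vector<const Node*>* candidates) {
        const Node* best = candidates->back();  // real selector picks by priority
        candidates->pop_back();
        prev_select = best;  // cached across calls, valid only within one block
        return best;
      }
    };

    int main() {
      Selector selector;
      for (int block = 0; block < 2; ++block) {
        std::vector<Node> nodes = {{1}, {2}};  // per-block graph, freed each iteration
        std::vector<const Node*> candidates = {&nodes[0], &nodes[1]};
        selector.Reset();  // forget pointers into the previous block's graph
        while (!candidates.empty()) {
          selector.Pop(&candidates);
        }
      }
      return 0;
    }
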
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index c7683e0..fd48d84 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -251,12 +251,14 @@
  */
 class SchedulingGraph : public ValueObject {
  public:
-  SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator)
+  SchedulingGraph(const HScheduler* scheduler,
+                  ScopedArenaAllocator* allocator,
+                  const HeapLocationCollector* heap_location_collector)
       : scheduler_(scheduler),
         allocator_(allocator),
         contains_scheduling_barrier_(false),
         nodes_map_(allocator_->Adapter(kArenaAllocScheduler)),
-        heap_location_collector_(nullptr) {}
+        heap_location_collector_(heap_location_collector) {}
 
   SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
     std::unique_ptr<SchedulingNode> node(
@@ -268,15 +270,6 @@
     return result;
   }
 
-  void Clear() {
-    nodes_map_.clear();
-    contains_scheduling_barrier_ = false;
-  }
-
-  void SetHeapLocationCollector(const HeapLocationCollector& heap_location_collector) {
-    heap_location_collector_ = &heap_location_collector;
-  }
-
   SchedulingNode* GetNode(const HInstruction* instr) const {
     auto it = nodes_map_.find(instr);
     if (it == nodes_map_.end()) {
@@ -329,7 +322,7 @@
 
   ScopedArenaHashMap<const HInstruction*, std::unique_ptr<SchedulingNode>> nodes_map_;
 
-  const HeapLocationCollector* heap_location_collector_;
+  const HeapLocationCollector* const heap_location_collector_;
 };
 
 /*
@@ -377,6 +370,7 @@
 
 class SchedulingNodeSelector : public ArenaObject<kArenaAllocScheduler> {
  public:
+  virtual void Reset() {}
   virtual SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
                                                  const SchedulingGraph& graph) = 0;
   virtual ~SchedulingNodeSelector() {}
@@ -418,6 +412,7 @@
  public:
   CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}
 
+  void Reset() OVERRIDE { prev_select_ = nullptr; }
   SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
                                          const SchedulingGraph& graph) OVERRIDE;
 
@@ -434,16 +429,11 @@
 
 class HScheduler {
  public:
-  HScheduler(ScopedArenaAllocator* allocator,
-             SchedulingLatencyVisitor* latency_visitor,
-             SchedulingNodeSelector* selector)
-      : allocator_(allocator),
-        latency_visitor_(latency_visitor),
+  HScheduler(SchedulingLatencyVisitor* latency_visitor, SchedulingNodeSelector* selector)
+      : latency_visitor_(latency_visitor),
         selector_(selector),
         only_optimize_loop_blocks_(true),
-        scheduling_graph_(this, allocator),
-        cursor_(nullptr),
-        candidates_(allocator_->Adapter(kArenaAllocScheduler)) {}
+        cursor_(nullptr) {}
   virtual ~HScheduler() {}
 
   void Schedule(HGraph* graph);
@@ -454,8 +444,9 @@
   virtual bool IsSchedulingBarrier(const HInstruction* instruction) const;
 
  protected:
-  void Schedule(HBasicBlock* block);
-  void Schedule(SchedulingNode* scheduling_node);
+  void Schedule(HBasicBlock* block, const HeapLocationCollector* heap_location_collector);
+  void Schedule(SchedulingNode* scheduling_node,
+                /*inout*/ ScopedArenaVector<SchedulingNode*>* candidates);
   void Schedule(HInstruction* instruction);
 
   // Any instruction returning `false` via this method will prevent its
@@ -476,19 +467,12 @@
     node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency());
   }
 
-  ScopedArenaAllocator* const allocator_;
   SchedulingLatencyVisitor* const latency_visitor_;
   SchedulingNodeSelector* const selector_;
   bool only_optimize_loop_blocks_;
 
-  // We instantiate the members below as part of this class to avoid
-  // instantiating them locally for every chunk scheduled.
-  SchedulingGraph scheduling_graph_;
   // A pointer indicating where the next instruction to be scheduled will be inserted.
   HInstruction* cursor_;
-  // The list of candidates for scheduling. A node becomes a candidate when all
-  // its predecessors have been scheduled.
-  ScopedArenaVector<SchedulingNode*> candidates_;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HScheduler);
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 8dcadaa..d89d117 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -679,7 +679,7 @@
           } else {
             last_visited_internal_latency_ += kArmIntegerOpLatency;
           }
-          last_visited_internal_latency_ = kArmMemoryLoadLatency;
+          last_visited_latency_ = kArmMemoryLoadLatency;
         }
       }
       break;
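
The one-line fix above repairs an assignment to the wrong counter: ART's latency visitors accumulate preparatory work (such as address computation) into last_visited_internal_latency_ and report the cost of the produced value in last_visited_latency_, and the old code clobbered the former instead of setting the latter. A sketch of the two-counter convention (constant names and numbers are made up):

    #include <cassert>
    #include <cstdint>

    struct LatencyVisitor {
      uint32_t last_visited_internal_latency = 0;  // cost of preparatory work
      uint32_t last_visited_latency = 0;           // cost of the result itself
    };

    int main() {
      constexpr uint32_t kIntegerOpLatency = 2;
      constexpr uint32_t kMemoryLoadLatency = 9;
      LatencyVisitor v;
      v.last_visited_internal_latency += kIntegerOpLatency;  // address computation
      v.last_visited_latency = kMemoryLoadLatency;           // the load itself (the fix)
      assert(v.last_visited_internal_latency == kIntegerOpLatency);
      assert(v.last_visited_latency == kMemoryLoadLatency);
      return 0;
    }
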
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 0cb8684..2f36948 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -137,10 +137,9 @@
 
 class HSchedulerARM : public HScheduler {
  public:
-  HSchedulerARM(ScopedArenaAllocator* allocator,
-                SchedulingNodeSelector* selector,
+  HSchedulerARM(SchedulingNodeSelector* selector,
                 SchedulingLatencyVisitorARM* arm_latency_visitor)
-      : HScheduler(allocator, arm_latency_visitor, selector) {}
+      : HScheduler(arm_latency_visitor, selector) {}
   ~HSchedulerARM() OVERRIDE {}
 
   bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 4f394d5..0d2f8d9 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -134,8 +134,8 @@
 
 class HSchedulerARM64 : public HScheduler {
  public:
-  HSchedulerARM64(ScopedArenaAllocator* allocator, SchedulingNodeSelector* selector)
-      : HScheduler(allocator, &arm64_latency_visitor_, selector) {}
+  explicit HSchedulerARM64(SchedulingNodeSelector* selector)
+      : HScheduler(&arm64_latency_visitor_, selector) {}
   ~HSchedulerARM64() OVERRIDE {}
 
   bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 7079e07..fe23fb4 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -146,7 +146,9 @@
     environment->SetRawEnvAt(1, mul);
     mul->AddEnvUseAt(div_check->GetEnvironment(), 1);
 
-    SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
+    SchedulingGraph scheduling_graph(scheduler,
+                                     GetScopedAllocator(),
+                                     /* heap_location_collector */ nullptr);
     // Instructions must be inserted in reverse order into the scheduling graph.
     for (HInstruction* instr : ReverseRange(block_instructions)) {
       scheduling_graph.AddNode(instr);
@@ -276,11 +278,10 @@
       entry->AddInstruction(instr);
     }
 
-    SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
     HeapLocationCollector heap_location_collector(graph_);
     heap_location_collector.VisitBasicBlock(entry);
     heap_location_collector.BuildAliasingMatrix();
-    scheduling_graph.SetHeapLocationCollector(heap_location_collector);
+    SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator(), &heap_location_collector);
 
     for (HInstruction* instr : ReverseRange(block_instructions)) {
       // Build scheduling graph with memory access aliasing information
@@ -354,13 +355,13 @@
 #if defined(ART_ENABLE_CODEGEN_arm64)
 TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM64) {
   CriticalPathSchedulingNodeSelector critical_path_selector;
-  arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
+  arm64::HSchedulerARM64 scheduler(&critical_path_selector);
   TestBuildDependencyGraphAndSchedule(&scheduler);
 }
 
 TEST_F(SchedulerTest, ArrayAccessAliasingARM64) {
   CriticalPathSchedulingNodeSelector critical_path_selector;
-  arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
+  arm64::HSchedulerARM64 scheduler(&critical_path_selector);
   TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
 }
 #endif
@@ -369,14 +370,14 @@
 TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM) {
   CriticalPathSchedulingNodeSelector critical_path_selector;
   arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
-  arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
+  arm::HSchedulerARM scheduler(&critical_path_selector, &arm_latency_visitor);
   TestBuildDependencyGraphAndSchedule(&scheduler);
 }
 
 TEST_F(SchedulerTest, ArrayAccessAliasingARM) {
   CriticalPathSchedulingNodeSelector critical_path_selector;
   arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
-  arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
+  arm::HSchedulerARM scheduler(&critical_path_selector, &arm_latency_visitor);
   TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
 }
 #endif
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 429054c..3918b65 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -45,9 +45,10 @@
                                  uint32_t num_dex_registers) {
   DCHECK(!in_method_) << "Mismatched Begin/End calls";
   in_method_ = true;
-  DCHECK_EQ(frame_size_in_bytes_, 0u) << "BeginMethod was already called";
+  DCHECK_EQ(packed_frame_size_, 0u) << "BeginMethod was already called";
 
-  frame_size_in_bytes_ = frame_size_in_bytes;
+  DCHECK_ALIGNED(frame_size_in_bytes, kStackAlignment);
+  packed_frame_size_ = frame_size_in_bytes / kStackAlignment;
   core_spill_mask_ = core_spill_mask;
   fp_spill_mask_ = fp_spill_mask;
   num_dex_registers_ = num_dex_registers;
@@ -56,6 +57,15 @@
 void StackMapStream::EndMethod() {
   DCHECK(in_method_) << "Mismatched Begin/End calls";
   in_method_ = false;
+
+  // Read the stack masks now. The compiler might have updated them.
+  for (size_t i = 0; i < lazy_stack_masks_.size(); i++) {
+    BitVector* stack_mask = lazy_stack_masks_[i];
+    if (stack_mask != nullptr && stack_mask->GetNumberOfBits() != 0) {
+      stack_maps_[i][StackMap::kStackMaskIndex] =
+          stack_masks_.Dedup(stack_mask->GetRawStorage(), stack_mask->GetNumberOfBits());
+    }
+  }
 }
 
 void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
@@ -280,46 +290,28 @@
   bit_table.Encode(out);
 }
 
-size_t StackMapStream::PrepareForFillIn() {
-  DCHECK_EQ(out_.size(), 0u);
+ScopedArenaVector<uint8_t> StackMapStream::Encode() {
+  DCHECK(in_stack_map_ == false) << "Mismatched Begin/End calls";
+  DCHECK(in_inline_info_ == false) << "Mismatched Begin/End calls";
 
-  // Read the stack masks now. The compiler might have updated them.
-  for (size_t i = 0; i < lazy_stack_masks_.size(); i++) {
-    BitVector* stack_mask = lazy_stack_masks_[i];
-    if (stack_mask != nullptr && stack_mask->GetNumberOfBits() != 0) {
-      stack_maps_[i][StackMap::kStackMaskIndex] =
-        stack_masks_.Dedup(stack_mask->GetRawStorage(), stack_mask->GetNumberOfBits());
-    }
-  }
-
-  EncodeUnsignedLeb128(&out_, frame_size_in_bytes_);
-  EncodeUnsignedLeb128(&out_, core_spill_mask_);
-  EncodeUnsignedLeb128(&out_, fp_spill_mask_);
-  EncodeUnsignedLeb128(&out_, num_dex_registers_);
-  BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&out_, out_.size() * kBitsPerByte);
+  ScopedArenaVector<uint8_t> buffer(allocator_->Adapter(kArenaAllocStackMapStream));
+  BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&buffer);
+  EncodeVarintBits(out, packed_frame_size_);
+  EncodeVarintBits(out, core_spill_mask_);
+  EncodeVarintBits(out, fp_spill_mask_);
+  EncodeVarintBits(out, num_dex_registers_);
   EncodeTable(out, stack_maps_);
-  EncodeTable(out, inline_infos_);
-  EncodeTable(out, method_infos_);
   EncodeTable(out, register_masks_);
   EncodeTable(out, stack_masks_);
+  EncodeTable(out, inline_infos_);
+  EncodeTable(out, method_infos_);
   EncodeTable(out, dex_register_masks_);
   EncodeTable(out, dex_register_maps_);
   EncodeTable(out, dex_register_catalog_);
 
-  return out_.size();
-}
-
-void StackMapStream::FillInCodeInfo(MemoryRegion region) {
-  DCHECK(in_stack_map_ == false) << "Mismatched Begin/End calls";
-  DCHECK(in_inline_info_ == false) << "Mismatched Begin/End calls";
-  DCHECK_NE(0u, out_.size()) << "PrepareForFillIn not called before FillIn";
-  DCHECK_EQ(region.size(), out_.size());
-
-  region.CopyFromVector(0, out_);
-
   // Verify that we can load the CodeInfo and check some essentials.
-  CodeInfo code_info(region);
-  CHECK_EQ(code_info.Size(), out_.size());
+  CodeInfo code_info(buffer.data());
+  CHECK_EQ(code_info.Size(), buffer.size());
   CHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
 
   // Verify all written data (usually only in debug builds).
@@ -328,6 +320,8 @@
       dcheck(code_info);
     }
   }
+
+  return buffer;
 }
 
 }  // namespace art
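
For reference, the deleted header encoding wrote each field as a byte-aligned unsigned LEB128, while Encode() now writes the same fields with EncodeVarintBits into the one BitMemoryWriter that also emits the tables, so the whole CodeInfo becomes a single bit stream. A standalone sketch of the classic LEB128 encoding that was dropped (the bit-granular variant is ART-specific and not reproduced here):

    #include <cstdint>
    #include <vector>

    void EncodeUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
      do {
        uint8_t byte = value & 0x7f;  // low 7 bits per output byte
        value >>= 7;
        if (value != 0) {
          byte |= 0x80;  // high bit set: more bytes follow
        }
        out->push_back(byte);
      } while (value != 0);
    }

    int main() {
      std::vector<uint8_t> out;
      EncodeUnsignedLeb128(&out, 300);  // 300 encodes as 0xac 0x02
      return (out.size() == 2 && out[0] == 0xac && out[1] == 0x02) ? 0 : 1;
    }
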
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index de79f49..df11709 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -34,10 +34,11 @@
  * Collects and builds stack maps for a method. All the stack maps
  * for a method are placed in a CodeInfo object.
  */
-class StackMapStream : public ValueObject {
+class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
  public:
   explicit StackMapStream(ScopedArenaAllocator* allocator, InstructionSet instruction_set)
-      : instruction_set_(instruction_set),
+      : allocator_(allocator),
+        instruction_set_(instruction_set),
         stack_maps_(allocator),
         inline_infos_(allocator),
         method_infos_(allocator),
@@ -46,13 +47,13 @@
         dex_register_masks_(allocator),
         dex_register_maps_(allocator),
         dex_register_catalog_(allocator),
-        out_(allocator->Adapter(kArenaAllocStackMapStream)),
         lazy_stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
         current_stack_map_(),
         current_inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
         current_dex_registers_(allocator->Adapter(kArenaAllocStackMapStream)),
         previous_dex_registers_(allocator->Adapter(kArenaAllocStackMapStream)),
         dex_register_timestamp_(allocator->Adapter(kArenaAllocStackMapStream)),
+        expected_num_dex_registers_(0u),
         temp_dex_register_mask_(allocator, 32, true, kArenaAllocStackMapStream),
         temp_dex_register_map_(allocator->Adapter(kArenaAllocStackMapStream)) {
   }
@@ -87,18 +88,18 @@
   uint32_t GetStackMapNativePcOffset(size_t i);
   void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset);
 
-  // Prepares the stream to fill in a memory region. Must be called before FillIn.
-  // Returns the size (in bytes) needed to store this stream.
-  size_t PrepareForFillIn();
-  void FillInCodeInfo(MemoryRegion region);
+  // Encode all stack map data.
+  // The returned vector is allocated using the allocator passed to the StackMapStream.
+  ScopedArenaVector<uint8_t> Encode();
 
  private:
   static constexpr uint32_t kNoValue = -1;
 
   void CreateDexRegisterMap();
 
+  ScopedArenaAllocator* allocator_;
   const InstructionSet instruction_set_;
-  uint32_t frame_size_in_bytes_ = 0;
+  uint32_t packed_frame_size_ = 0;
   uint32_t core_spill_mask_ = 0;
   uint32_t fp_spill_mask_ = 0;
   uint32_t num_dex_registers_ = 0;
@@ -110,7 +111,6 @@
   BitmapTableBuilder dex_register_masks_;
   BitTableBuilder<MaskInfo> dex_register_maps_;
   BitTableBuilder<DexRegisterInfo> dex_register_catalog_;
-  ScopedArenaVector<uint8_t> out_;
 
   ScopedArenaVector<BitVector*> lazy_stack_masks_;
 
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 16a9216..a281bb3 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -62,12 +62,9 @@
   stream.EndStackMapEntry();
 
   stream.EndMethod();
-  size_t size = stream.PrepareForFillIn();
-  void* memory = allocator.Alloc(size, kArenaAllocMisc);
-  MemoryRegion region(memory, size);
-  stream.FillInCodeInfo(region);
+  ScopedArenaVector<uint8_t> memory = stream.Encode();
 
-  CodeInfo code_info(region);
+  CodeInfo code_info(memory.data());
   ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
 
   uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
@@ -151,12 +148,9 @@
   stream.EndStackMapEntry();
 
   stream.EndMethod();
-  size_t size = stream.PrepareForFillIn();
-  void* memory = allocator.Alloc(size, kArenaAllocMisc);
-  MemoryRegion region(memory, size);
-  stream.FillInCodeInfo(region);
+  ScopedArenaVector<uint8_t> memory = stream.Encode();
 
-  CodeInfo code_info(region);
+  CodeInfo code_info(memory.data());
   ASSERT_EQ(4u, code_info.GetNumberOfStackMaps());
 
   uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
@@ -324,12 +318,9 @@
   stream.EndStackMapEntry();
 
   stream.EndMethod();
-  size_t size = stream.PrepareForFillIn();
-  void* memory = allocator.Alloc(size, kArenaAllocMisc);
-  MemoryRegion region(memory, size);
-  stream.FillInCodeInfo(region);
+  ScopedArenaVector<uint8_t> memory = stream.Encode();
 
-  CodeInfo code_info(region);
+  CodeInfo code_info(memory.data());
   ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
 
   uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
@@ -382,12 +373,9 @@
   stream.EndStackMapEntry();
 
   stream.EndMethod();
-  size_t size = stream.PrepareForFillIn();
-  void* memory = allocator.Alloc(size, kArenaAllocMisc);
-  MemoryRegion region(memory, size);
-  stream.FillInCodeInfo(region);
+  ScopedArenaVector<uint8_t> memory = stream.Encode();
 
-  CodeInfo code_info(region);
+  CodeInfo code_info(memory.data());
   ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
 
   uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
@@ -444,12 +432,9 @@
   stream.EndStackMapEntry();
 
   stream.EndMethod();
-  size_t size = stream.PrepareForFillIn();
-  void* memory = allocator.Alloc(size, kArenaAllocMisc);
-  MemoryRegion region(memory, size);
-  stream.FillInCodeInfo(region);
+  ScopedArenaVector<uint8_t> memory = stream.Encode();
 
-  CodeInfo ci(region);
+  CodeInfo ci(memory.data());
 
   // Verify first stack map.
   StackMap sm0 = ci.GetStackMapAt(0);
@@ -495,12 +480,9 @@
   stream.EndStackMapEntry();
 
   stream.EndMethod();
-  size_t size = stream.PrepareForFillIn();
-  void* memory = allocator.Alloc(size, kArenaAllocMisc);
-  MemoryRegion region(memory, size);
-  stream.FillInCodeInfo(region);
+  ScopedArenaVector<uint8_t> memory = stream.Encode();
 
-  CodeInfo code_info(region);
+  CodeInfo code_info(memory.data());
   ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
 
   uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
@@ -597,12 +579,9 @@
   stream.EndStackMapEntry();
 
   stream.EndMethod();
-  size_t size = stream.PrepareForFillIn();
-  void* memory = allocator.Alloc(size, kArenaAllocMisc);
-  MemoryRegion region(memory, size);
-  stream.FillInCodeInfo(region);
+  ScopedArenaVector<uint8_t> memory = stream.Encode();
 
-  CodeInfo ci(region);
+  CodeInfo ci(memory.data());
 
   {
     // Verify first stack map.
@@ -744,12 +723,9 @@
   stream.EndStackMapEntry();
 
   stream.EndMethod();
-  size_t size = stream.PrepareForFillIn();
-  void* memory = allocator.Alloc(size, kArenaAllocMisc);
-  MemoryRegion region(memory, size);
-  stream.FillInCodeInfo(region);
+  ScopedArenaVector<uint8_t> memory = stream.Encode();
 
-  CodeInfo code_info(region);
+  CodeInfo code_info(memory.data());
   ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
 
   StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4 * kPcAlign);
@@ -771,9 +747,7 @@
   stream.EndStackMapEntry();
 
   stream.EndMethod();
-  std::vector<uint8_t> memory(stream.PrepareForFillIn());
-  MemoryRegion region(memory.data(), memory.size());
-  stream.FillInCodeInfo(region);
+  ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   std::vector<uint8_t> out;
   CodeInfo::DedupeMap dedupe_map;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index a1a547c..28942da 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -38,6 +38,7 @@
 #include "compiled_method-inl.h"
 #include "debug/method_debug_info.h"
 #include "dex/art_dex_file_loader.h"
+#include "dex/class_accessor-inl.h"
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_loader.h"
 #include "dex/dex_file_types.h"
@@ -791,7 +792,7 @@
     return true;
   }
 
-  virtual bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) = 0;
+  virtual bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) = 0;
 
   virtual bool EndClass() {
     if (kIsDebugBuild) {
@@ -861,10 +862,10 @@
       : DexMethodVisitor(writer, /* offset */ 0u) {}
 
   bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
-                   const ClassDataItemIterator& it) OVERRIDE {
+                   const ClassAccessor::Method& method) OVERRIDE {
     // Look for patches with .bss references and prepare maps with placeholders for their offsets.
     CompiledMethod* compiled_method = writer_->compiler_driver_->GetCompiledMethod(
-        MethodReference(dex_file_, it.GetMemberIndex()));
+        MethodReference(dex_file_, method.GetIndex()));
     if (HasCompiledCode(compiled_method)) {
       for (const LinkerPatch& patch : compiled_method->GetPatches()) {
         if (patch.GetType() == LinkerPatch::Type::kDataBimgRelRo) {
@@ -943,12 +944,12 @@
   }
 
   bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
-                   const ClassDataItemIterator& it) OVERRIDE {
+                   const ClassAccessor::Method& method) OVERRIDE {
     // Fill in the compiled_methods_ array for methods that have a
     // CompiledMethod. We track the number of non-null entries in
     // compiled_methods_with_code_ since we only want to allocate
     // OatMethodOffsets for the compiled methods.
-    uint32_t method_idx = it.GetMemberIndex();
+    uint32_t method_idx = method.GetIndex();
     CompiledMethod* compiled_method =
         writer_->compiler_driver_->GetCompiledMethod(MethodReference(dex_file_, method_idx));
     compiled_methods_.push_back(compiled_method);
@@ -1150,7 +1151,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index,
-                   const ClassDataItemIterator& it)
+                   const ClassAccessor::Method& method)
       OVERRIDE
       REQUIRES_SHARED(Locks::mutator_lock_)  {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -1182,7 +1183,7 @@
         }
       }
 
-      MethodReference method_ref(dex_file_, it.GetMemberIndex());
+      MethodReference method_ref(dex_file_, method.GetIndex());
 
       // Lookup method hotness from profile, if available.
       // Otherwise assume a default of no hotness.
@@ -1199,8 +1200,8 @@
           method_ref,
           method_offsets_index_,
           class_def_index_,
-          it.GetMethodAccessFlags(),
-          it.GetMethodCodeItem(),
+          method.GetAccessFlags(),
+          method.GetCodeItem(),
           debug_info_idx
       };
       ordered_methods_.push_back(method_data);
@@ -1442,7 +1443,8 @@
   InitMapMethodVisitor(OatWriter* writer, size_t offset)
       : OatDexMethodVisitor(writer, offset) {}
 
-  bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
+  bool VisitMethod(size_t class_def_method_index,
+                   const ClassAccessor::Method& method ATTRIBUTE_UNUSED)
       OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
     CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1543,7 +1545,7 @@
     return true;
   }
 
-  bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) OVERRIDE
+  bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) OVERRIDE
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Skip methods that are not in the image.
     if (!IsImageClass()) {
@@ -1562,22 +1564,22 @@
 
     Thread* self = Thread::Current();
     ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(self, *dex_file_);
-    ArtMethod* method;
+    ArtMethod* resolved_method;
     if (writer_->GetCompilerOptions().IsBootImage()) {
-      const InvokeType invoke_type = it.GetMethodInvokeType(
-          dex_file_->GetClassDef(class_def_index_));
+      const InvokeType invoke_type = method.GetInvokeType(
+          dex_file_->GetClassDef(class_def_index_).access_flags_);
       // Unchecked as we hold mutator_lock_ on entry.
       ScopedObjectAccessUnchecked soa(self);
       StackHandleScope<1> hs(self);
-      method = class_linker_->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
-          it.GetMemberIndex(),
+      resolved_method = class_linker_->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
+          method.GetIndex(),
           hs.NewHandle(dex_cache),
           ScopedNullHandle<mirror::ClassLoader>(),
           /* referrer */ nullptr,
           invoke_type);
-      if (method == nullptr) {
+      if (resolved_method == nullptr) {
         LOG(FATAL_WITHOUT_ABORT) << "Unexpected failure to resolve a method: "
-            << dex_file_->PrettyMethod(it.GetMemberIndex(), true);
+            << dex_file_->PrettyMethod(method.GetIndex(), true);
         self->AssertPendingException();
         mirror::Throwable* exc = self->GetException();
         std::string dump = exc->Dump();
@@ -1588,12 +1590,14 @@
       // Should already have been resolved by the compiler.
       // It may not be resolved if the class failed to verify; in this case, don't set the
       // entrypoint. This is not fatal since we shall use a resolution method.
-      method = class_linker_->LookupResolvedMethod(it.GetMemberIndex(), dex_cache, class_loader_);
+      resolved_method = class_linker_->LookupResolvedMethod(method.GetIndex(),
+                                                            dex_cache,
+                                                            class_loader_);
     }
-    if (method != nullptr &&
+    if (resolved_method != nullptr &&
         compiled_method != nullptr &&
         compiled_method->GetQuickCode().size() != 0) {
-      method->SetEntryPointFromQuickCompiledCodePtrSize(
+      resolved_method->SetEntryPointFromQuickCompiledCodePtrSize(
           reinterpret_cast<void*>(offsets.code_offset_), pointer_size_);
     }
 
@@ -1904,7 +1908,7 @@
       DCHECK(target != nullptr);
       const void* oat_code_offset =
           target->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
-      if (oat_code_offset != 0) {
+      if (oat_code_offset != nullptr) {
         DCHECK(!writer_->GetCompilerOptions().IsBootImage());
         DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(oat_code_offset));
         DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(oat_code_offset));
@@ -2001,26 +2005,17 @@
 // Visit all methods from all classes in all dex files with the specified visitor.
 bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
   for (const DexFile* dex_file : *dex_files_) {
-    const size_t class_def_count = dex_file->NumClassDefs();
-    for (size_t class_def_index = 0; class_def_index != class_def_count; ++class_def_index) {
-      if (UNLIKELY(!visitor->StartClass(dex_file, class_def_index))) {
+    for (ClassAccessor accessor : dex_file->GetClasses()) {
+      if (UNLIKELY(!visitor->StartClass(dex_file, accessor.GetClassDefIndex()))) {
         return false;
       }
       if (MayHaveCompiledMethods()) {
-        const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
-        const uint8_t* class_data = dex_file->GetClassData(class_def);
-        if (class_data != nullptr) {  // ie not an empty class, such as a marker interface
-          ClassDataItemIterator it(*dex_file, class_data);
-          it.SkipAllFields();
-          size_t class_def_method_index = 0u;
-          while (it.HasNextMethod()) {
-            if (!visitor->VisitMethod(class_def_method_index, it)) {
-              return false;
-            }
-            ++class_def_method_index;
-            it.Next();
+        size_t class_def_method_index = 0u;
+        for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+          if (!visitor->VisitMethod(class_def_method_index, method)) {
+            return false;
           }
-          DCHECK(!it.HasNext());
+          ++class_def_method_index;
         }
       }
       if (UNLIKELY(!visitor->EndClass())) {
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index bb27e8c..d73f10a 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -26,6 +26,7 @@
 #include "compiled_method-inl.h"
 #include "compiler.h"
 #include "debug/method_debug_info.h"
+#include "dex/class_accessor-inl.h"
 #include "dex/dex_file_loader.h"
 #include "dex/quick_compiler_callbacks.h"
 #include "dex/test_dex_file_builder.h"
@@ -428,22 +429,15 @@
   CHECK_EQ(dex_file.GetLocationChecksum(), oat_dex_file->GetDexFileLocationChecksum());
   ScopedObjectAccess soa(Thread::Current());
   auto pointer_size = class_linker->GetImagePointerSize();
-  for (size_t i = 0; i < dex_file.NumClassDefs(); i++) {
-    const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
-    const uint8_t* class_data = dex_file.GetClassData(class_def);
+  for (ClassAccessor accessor : dex_file.GetClasses()) {
+    size_t num_virtual_methods = accessor.NumVirtualMethods();
 
-    size_t num_virtual_methods = 0;
-    if (class_data != nullptr) {
-      ClassDataItemIterator it(dex_file, class_data);
-      num_virtual_methods = it.NumVirtualMethods();
-    }
-
-    const char* descriptor = dex_file.GetClassDescriptor(class_def);
+    const char* descriptor = accessor.GetDescriptor();
     ObjPtr<mirror::Class> klass = class_linker->FindClass(soa.Self(),
                                                           descriptor,
                                                           ScopedNullHandle<mirror::ClassLoader>());
 
-    const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(i);
+    const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(accessor.GetClassDefIndex());
     CHECK_EQ(ClassStatus::kNotReady, oat_class.GetStatus()) << descriptor;
     CHECK_EQ(kCompile ? OatClassType::kOatClassAllCompiled : OatClassType::kOatClassNoneCompiled,
              oat_class.GetType()) << descriptor;
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index a04a234..a83a46b 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -21,6 +21,7 @@
 
 #include "dex_ir_builder.h"
 
+#include "dex/class_accessor-inl.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/dex_file_exception_helpers.h"
 #include "dexlayout.h"
@@ -162,7 +163,7 @@
                                    const DexFile::CodeItem* disk_code_item,
                                    uint32_t offset,
                                    uint32_t dex_method_index);
-  ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset);
+  ClassData* CreateClassData(const DexFile& dex_file, const DexFile::ClassDef& class_def);
 
   void AddAnnotationsFromMapListSection(const DexFile& dex_file,
                                         uint32_t start_offset,
@@ -197,7 +198,7 @@
                         uint8_t length,
                         EncodedValue* item);
 
-  MethodItem GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii);
+  MethodItem GenerateMethodItem(const DexFile& dex_file, const ClassAccessor::Method& method);
 
   ParameterAnnotation* GenerateParameterAnnotation(
       const DexFile& dex_file,
@@ -488,8 +489,7 @@
   const uint8_t* static_data = dex_file.GetEncodedStaticFieldValuesArray(disk_class_def);
   EncodedArrayItem* static_values =
       CreateEncodedArrayItem(dex_file, static_data, disk_class_def.static_values_off_);
-  ClassData* class_data = CreateClassData(
-      dex_file, dex_file.GetClassData(disk_class_def), disk_class_def.class_data_off_);
+  ClassData* class_data = CreateClassData(dex_file, disk_class_def);
   CreateAndAddIndexedItem(header_->ClassDefs(),
                           header_->ClassDefs().GetOffset() + i * ClassDef::ItemSize(),
                           i,
@@ -894,36 +894,43 @@
   return code_item;
 }
 
-ClassData* BuilderMaps::CreateClassData(
-    const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset) {
+ClassData* BuilderMaps::CreateClassData(const DexFile& dex_file,
+                                        const DexFile::ClassDef& class_def) {
   // Read the fields and methods defined by the class, resolving the circular references from
   // them back to classes by setting the class at the same time.
+  const uint32_t offset = class_def.class_data_off_;
   ClassData* class_data = class_datas_map_.GetExistingObject(offset);
-  if (class_data == nullptr && encoded_data != nullptr) {
-    ClassDataItemIterator cdii(dex_file, encoded_data);
+  if (class_data == nullptr && offset != 0u) {
+    ClassAccessor accessor(dex_file, class_def);
     // Static fields.
     FieldItemVector* static_fields = new FieldItemVector();
-    for (; cdii.HasNextStaticField(); cdii.Next()) {
-      FieldId* field_item = header_->FieldIds()[cdii.GetMemberIndex()];
-      uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+    for (const ClassAccessor::Field& field : accessor.GetStaticFields()) {
+      FieldId* field_item = header_->FieldIds()[field.GetIndex()];
+      uint32_t access_flags = field.GetRawAccessFlags();
       static_fields->emplace_back(access_flags, field_item);
     }
-    // Instance fields.
     FieldItemVector* instance_fields = new FieldItemVector();
-    for (; cdii.HasNextInstanceField(); cdii.Next()) {
-      FieldId* field_item = header_->FieldIds()[cdii.GetMemberIndex()];
-      uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+    for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
+      FieldId* field_item = header_->FieldIds()[field.GetIndex()];
+      uint32_t access_flags = field.GetRawAccessFlags();
       instance_fields->emplace_back(access_flags, field_item);
     }
     // Direct methods.
     MethodItemVector* direct_methods = new MethodItemVector();
-    for (; cdii.HasNextDirectMethod(); cdii.Next()) {
-      direct_methods->push_back(GenerateMethodItem(dex_file, cdii));
+    auto direct_methods_it = accessor.GetDirectMethods();
+    for (auto it = direct_methods_it.begin(); it != direct_methods_it.end(); ++it) {
+      direct_methods->push_back(GenerateMethodItem(dex_file, *it));
     }
     // Virtual methods.
     MethodItemVector* virtual_methods = new MethodItemVector();
-    for (; cdii.HasNextVirtualMethod(); cdii.Next()) {
-      virtual_methods->push_back(GenerateMethodItem(dex_file, cdii));
+    auto virtual_methods_it = accessor.GetVirtualMethods();
+    const uint8_t* last_data_ptr;
+    for (auto it = virtual_methods_it.begin(); ; ++it) {
+      if (it == virtual_methods_it.end()) {
+        last_data_ptr = it->GetDataPointer();
+        break;
+      }
+      virtual_methods->push_back(GenerateMethodItem(dex_file, *it));
     }
     class_data = class_datas_map_.CreateAndAddItem(header_->ClassDatas(),
                                                    eagerly_assign_offsets_,
@@ -932,7 +939,7 @@
                                                    instance_fields,
                                                    direct_methods,
                                                    virtual_methods);
-    class_data->SetSize(cdii.EndDataPointer() - encoded_data);
+    class_data->SetSize(last_data_ptr - dex_file.GetClassData(class_def));
   }
   return class_data;
 }
@@ -1168,16 +1175,17 @@
   }
 }
 
-MethodItem BuilderMaps::GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii) {
-  MethodId* method_id = header_->MethodIds()[cdii.GetMemberIndex()];
-  uint32_t access_flags = cdii.GetRawMemberAccessFlags();
-  const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
+MethodItem BuilderMaps::GenerateMethodItem(const DexFile& dex_file,
+                                           const ClassAccessor::Method& method) {
+  MethodId* method_id = header_->MethodIds()[method.GetIndex()];
+  uint32_t access_flags = method.GetRawAccessFlags();
+  const DexFile::CodeItem* disk_code_item = method.GetCodeItem();
   // Temporary hack to prevent incorrectly deduping code items if they have the same offset since
   // they may have different debug info streams.
   CodeItem* code_item = DedupeOrCreateCodeItem(dex_file,
                                                disk_code_item,
-                                               cdii.GetMethodCodeItemOffset(),
-                                               cdii.GetMemberIndex());
+                                               method.GetCodeItemOffset(),
+                                               method.GetIndex());
   return MethodItem(access_flags, method_id, code_item);
 }
 
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 2b1352d..a20930b 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -26,6 +26,7 @@
 #include "common_runtime_test.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/base64_test_util.h"
+#include "dex/class_accessor-inl.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_loader.h"
@@ -682,16 +683,9 @@
   MutateDexFile(temp_dex.GetFile(), GetTestDexFileName("ManyMethods"), [] (DexFile* dex) {
     bool mutated_successfully = false;
     // Change the dex instructions to make an opcode that spans past the end of the code item.
-    for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
-      const DexFile::ClassDef& def = dex->GetClassDef(i);
-      const uint8_t* data = dex->GetClassData(def);
-      if (data == nullptr) {
-        continue;
-      }
-      ClassDataItemIterator it(*dex, data);
-      it.SkipAllFields();
-      while (it.HasNextMethod()) {
-        DexFile::CodeItem* item = const_cast<DexFile::CodeItem*>(it.GetMethodCodeItem());
+    for (ClassAccessor accessor : dex->GetClasses()) {
+      for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+        DexFile::CodeItem* item = const_cast<DexFile::CodeItem*>(method.GetCodeItem());
         if (item != nullptr) {
           CodeItemInstructionAccessor instructions(*dex, item);
           if (instructions.begin() != instructions.end()) {
@@ -714,7 +708,6 @@
             }
           }
         }
-        it.Next();
       }
     }
     CHECK(mutated_successfully)
diff --git a/libartbase/base/bit_struct_detail.h b/libartbase/base/bit_struct_detail.h
index 68c2e44..60de1b6 100644
--- a/libartbase/base/bit_struct_detail.h
+++ b/libartbase/base/bit_struct_detail.h
@@ -85,7 +85,7 @@
   static constexpr FalseT Test(...);
 
  public:
-  static constexpr bool value = decltype(Test<T>(0))::value;
+  static constexpr bool value = decltype(Test<T>(nullptr))::value;
 };
 
 // Infer the type of the member of &T::M.
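
The 0 -> nullptr change above is cosmetic: the argument exists only to steer overload resolution in the member-detection idiom, and a null pointer constant expresses that more clearly than a literal 0. A minimal standalone version of the idiom (HasFoo and foo are made-up names for illustration):

    #include <type_traits>

    template <typename T>
    struct HasFoo {
     private:
      template <typename U>
      static constexpr std::true_type Test(decltype(&U::foo));  // preferred if U::foo exists
      template <typename U>
      static constexpr std::false_type Test(...);               // fallback otherwise

     public:
      static constexpr bool value = decltype(Test<T>(nullptr))::value;
    };

    struct WithFoo { void foo() {} };
    struct WithoutFoo {};

    static_assert(HasFoo<WithFoo>::value, "detects foo");
    static_assert(!HasFoo<WithoutFoo>::value, "rejects missing foo");

    int main() { return 0; }
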
diff --git a/libartbase/base/mem_map_fuchsia.cc b/libartbase/base/mem_map_fuchsia.cc
index db31efb..d1c92ce 100644
--- a/libartbase/base/mem_map_fuchsia.cc
+++ b/libartbase/base/mem_map_fuchsia.cc
@@ -41,8 +41,8 @@
                               ZX_INFO_VMAR,
                               &vmarinfo,
                               sizeof(vmarinfo),
-                              NULL,
-                              NULL), ZX_OK) << "could not find info from root vmar";
+                              nullptr,
+                              nullptr), ZX_OK) << "could not find info from root vmar";
 
   uintptr_t lower_mem_start = FUCHSIA_LOWER_MEM_START - vmarinfo.base;
   fuchsia_lowmem_size = FUCHSIA_LOWER_MEM_SIZE;
@@ -97,8 +97,8 @@
                                 ZX_INFO_VMAR,
                                 &vmarinfo,
                                 sizeof(vmarinfo),
-                                NULL,
-                                NULL);
+                                nullptr,
+                                nullptr);
     if (status < 0 || reinterpret_cast<uintptr_t>(start) < vmarinfo.base) {
       errno = EINVAL;
       return MAP_FAILED;
diff --git a/libartbase/base/transform_iterator.h b/libartbase/base/transform_iterator.h
index 9265543..5b0574d 100644
--- a/libartbase/base/transform_iterator.h
+++ b/libartbase/base/transform_iterator.h
@@ -71,7 +71,7 @@
     return *this;
   }
 
-  TransformIterator& operator++(int) {
+  TransformIterator operator++(int) {
     TransformIterator tmp(*this);
     ++*this;
     return tmp;
@@ -86,7 +86,7 @@
     return *this;
   }
 
-  TransformIterator& operator--(int) {
+  TransformIterator operator--(int) {
     TransformIterator tmp(*this);
     --*this;
     return tmp;
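
The return-type fix above matters because postfix increment and decrement return the pre-increment value, which is necessarily a local copy; declaring the return type as TransformIterator& would hand back a dangling reference to tmp. The canonical pattern, standalone (Counter is a made-up type):

    #include <cassert>

    struct Counter {
      int value = 0;
      Counter& operator++() {    // prefix: increment, return self by reference
        ++value;
        return *this;
      }
      Counter operator++(int) {  // postfix: copy, increment, return the copy by value
        Counter tmp(*this);
        ++*this;
        return tmp;
      }
    };

    int main() {
      Counter c;
      Counter old = c++;
      assert(old.value == 0 && c.value == 1);
      return 0;
    }
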
diff --git a/libdexfile/dex/class_accessor-inl.h b/libdexfile/dex/class_accessor-inl.h
index dd91438..21db2cf 100644
--- a/libdexfile/dex/class_accessor-inl.h
+++ b/libdexfile/dex/class_accessor-inl.h
@@ -32,9 +32,16 @@
     : ClassAccessor(dex_file, dex_file.GetIndexForClassDef(class_def)) {}
 
 inline ClassAccessor::ClassAccessor(const DexFile& dex_file, uint32_t class_def_index)
+    : ClassAccessor(dex_file,
+                    dex_file.GetClassData(dex_file.GetClassDef(class_def_index)),
+                    class_def_index) {}
+
+inline ClassAccessor::ClassAccessor(const DexFile& dex_file,
+                                    const uint8_t* class_data,
+                                    uint32_t class_def_index)
     : dex_file_(dex_file),
       class_def_index_(class_def_index),
-      ptr_pos_(dex_file.GetClassData(dex_file.GetClassDef(class_def_index))),
+      ptr_pos_(class_data),
       num_static_fields_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
       num_instance_fields_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
       num_direct_methods_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
diff --git a/libdexfile/dex/class_accessor.h b/libdexfile/dex/class_accessor.h
index 5579be2..9b9ac87 100644
--- a/libdexfile/dex/class_accessor.h
+++ b/libdexfile/dex/class_accessor.h
@@ -61,6 +61,10 @@
       return dex_file_;
     }
 
+    const uint8_t* GetDataPointer() const {
+      return ptr_pos_;
+    }
+
    protected:
     // Internal data pointer for reading.
     const DexFile& dex_file_;
@@ -133,6 +137,7 @@
     uint32_t code_off_ = 0u;
 
     friend class ClassAccessor;
+    friend class DexFileVerifier;
   };
 
   // A decoded version of the field of a class_data_item.
@@ -159,6 +164,7 @@
 
     bool is_static_ = true;
     friend class ClassAccessor;
+    friend class DexFileVerifier;
   };
 
   template <typename DataType>
@@ -225,6 +231,10 @@
       return !(*this < rhs);
     }
 
+    const uint8_t* GetDataPointer() const {
+      return data_.ptr_pos_;
+    }
+
    private:
     // Read data at current position.
     void ReadData() {
@@ -244,14 +254,20 @@
     const uint32_t partition_pos_;
     // At iterator_end_, the iterator is no longer valid.
     const uint32_t iterator_end_;
+
+    friend class DexFileVerifier;
   };
 
   // Not explicit specifically for range-based loops.
   ALWAYS_INLINE ClassAccessor(const ClassIteratorData& data);
 
-  ClassAccessor(const DexFile& dex_file, const DexFile::ClassDef& class_def);
+  ALWAYS_INLINE ClassAccessor(const DexFile& dex_file, const DexFile::ClassDef& class_def);
 
-  ClassAccessor(const DexFile& dex_file, uint32_t class_def_index);
+  ALWAYS_INLINE ClassAccessor(const DexFile& dex_file, uint32_t class_def_index);
+
+  ClassAccessor(const DexFile& dex_file,
+                const uint8_t* class_data,
+                uint32_t class_def_index = DexFile::kDexNoIndex32);
 
   // Return the code item for a method.
   const DexFile::CodeItem* GetCodeItem(const Method& method) const;
@@ -354,6 +370,8 @@
   const uint32_t num_instance_fields_ = 0u;
   const uint32_t num_direct_methods_ = 0u;
   const uint32_t num_virtual_methods_ = 0u;
+
+  friend class DexFileVerifier;
 };
 
 }  // namespace art
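
All of the migrations in this change follow the same ClassAccessor usage pattern. A sketch of typical client code, based on the API visible in these hunks (GetClasses(), GetMethods(), GetCodeItem()); this compiles only inside the ART tree, and CountCodeItems is a made-up helper:

    #include <cstddef>

    #include "dex/class_accessor-inl.h"
    #include "dex/dex_file.h"

    // Count how many methods in a dex file carry code, using the accessor API.
    size_t CountCodeItems(const art::DexFile& dex_file) {
      size_t count = 0;
      for (art::ClassAccessor accessor : dex_file.GetClasses()) {
        // GetMethods() yields direct methods first, then virtual methods,
        // matching class_data_item order.
        for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
          if (method.GetCodeItem() != nullptr) {
            ++count;
          }
        }
      }
      return count;
    }
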
diff --git a/libdexfile/dex/code_item_accessors.h b/libdexfile/dex/code_item_accessors.h
index 5786d3f..695cc7b 100644
--- a/libdexfile/dex/code_item_accessors.h
+++ b/libdexfile/dex/code_item_accessors.h
@@ -80,7 +80,7 @@
   uint32_t insns_size_in_code_units_ = 0;
 
   // Pointer to the instructions, null if there is no code item.
-  const uint16_t* insns_ = 0;
+  const uint16_t* insns_ = nullptr;
 };
 
 // Abstracts accesses to code item fields other than debug info for CompactDexFile and
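
The `= 0` to `= nullptr` member-initializer change here (and the matching one in oat_file.h below, plus the NULL cleanups in openjdkjvm/) is behavior-neutral; nullptr is preferred because it has its own type, std::nullptr_t, and cannot be silently read as an integer. A minimal illustration with a hypothetical overload set:

  void Take(int);
  void Take(const char*);

  Take(0);        // Selects Take(int): 0 is an integer literal first.
  Take(nullptr);  // Unambiguously a null pointer: selects Take(const char*).
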
diff --git a/libdexfile/dex/descriptors_names.cc b/libdexfile/dex/descriptors_names.cc
index 206f7a0..1e8eb33 100644
--- a/libdexfile/dex/descriptors_names.cc
+++ b/libdexfile/dex/descriptors_names.cc
@@ -19,6 +19,7 @@
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
 
+#include "base/macros.h"
 #include "dex/utf-inl.h"
 
 namespace art {
@@ -162,7 +163,7 @@
 }
 
 // Helper for IsValidPartOfMemberNameUtf8(), a bit vector indicating valid low ascii.
-static uint32_t DEX_MEMBER_VALID_LOW_ASCII[4] = {
+static constexpr uint32_t DEX_MEMBER_VALID_LOW_ASCII[4] = {
   0x00000000,  // 00..1f low control characters; nothing valid
   0x03ff2010,  // 20..3f digits and symbols; valid: '0'..'9', '$', '-'
   0x87fffffe,  // 40..5f uppercase etc.; valid: 'A'..'Z', '_'
@@ -170,6 +171,7 @@
 };
 
 // Helper for IsValidPartOfMemberNameUtf8(); do not call directly.
+COLD_ATTR
 static bool IsValidPartOfMemberNameUtf8Slow(const char** pUtf8Ptr) {
   /*
    * It's a multibyte encoded character. Decode it and analyze. We
@@ -244,6 +246,7 @@
  * this function returns false, then the given pointer may only have
  * been partially advanced.
  */
+ALWAYS_INLINE
 static bool IsValidPartOfMemberNameUtf8(const char** pUtf8Ptr) {
   uint8_t c = (uint8_t) **pUtf8Ptr;
   if (LIKELY(c <= 0x7f)) {
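
DEX_MEMBER_VALID_LOW_ASCII packs the validity of all 128 low-ASCII characters into four 32-bit words, which is why it can now be constexpr: character c selects word c >> 5 and bit c & 31, so '0' (0x30) maps to word 1, bit 16, which is set in 0x03ff2010. A sketch of the membership test this table backs (the exact helper in this file may differ):

  static inline bool IsValidLowAscii(uint8_t c) {
    DCHECK_LT(c, 0x80u);  // The table only covers the 7-bit ASCII range.
    return (DEX_MEMBER_VALID_LOW_ASCII[c >> 5] & (1u << (c & 31))) != 0;
  }

The ALWAYS_INLINE/COLD_ATTR pairing above serves the same hot path: the one-byte ASCII case is inlined into callers, while the rarely taken multibyte decoder stays out of line.
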
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index 4e88ef6..25cd2f4 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -80,6 +80,7 @@
 
   // The value of an invalid index.
   static const uint16_t kDexNoIndex16 = 0xFFFF;
+  static const uint32_t kDexNoIndex32 = 0xFFFFFFFF;
 
   // Raw header_item.
   struct Header {
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index fda6376..fd011c8 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -23,6 +23,7 @@
 #include "android-base/stringprintf.h"
 
 #include "base/leb128.h"
+#include "class_accessor-inl.h"
 #include "code_item_accessors-inl.h"
 #include "descriptors_names.h"
 #include "dex_file-inl.h"
@@ -614,9 +615,8 @@
                                                uint32_t class_access_flags,
                                                dex::TypeIndex class_type_index,
                                                uint32_t code_offset,
-                                               ClassDataItemIterator* direct_it,
-                                               bool expect_direct) {
-  DCHECK_EQ(expect_direct, direct_it == nullptr);
+                                               ClassAccessor::Method* direct_method,
+                                               size_t* remaining_directs) {
   // Check for overflow.
   if (!CheckIndex(idx, header_->method_ids_size_, "class_data_item method_idx")) {
     return false;
@@ -634,12 +634,14 @@
     return false;
   }
 
-  // Check that it's not defined as both direct and virtual.
-  if (!expect_direct) {
+  // For virtual methods, we cross-reference the method index to make sure it doesn't match
+  // any direct method.
+  const bool expect_direct = direct_method == nullptr;
+  if (!expect_direct && *remaining_directs > 0) {
     // The direct methods are already known to be in ascending index order. So just keep up
     // with the current index.
-    for (; direct_it->HasNextDirectMethod(); direct_it->Next()) {
-      uint32_t direct_idx = direct_it->GetMemberIndex();
+    while (true) {
+      const uint32_t direct_idx = direct_method->GetIndex();
       if (direct_idx > idx) {
         break;
       }
@@ -647,6 +649,11 @@
         ErrorStringPrintf("Found virtual method with same index as direct method: %d", idx);
         return false;
       }
+      --*remaining_directs;
+      if (*remaining_directs == 0u) {
+        break;
+      }
+      direct_method->Read();
     }
   }
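
Because direct and virtual method indices are each verified to be in ascending order, the rewritten cross-check only ever advances direct_method and decrements *remaining_directs, giving one linear merge-style pass over both lists instead of a rescan per virtual method. The same logic over plain arrays, as a sketch with hypothetical names:

  // Returns false if virtual_idx also appears among the ascending direct
  // indices; *cursor persists across calls made with ascending virtual_idx.
  bool CheckNotAlsoDirect(const uint32_t* direct_indices, size_t num_directs,
                          size_t* cursor, uint32_t virtual_idx) {
    while (*cursor < num_directs) {
      const uint32_t direct_idx = direct_indices[*cursor];
      if (direct_idx > virtual_idx) {
        break;              // Every remaining direct index is larger.
      }
      if (direct_idx == virtual_idx) {
        return false;       // Defined as both direct and virtual.
      }
      ++*cursor;            // Consumed; never revisited for later virtuals.
    }
    return true;
  }
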
 
@@ -960,11 +967,14 @@
     return true;
   }
 
-  ClassDataItemIterator field_it(*dex_file_, ptr_);
+  ClassAccessor accessor(*dex_file_, ptr_);
   EncodedStaticFieldValueIterator array_it(*dex_file_, *class_def);
 
-  for (; field_it.HasNextStaticField() && array_it.HasNext(); field_it.Next(), array_it.Next()) {
-    uint32_t index = field_it.GetMemberIndex();
+  for (const ClassAccessor::Field& field : accessor.GetStaticFields()) {
+    if (!array_it.HasNext()) {
+      break;
+    }
+    uint32_t index = field.GetIndex();
     const DexFile::TypeId& type_id = dex_file_->GetTypeId(dex_file_->GetFieldId(index).type_idx_);
     const char* field_type_name =
         dex_file_->GetStringData(dex_file_->GetStringId(type_id.descriptor_idx_));
@@ -1041,6 +1051,7 @@
         ErrorStringPrintf("unexpected static field initial value type: %x", array_type);
         return false;
     }
+    array_it.Next();
   }
 
   if (array_it.HasNext()) {
@@ -1051,87 +1062,103 @@
 }
 
 template <bool kStatic>
-bool DexFileVerifier::CheckIntraClassDataItemFields(ClassDataItemIterator* it,
+bool DexFileVerifier::CheckIntraClassDataItemFields(size_t count,
+                                                    ClassAccessor::Field* field,
                                                     bool* have_class,
                                                     dex::TypeIndex* class_type_index,
                                                     const DexFile::ClassDef** class_def) {
-  DCHECK(it != nullptr);
+  DCHECK(field != nullptr);
   constexpr const char* kTypeDescr = kStatic ? "static field" : "instance field";
 
-  // These calls use the raw access flags to check whether the whole dex field is valid.
+  if (count == 0u) {
+    return true;
+  }
+  field->Read();
 
-  if (!*have_class && (kStatic ? it->HasNextStaticField() : it->HasNextInstanceField())) {
-    *have_class = FindClassIndexAndDef(it->GetMemberIndex(), true, class_type_index, class_def);
+  if (!*have_class) {
+    *have_class = FindClassIndexAndDef(field->GetIndex(), true, class_type_index, class_def);
     if (!*have_class) {
       // Should have really found one.
       ErrorStringPrintf("could not find declaring class for %s index %" PRIu32,
                         kTypeDescr,
-                        it->GetMemberIndex());
+                        field->GetIndex());
       return false;
     }
   }
-  DCHECK(*class_def != nullptr ||
-         !(kStatic ? it->HasNextStaticField() : it->HasNextInstanceField()));
+  DCHECK(*class_def != nullptr);
 
   uint32_t prev_index = 0;
-  for (; kStatic ? it->HasNextStaticField() : it->HasNextInstanceField(); it->Next()) {
-    uint32_t curr_index = it->GetMemberIndex();
+  for (size_t i = 0; ;) {
+    uint32_t curr_index = field->GetIndex();
+    // These calls use the raw access flags to check whether the whole dex field is valid.
     if (!CheckOrder(kTypeDescr, curr_index, prev_index)) {
       return false;
     }
     if (!CheckClassDataItemField(curr_index,
-                                 it->GetRawMemberAccessFlags(),
+                                 field->GetRawAccessFlags(),
                                  (*class_def)->access_flags_,
                                  *class_type_index,
                                  kStatic)) {
       return false;
     }
-
+    ++i;
+    if (i >= count) {
+      break;
+    }
+    field->Read();
     prev_index = curr_index;
   }
 
   return true;
 }
 
-template <bool kDirect>
-bool DexFileVerifier::CheckIntraClassDataItemMethods(
-    ClassDataItemIterator* it,
-    ClassDataItemIterator* direct_it,
-    bool* have_class,
-    dex::TypeIndex* class_type_index,
-    const DexFile::ClassDef** class_def) {
-  DCHECK(it != nullptr);
-  constexpr const char* kTypeDescr = kDirect ? "direct method" : "virtual method";
+bool DexFileVerifier::CheckIntraClassDataItemMethods(ClassAccessor::Method* method,
+                                                     size_t num_methods,
+                                                     ClassAccessor::Method* direct_method,
+                                                     size_t num_directs,
+                                                     bool* have_class,
+                                                     dex::TypeIndex* class_type_index,
+                                                     const DexFile::ClassDef** class_def) {
+  DCHECK(method != nullptr);
+  const char* kTypeDescr = method->IsStaticOrDirect() ? "direct method" : "virtual method";
 
-  if (!*have_class && (kDirect ? it->HasNextDirectMethod() : it->HasNextVirtualMethod())) {
-    *have_class = FindClassIndexAndDef(it->GetMemberIndex(), false, class_type_index, class_def);
+  if (num_methods == 0u) {
+    return true;
+  }
+  method->Read();
+
+  if (!*have_class) {
+    *have_class = FindClassIndexAndDef(method->GetIndex(), false, class_type_index, class_def);
     if (!*have_class) {
       // Should have really found one.
       ErrorStringPrintf("could not find declaring class for %s index %" PRIu32,
                         kTypeDescr,
-                        it->GetMemberIndex());
+                        method->GetIndex());
       return false;
     }
   }
-  DCHECK(*class_def != nullptr ||
-         !(kDirect ? it->HasNextDirectMethod() : it->HasNextVirtualMethod()));
+  DCHECK(*class_def != nullptr);
 
   uint32_t prev_index = 0;
-  for (; kDirect ? it->HasNextDirectMethod() : it->HasNextVirtualMethod(); it->Next()) {
-    uint32_t curr_index = it->GetMemberIndex();
+  for (size_t i = 0; ;) {
+    uint32_t curr_index = method->GetIndex();
     if (!CheckOrder(kTypeDescr, curr_index, prev_index)) {
       return false;
     }
     if (!CheckClassDataItemMethod(curr_index,
-                                  it->GetRawMemberAccessFlags(),
+                                  method->GetRawAccessFlags(),
                                   (*class_def)->access_flags_,
                                   *class_type_index,
-                                  it->GetMethodCodeItemOffset(),
-                                  direct_it,
-                                  kDirect)) {
+                                  method->GetCodeItemOffset(),
+                                  direct_method,
+                                  &num_directs)) {
       return false;
     }
-
+    ++i;
+    if (i >= num_methods) {
+      break;
+    }
+    method->Read();
     prev_index = curr_index;
   }
 
@@ -1139,7 +1166,7 @@
 }
 
 bool DexFileVerifier::CheckIntraClassDataItem() {
-  ClassDataItemIterator it(*dex_file_, ptr_);
+  ClassAccessor accessor(*dex_file_, ptr_);
 
   // This code is complicated by the fact that we don't directly know which class this belongs to.
   // So we need to explicitly search with the first item we find (either field or method), and then,
@@ -1148,14 +1175,18 @@
   dex::TypeIndex class_type_index;
   const DexFile::ClassDef* class_def = nullptr;
 
+  ClassAccessor::Field field(*dex_file_, accessor.ptr_pos_);
   // Check fields.
-  if (!CheckIntraClassDataItemFields<true>(&it,
+  if (!CheckIntraClassDataItemFields<true>(accessor.NumStaticFields(),
+                                           &field,
                                            &have_class,
                                            &class_type_index,
                                            &class_def)) {
     return false;
   }
-  if (!CheckIntraClassDataItemFields<false>(&it,
+  field.NextSection();
+  if (!CheckIntraClassDataItemFields<false>(accessor.NumInstanceFields(),
+                                            &field,
                                             &have_class,
                                             &class_type_index,
                                             &class_def)) {
@@ -1163,31 +1194,37 @@
   }
 
   // Check methods.
-  ClassDataItemIterator direct_it = it;
-
-  if (!CheckIntraClassDataItemMethods<true>(&it,
-                                            nullptr /* direct_it */,
-                                            &have_class,
-                                            &class_type_index,
-                                            &class_def)) {
+  ClassAccessor::Method method(*dex_file_, field.ptr_pos_);
+  if (!CheckIntraClassDataItemMethods(&method,
+                                      accessor.NumDirectMethods(),
+                                      nullptr /* direct_method */,
+                                      0u /* num_directs */,
+                                      &have_class,
+                                      &class_type_index,
+                                      &class_def)) {
     return false;
   }
-  if (!CheckIntraClassDataItemMethods<false>(&it,
-                                             &direct_it,
-                                             &have_class,
-                                             &class_type_index,
-                                             &class_def)) {
+  ClassAccessor::Method direct_methods(*dex_file_, field.ptr_pos_);
+  method.NextSection();
+  if (accessor.NumDirectMethods() != 0u) {
+    direct_methods.Read();
+  }
+  if (!CheckIntraClassDataItemMethods(&method,
+                                      accessor.NumVirtualMethods(),
+                                      &direct_methods,
+                                      accessor.NumDirectMethods(),
+                                      &have_class,
+                                      &class_type_index,
+                                      &class_def)) {
     return false;
   }
 
-  const uint8_t* end_ptr = it.EndDataPointer();
-
   // Check static field types against initial static values in encoded array.
   if (!CheckStaticFieldTypes(class_def)) {
     return false;
   }
 
-  ptr_ = end_ptr;
+  ptr_ = method.ptr_pos_;
   return true;
 }
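
Taken together, the hunks above replace one ClassDataItemIterator with count-driven readers: the ClassAccessor constructor decodes the four leb128 counts, each Read() consumes one encoded entry, and NextSection() marks a section boundary (static to instance fields, direct to virtual methods), presumably resetting the delta-accumulated index. A sketch of the calling convention using only the API shown above, with a hypothetical Check():

  ClassAccessor accessor(dex_file, class_data);             // Decodes the 4 counts.
  ClassAccessor::Field field(dex_file, accessor.ptr_pos_);  // ptr_pos_: friend access.
  for (size_t i = 0; i < accessor.NumStaticFields(); ++i) {
    field.Read();                // Decodes one index delta + access flags.
    Check(field);                // Hypothetical per-field check.
  }
  field.NextSection();           // Static -> instance boundary.
  for (size_t i = 0; i < accessor.NumInstanceFields(); ++i) {
    field.Read();
    Check(field);
  }
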
 
@@ -1965,17 +2002,21 @@
 }
 
 dex::TypeIndex DexFileVerifier::FindFirstClassDataDefiner(const uint8_t* ptr, bool* success) {
-  ClassDataItemIterator it(*dex_file_, ptr);
+  ClassAccessor accessor(*dex_file_, ptr);
   *success = true;
 
-  if (it.HasNextStaticField() || it.HasNextInstanceField()) {
-    LOAD_FIELD(field, it.GetMemberIndex(), "first_class_data_definer field_id",
+  if (accessor.NumFields() != 0) {
+    ClassAccessor::Field read_field(*dex_file_, accessor.ptr_pos_);
+    read_field.Read();
+    LOAD_FIELD(field, read_field.GetIndex(), "first_class_data_definer field_id",
                *success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
     return field->class_idx_;
   }
 
-  if (it.HasNextMethod()) {
-    LOAD_METHOD(method, it.GetMemberIndex(), "first_class_data_definer method_id",
+  if (accessor.NumMethods() != 0) {
+    ClassAccessor::Method read_method(*dex_file_, accessor.ptr_pos_);
+    read_method.Read();
+    LOAD_METHOD(method, read_method.GetIndex(), "first_class_data_definer method_id",
                 *success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
     return method->class_idx_;
   }
@@ -2556,33 +2597,35 @@
 }
 
 bool DexFileVerifier::CheckInterClassDataItem() {
-  ClassDataItemIterator it(*dex_file_, ptr_);
+  ClassAccessor accessor(*dex_file_, ptr_);
   bool success;
   dex::TypeIndex defining_class = FindFirstClassDataDefiner(ptr_, &success);
   if (!success) {
     return false;
   }
 
-  for (; it.HasNextStaticField() || it.HasNextInstanceField(); it.Next()) {
-    LOAD_FIELD(field, it.GetMemberIndex(), "inter_class_data_item field_id", return false)
+  for (const ClassAccessor::Field& read_field : accessor.GetFields()) {
+    LOAD_FIELD(field, read_field.GetIndex(), "inter_class_data_item field_id", return false)
     if (UNLIKELY(field->class_idx_ != defining_class)) {
       ErrorStringPrintf("Mismatched defining class for class_data_item field");
       return false;
     }
   }
-  for (; it.HasNextMethod(); it.Next()) {
-    uint32_t code_off = it.GetMethodCodeItemOffset();
+  auto methods = accessor.GetMethods();
+  auto it = methods.begin();
+  for (; it != methods.end(); ++it) {
+    uint32_t code_off = it->GetCodeItemOffset();
     if (code_off != 0 && !CheckOffsetToTypeMap(code_off, DexFile::kDexTypeCodeItem)) {
       return false;
     }
-    LOAD_METHOD(method, it.GetMemberIndex(), "inter_class_data_item method_id", return false)
+    LOAD_METHOD(method, it->GetIndex(), "inter_class_data_item method_id", return false)
     if (UNLIKELY(method->class_idx_ != defining_class)) {
       ErrorStringPrintf("Mismatched defining class for class_data_item method");
       return false;
     }
   }
 
-  ptr_ = it.EndDataPointer();
+  ptr_ = it.GetDataPointer();
   return true;
 }
 
diff --git a/libdexfile/dex/dex_file_verifier.h b/libdexfile/dex/dex_file_verifier.h
index 43d1093..79ddea4 100644
--- a/libdexfile/dex/dex_file_verifier.h
+++ b/libdexfile/dex/dex_file_verifier.h
@@ -22,6 +22,7 @@
 
 #include "base/hash_map.h"
 #include "base/safe_map.h"
+#include "class_accessor.h"
 #include "dex_file.h"
 #include "dex_file_types.h"
 
@@ -90,8 +91,8 @@
                                 uint32_t class_access_flags,
                                 dex::TypeIndex class_type_index,
                                 uint32_t code_offset,
-                                ClassDataItemIterator* direct_it,
-                                bool expect_direct);
+                                ClassAccessor::Method* direct_method,
+                                size_t* remaining_directs);
   ALWAYS_INLINE
   bool CheckOrder(const char* type_descr, uint32_t curr_index, uint32_t prev_index);
   bool CheckStaticFieldTypes(const DexFile::ClassDef* class_def);
@@ -105,15 +106,17 @@
   // Check all fields of the given type from the given iterator. Load the class data from the first
   // field, if necessary (and return it), or use the given values.
   template <bool kStatic>
-  bool CheckIntraClassDataItemFields(ClassDataItemIterator* it,
+  bool CheckIntraClassDataItemFields(size_t count,
+                                     ClassAccessor::Field* field,
                                      bool* have_class,
                                      dex::TypeIndex* class_type_index,
                                      const DexFile::ClassDef** class_def);
   // Check all methods of the given type from the given iterator. Load the class data from the first
   // method, if necessary (and return it), or use the given values.
-  template <bool kDirect>
-  bool CheckIntraClassDataItemMethods(ClassDataItemIterator* it,
-                                      ClassDataItemIterator* direct_it,
+  bool CheckIntraClassDataItemMethods(ClassAccessor::Method* method,
+                                      size_t num_methods,
+                                      ClassAccessor::Method* direct_method,
+                                      size_t num_directs,
                                       bool* have_class,
                                       dex::TypeIndex* class_type_index,
                                       const DexFile::ClassDef** class_def);
diff --git a/libdexfile/dex/dex_file_verifier_test.cc b/libdexfile/dex/dex_file_verifier_test.cc
index 78b53a0..a22a457 100644
--- a/libdexfile/dex/dex_file_verifier_test.cc
+++ b/libdexfile/dex/dex_file_verifier_test.cc
@@ -25,6 +25,7 @@
 #include "base/leb128.h"
 #include "base/macros.h"
 #include "base64_test_util.h"
+#include "class_accessor-inl.h"
 #include "descriptors_names.h"
 #include "dex_file-inl.h"
 #include "dex_file_loader.h"
@@ -238,27 +239,10 @@
 static const uint8_t* FindMethodData(const DexFile* dex_file,
                                      const char* name,
                                      /*out*/ uint32_t* method_idx = nullptr) {
-  const DexFile::ClassDef& class_def = dex_file->GetClassDef(0);
-  const uint8_t* class_data = dex_file->GetClassData(class_def);
+  ClassAccessor accessor(*dex_file, dex_file->GetClassDef(0));
 
-  ClassDataItemIterator it(*dex_file, class_data);
-
-  const uint8_t* trailing = class_data;
-  // Need to manually decode the four entries. DataPointer() doesn't work for this, as the first
-  // element has already been loaded into the iterator.
-  DecodeUnsignedLeb128(&trailing);
-  DecodeUnsignedLeb128(&trailing);
-  DecodeUnsignedLeb128(&trailing);
-  DecodeUnsignedLeb128(&trailing);
-
-  // Skip all fields.
-  while (it.HasNextStaticField() || it.HasNextInstanceField()) {
-    trailing = it.DataPointer();
-    it.Next();
-  }
-
-  while (it.HasNextMethod()) {
-    uint32_t method_index = it.GetMemberIndex();
+  for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+    uint32_t method_index = method.GetIndex();
     dex::StringIndex name_index = dex_file->GetMethodId(method_index).name_idx_;
     const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
     const char* str = dex_file->GetStringData(string_id);
@@ -266,12 +250,11 @@
       if (method_idx != nullptr) {
         *method_idx = method_index;
       }
-      DecodeUnsignedLeb128(&trailing);
+      // Go back two leb128 values to the start of the access flags.
+      const uint8_t* trailing = ReverseSearchUnsignedLeb128(method.GetDataPointer());
+      trailing = ReverseSearchUnsignedLeb128(trailing);
       return trailing;
     }
-
-    trailing = it.DataPointer();
-    it.Next();
   }
 
   return nullptr;
@@ -849,31 +832,17 @@
 // is to the access flags, so that the caller doesn't have to handle the leb128-encoded method-index
 // delta.
 static const uint8_t* FindFieldData(const DexFile* dex_file, const char* name) {
-  const DexFile::ClassDef& class_def = dex_file->GetClassDef(0);
-  const uint8_t* class_data = dex_file->GetClassData(class_def);
+  ClassAccessor accessor(*dex_file, dex_file->GetClassDef(0));
 
-  ClassDataItemIterator it(*dex_file, class_data);
-
-  const uint8_t* trailing = class_data;
-  // Need to manually decode the four entries. DataPointer() doesn't work for this, as the first
-  // element has already been loaded into the iterator.
-  DecodeUnsignedLeb128(&trailing);
-  DecodeUnsignedLeb128(&trailing);
-  DecodeUnsignedLeb128(&trailing);
-  DecodeUnsignedLeb128(&trailing);
-
-  while (it.HasNextStaticField() || it.HasNextInstanceField()) {
-    uint32_t field_index = it.GetMemberIndex();
+  for (const ClassAccessor::Field& field : accessor.GetFields()) {
+    uint32_t field_index = field.GetIndex();
     dex::StringIndex name_index = dex_file->GetFieldId(field_index).name_idx_;
     const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
     const char* str = dex_file->GetStringData(string_id);
     if (strcmp(name, str) == 0) {
-      DecodeUnsignedLeb128(&trailing);
-      return trailing;
+      // Go back one leb128 value to the start of the access flags.
+      return ReverseSearchUnsignedLeb128(field.GetDataPointer());
     }
-
-    trailing = it.DataPointer();
-    it.Next();
   }
 
   return nullptr;
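
ReverseSearchUnsignedLeb128 leans on the LEB128 framing: every byte of an unsigned LEB128 value except the last has the 0x80 continuation bit set, so the start of the value ending just before a pointer can be recovered by scanning backwards. A sketch of the idea (lower-bound checks omitted; the real helper presumably sits with the other leb128 utilities in base/leb128.h):

  static const uint8_t* ReverseSearchLeb128Start(const uint8_t* end) {
    const uint8_t* ptr = end - 1;      // Last byte: continuation bit clear.
    while ((ptr[-1] & 0x80u) != 0) {   // Preceding byte continues this value.
      --ptr;
    }
    return ptr;                        // First byte of the value.
  }

FindMethodData steps back twice (over code_off, onto access_flags) while FindFieldData steps back once, matching the class_data_item encodings (method_idx_delta, access_flags, code_off) and (field_idx_delta, access_flags).
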
diff --git a/openjdkjvm/OpenjdkJvm.cc b/openjdkjvm/OpenjdkJvm.cc
index df002b6..8d0200c 100644
--- a/openjdkjvm/OpenjdkJvm.cc
+++ b/openjdkjvm/OpenjdkJvm.cc
@@ -187,7 +187,7 @@
 }
 
 JNIEXPORT int jio_vfprintf(FILE* fp, const char* fmt, va_list args) {
-    assert(fp != NULL);
+    assert(fp != nullptr);
     return vfprintf(fp, fmt, args);
 }
 
@@ -203,7 +203,7 @@
 JNIEXPORT jlong JVM_CurrentTimeMillis(JNIEnv* env ATTRIBUTE_UNUSED,
                                       jclass clazz ATTRIBUTE_UNUSED) {
     struct timeval tv;
-    gettimeofday(&tv, (struct timezone *) NULL);
+    gettimeofday(&tv, (struct timezone *) nullptr);
     jlong when = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
     return when;
 }
@@ -319,8 +319,8 @@
                                  jstring javaFilename,
                                  jobject javaLoader) {
   ScopedUtfChars filename(env, javaFilename);
-  if (filename.c_str() == NULL) {
-    return NULL;
+  if (filename.c_str() == nullptr) {
+    return nullptr;
   }
 
   std::string error_msg;
@@ -348,7 +348,7 @@
   art::ScopedObjectAccess soa(env);
   art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
   art::Thread* thread = art::Thread::FromManagedThread(soa, jthread);
-  if (thread != NULL) {
+  if (thread != nullptr) {
     thread->SetNativePriority(prio);
   }
 }
@@ -421,7 +421,7 @@
                                               art::SuspendReason::kInternal,
                                               &timed_out);
   }
-  if (thread != NULL) {
+  if (thread != nullptr) {
     {
       art::ScopedObjectAccess soa(env);
       thread->SetThreadName(name.c_str());
diff --git a/openjdkjvmti/ti_timers.cc b/openjdkjvmti/ti_timers.cc
index 24fb041..11b58c4 100644
--- a/openjdkjvmti/ti_timers.cc
+++ b/openjdkjvmti/ti_timers.cc
@@ -83,7 +83,7 @@
   // No CLOCK_MONOTONIC support on older Mac OS.
   struct timeval t;
   t.tv_sec = t.tv_usec = 0;
-  gettimeofday(&t, NULL);
+  gettimeofday(&t, nullptr);
   *nanos_ptr = static_cast<jlong>(t.tv_sec)*1000000000LL + static_cast<jlong>(t.tv_usec)*1000LL;
 #endif
 
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 96d7c96..c86baa1 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1036,8 +1036,8 @@
 .endm
 
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 6b0de48..8b77453 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1561,8 +1561,8 @@
      * returned.
      */
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 2b69c17..05172db 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -192,15 +192,15 @@
   qpoints->pCheckInstanceOf = art_quick_check_instance_of;
   static_assert(!IsDirectEntrypoint(kQuickCheckInstanceOf), "Non-direct C stub marked direct.");
 
-  // DexCache
+  // Resolution and initialization
   qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
   static_assert(!IsDirectEntrypoint(kQuickInitializeStaticStorage),
                 "Non-direct C stub marked direct.");
-  qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
-  static_assert(!IsDirectEntrypoint(kQuickInitializeTypeAndVerifyAccess),
+  qpoints->pResolveTypeAndVerifyAccess = art_quick_resolve_type_and_verify_access;
+  static_assert(!IsDirectEntrypoint(kQuickResolveTypeAndVerifyAccess),
                 "Non-direct C stub marked direct.");
-  qpoints->pInitializeType = art_quick_initialize_type;
-  static_assert(!IsDirectEntrypoint(kQuickInitializeType), "Non-direct C stub marked direct.");
+  qpoints->pResolveType = art_quick_resolve_type;
+  static_assert(!IsDirectEntrypoint(kQuickResolveType), "Non-direct C stub marked direct.");
   qpoints->pResolveString = art_quick_resolve_string;
   static_assert(!IsDirectEntrypoint(kQuickResolveString), "Non-direct C stub marked direct.");
   qpoints->pResolveMethodHandle = art_quick_resolve_method_handle;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 303333c..b10d1fc 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -2086,13 +2086,13 @@
     /*
      * Entry from managed code when dex cache misses for a type_idx.
      */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
 
     /*
      * Entry from managed code when type_idx needs to be checked for access and dex cache may also
      * miss.
      */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
 
     /*
      * Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index f35cb16..ebf1d5b 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1986,13 +1986,13 @@
     /*
      * Entry from managed code when dex cache misses for a type_idx.
      */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
 
     /*
      * Entry from managed code when type_idx needs to be checked for access and dex cache may also
      * miss.
      */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
 
     /*
      * Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 9fe41ca..b0bed56 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1281,8 +1281,8 @@
 GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
 
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index c41d3e4..a8a648f 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1301,8 +1301,8 @@
 END_FUNCTION art_quick_alloc_object_initialized_region_tlab
 
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index ac22f07..f693524 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -374,12 +374,50 @@
       case Intrinsics::kSystemArrayCopyChar:
       case Intrinsics::kStringGetCharsNoCheck:
       case Intrinsics::kReferenceGetReferent:
+      case Intrinsics::kMemoryPeekByte:
+      case Intrinsics::kMemoryPokeByte:
+      case Intrinsics::kUnsafeCASInt:
+      case Intrinsics::kUnsafeCASLong:
+      case Intrinsics::kUnsafeCASObject:
+      case Intrinsics::kUnsafeGet:
+      case Intrinsics::kUnsafeGetAndAddInt:
+      case Intrinsics::kUnsafeGetAndAddLong:
+      case Intrinsics::kUnsafeGetAndSetInt:
+      case Intrinsics::kUnsafeGetAndSetLong:
+      case Intrinsics::kUnsafeGetAndSetObject:
+      case Intrinsics::kUnsafeGetLong:
+      case Intrinsics::kUnsafeGetLongVolatile:
+      case Intrinsics::kUnsafeGetObject:
+      case Intrinsics::kUnsafeGetObjectVolatile:
+      case Intrinsics::kUnsafeGetVolatile:
+      case Intrinsics::kUnsafePut:
+      case Intrinsics::kUnsafePutLong:
+      case Intrinsics::kUnsafePutLongOrdered:
+      case Intrinsics::kUnsafePutLongVolatile:
+      case Intrinsics::kUnsafePutObject:
+      case Intrinsics::kUnsafePutObjectOrdered:
+      case Intrinsics::kUnsafePutObjectVolatile:
+      case Intrinsics::kUnsafePutOrdered:
+      case Intrinsics::kUnsafePutVolatile:
+      case Intrinsics::kUnsafeLoadFence:
+      case Intrinsics::kUnsafeStoreFence:
+      case Intrinsics::kUnsafeFullFence:
         // These intrinsics are on the light greylist and will fail a DCHECK in
         // SetIntrinsic() if their flags change on the respective dex methods.
         // Note that the DCHECK currently won't fail if the dex methods are
         // whitelisted, e.g. in the core image (b/77733081). As a result, we
         // might print warnings but we won't change the semantics.
         return HiddenApiAccessFlags::kLightGreylist;
+      case Intrinsics::kStringNewStringFromBytes:
+      case Intrinsics::kStringNewStringFromChars:
+      case Intrinsics::kStringNewStringFromString:
+      case Intrinsics::kMemoryPeekIntNative:
+      case Intrinsics::kMemoryPeekLongNative:
+      case Intrinsics::kMemoryPeekShortNative:
+      case Intrinsics::kMemoryPokeIntNative:
+      case Intrinsics::kMemoryPokeLongNative:
+      case Intrinsics::kMemoryPokeShortNative:
+        return HiddenApiAccessFlags::kDarkGreylist;
       case Intrinsics::kVarHandleFullFence:
       case Intrinsics::kVarHandleAcquireFence:
       case Intrinsics::kVarHandleReleaseFence:
@@ -475,7 +513,7 @@
     // (b) only VarHandle intrinsics are blacklisted at the moment and they
     // should not be used outside tests with disabled API checks.
     if (hidden_api_flags != HiddenApiAccessFlags::kWhitelist) {
-      DCHECK_EQ(hidden_api_flags, GetHiddenApiAccessFlags());
+      DCHECK_EQ(hidden_api_flags, GetHiddenApiAccessFlags()) << PrettyMethod();
     }
   } else {
     SetAccessFlags(new_value);
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index 938489b..aa32113 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -33,10 +33,10 @@
 // Cast entrypoints.
 extern "C" void art_quick_check_instance_of(art::mirror::Object*, art::mirror::Class*);
 
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t);
-extern "C" void* art_quick_initialize_type(uint32_t);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t);
+// Resolution and initialization entrypoints.
+extern "C" void* art_quick_initialize_static_storage(art::mirror::Class*);
+extern "C" void* art_quick_resolve_type(uint32_t);
+extern "C" void* art_quick_resolve_type_and_verify_access(uint32_t);
 extern "C" void* art_quick_resolve_method_handle(uint32_t);
 extern "C" void* art_quick_resolve_method_type(uint32_t);
 extern "C" void* art_quick_resolve_string(uint32_t);
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 5dcece4..8e784c1 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -33,10 +33,10 @@
   // Alloc
   ResetQuickAllocEntryPoints(qpoints, /* is_marking */ true);
 
-  // DexCache
+  // Resolution and initialization
   qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
-  qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
-  qpoints->pInitializeType = art_quick_initialize_type;
+  qpoints->pResolveTypeAndVerifyAccess = art_quick_resolve_type_and_verify_access;
+  qpoints->pResolveType = art_quick_resolve_type;
   qpoints->pResolveMethodHandle = art_quick_resolve_method_handle;
   qpoints->pResolveMethodType = art_quick_resolve_method_type;
   qpoints->pResolveString = art_quick_resolve_string;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 85d633f..c4d85a3 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -95,7 +95,7 @@
 static inline void StoreStringInBss(ArtMethod* outer_method,
                                     dex::StringIndex string_idx,
                                     ObjPtr<mirror::String> resolved_string)
-    REQUIRES_SHARED(Locks::mutator_lock_) __attribute__((optnone)) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile* dex_file = outer_method->GetDexFile();
   DCHECK(dex_file != nullptr);
   const OatDexFile* oat_dex_file = dex_file->GetOatDexFile();
@@ -129,27 +129,25 @@
   return outer_method->GetDexFile() == caller->GetDexFile();
 }
 
-extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
+extern "C" mirror::Class* artInitializeStaticStorageFromCode(mirror::Class* klass, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Called to ensure static storage base is initialized for direct static field reads and writes.
   // A class may be accessing another class' fields when it doesn't have access, as access has been
   // given by inheritance.
   ScopedQuickEntrypointChecks sqec(self);
-  auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(
-      self, CalleeSaveType::kSaveEverythingForClinit);
-  ArtMethod* caller = caller_and_outer.caller;
-  ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
-                                                        caller,
-                                                        self,
-                                                        /* can_run_clinit */ true,
-                                                        /* verify_access */ false);
-  if (LIKELY(result != nullptr) && CanReferenceBss(caller_and_outer.outer_method, caller)) {
-    StoreTypeInBss(caller_and_outer.outer_method, dex::TypeIndex(type_idx), result);
+  DCHECK(klass != nullptr);
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Class> h_klass = hs.NewHandle(klass);
+  bool success = class_linker->EnsureInitialized(
+      self, h_klass, /* can_init_fields */ true, /* can_init_parents */ true);
+  if (UNLIKELY(!success)) {
+    return nullptr;
   }
-  return result.Ptr();
+  return h_klass.Get();
 }
 
-extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
+extern "C" mirror::Class* artResolveTypeFromCode(uint32_t type_idx, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Called when the .bss slot was empty or for main-path runtime call.
   ScopedQuickEntrypointChecks sqec(self);
@@ -167,7 +165,7 @@
   return result.Ptr();
 }
 
-extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
+extern "C" mirror::Class* artResolveTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Called when caller isn't guaranteed to have access to a type.
   ScopedQuickEntrypointChecks sqec(self);
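
The rewritten artInitializeStaticStorageFromCode no longer resolves anything: compiled code now passes the already-resolved mirror::Class*, so the entrypoint's whole job is EnsureInitialized. One point the hunk leaves implicit is why the Handle indirection is required at all: running <clinit> can suspend the thread, a moving GC may then relocate the class, and only a handle registered with the scope gets updated. A condensed sketch of that pattern (an illustration of the handle API as used above, not new code):

  StackHandleScope<1> hs(self);                     // Registers a GC-visible root.
  Handle<mirror::Class> h_klass = hs.NewHandle(klass);
  if (!class_linker->EnsureInitialized(self, h_klass,
                                       /* can_init_fields */ true,
                                       /* can_init_parents */ true)) {
    return nullptr;                                 // Exception pending on self.
  }
  return h_klass.Get();                             // Reload: klass may have moved.
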
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 4ce954c..42b680e 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -38,9 +38,9 @@
   V(InstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*) \
   V(CheckInstanceOf, void, mirror::Object*, mirror::Class*) \
 \
-  V(InitializeStaticStorage, void*, uint32_t) \
-  V(InitializeTypeAndVerifyAccess, void*, uint32_t) \
-  V(InitializeType, void*, uint32_t) \
+  V(InitializeStaticStorage, void*, mirror::Class*) \
+  V(ResolveTypeAndVerifyAccess, void*, uint32_t) \
+  V(ResolveType, void*, uint32_t) \
   V(ResolveMethodHandle, void*, uint32_t) \
   V(ResolveMethodType, void*, uint32_t) \
   V(ResolveString, void*, uint32_t) \
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index c3cd793..cb85804 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -181,11 +181,11 @@
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckInstanceOf, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckInstanceOf, pInitializeStaticStorage,
                          sizeof(void*));
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pInitializeTypeAndVerifyAccess,
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pResolveTypeAndVerifyAccess,
                          sizeof(void*));
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType,
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveTypeAndVerifyAccess, pResolveType,
                          sizeof(void*));
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeType, pResolveMethodHandle, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveType, pResolveMethodHandle, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveMethodHandle, pResolveMethodType, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveMethodType, pResolveString, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet8Instance, sizeof(void*));
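
The InitializeType to ResolveType rename propagates this mechanically because QUICK_ENTRYPOINT_LIST is an X-macro: each client supplies its own V and stamps out one field, extern, or offset check per row, so renaming a row renames every use. A reduced sketch of the pattern with a hypothetical two-row list:

  #define SKETCH_ENTRYPOINT_LIST(V) \
    V(ResolveType, void*, uint32_t) \
    V(ResolveString, void*, uint32_t)

  // One client: a typed function-pointer field per entrypoint, producing the
  // pResolveType / pResolveString names whose offsets are checked above.
  #define ENTRYPOINT_FIELD(name, rettype, ...) rettype (*p##name)(__VA_ARGS__);
  struct SketchEntryPoints {
    SKETCH_ENTRYPOINT_LIST(ENTRYPOINT_FIELD)
  };
  #undef ENTRYPOINT_FIELD
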
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index c6ec174..436eb2c 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -247,14 +247,14 @@
                                                /* out */ size_t* bytes_tl_bulk_allocated) {
   DCHECK_ALIGNED(num_bytes, kAlignment);
   DCHECK_GT(num_bytes, kRegionSize);
-  size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
-  DCHECK_GT(num_regs, 0U);
-  DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
-  DCHECK_LE(num_bytes, num_regs * kRegionSize);
+  size_t num_regs_in_large_region = RoundUp(num_bytes, kRegionSize) / kRegionSize;
+  DCHECK_GT(num_regs_in_large_region, 0U);
+  DCHECK_LT((num_regs_in_large_region - 1) * kRegionSize, num_bytes);
+  DCHECK_LE(num_bytes, num_regs_in_large_region * kRegionSize);
   MutexLock mu(Thread::Current(), region_lock_);
   if (!kForEvac) {
     // Retain sufficient free regions for full evacuation.
-    if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
+    if ((num_non_free_regions_ + num_regs_in_large_region) * 2 > num_regions_) {
       return nullptr;
     }
   }
@@ -265,7 +265,7 @@
     size_t next_region1 = -1;
     mirror::Object* region1 = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
                                                           num_regions_,
-                                                          num_regs,
+                                                          num_regs_in_large_region,
                                                           bytes_allocated,
                                                           usable_size,
                                                           bytes_tl_bulk_allocated,
@@ -280,16 +280,16 @@
     }
 
     // If the previous attempt failed, try to find a range of free regions within
-    // [0, cyclic_alloc_region_index_ + num_regions_ - 1).
+    // [0, min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_)).
     size_t next_region2 = -1;
-    mirror::Object* region2 =
-        AllocLargeInRange<kForEvac>(0,
-                                    cyclic_alloc_region_index_ + num_regions_ - 1,
-                                    num_regs,
-                                    bytes_allocated,
-                                    usable_size,
-                                    bytes_tl_bulk_allocated,
-                                    &next_region2);
+    mirror::Object* region2 = AllocLargeInRange<kForEvac>(
+            0,
+            std::min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_),
+            num_regs_in_large_region,
+            bytes_allocated,
+            usable_size,
+            bytes_tl_bulk_allocated,
+            &next_region2);
     if (region2 != nullptr) {
       DCHECK_LT(0u, next_region2);
       DCHECK_LE(next_region2, num_regions_);
@@ -302,7 +302,7 @@
     // Try to find a range of free regions within [0, num_regions_).
     mirror::Object* region = AllocLargeInRange<kForEvac>(0,
                                                          num_regions_,
-                                                         num_regs,
+                                                         num_regs_in_large_region,
                                                          bytes_allocated,
                                                          usable_size,
                                                          bytes_tl_bulk_allocated);
@@ -316,17 +316,21 @@
 template<bool kForEvac>
 inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
                                                       size_t end,
-                                                      size_t num_regs,
+                                                      size_t num_regs_in_large_region,
                                                       /* out */ size_t* bytes_allocated,
                                                       /* out */ size_t* usable_size,
                                                       /* out */ size_t* bytes_tl_bulk_allocated,
                                                       /* out */ size_t* next_region) {
+  DCHECK_LE(0u, begin);
+  DCHECK_LT(begin, end);
+  DCHECK_LE(end, num_regions_);
   size_t left = begin;
-  while (left + num_regs - 1 < end) {
+  while (left + num_regs_in_large_region - 1 < end) {
     bool found = true;
     size_t right = left;
-    DCHECK_LT(right, left + num_regs) << "The inner loop should iterate at least once";
-    while (right < left + num_regs) {
+    DCHECK_LT(right, left + num_regs_in_large_region)
+        << "The inner loop should iterate at least once";
+    while (right < left + num_regs_in_large_region) {
       if (regions_[right].IsFree()) {
         ++right;
         // Ensure `right` is not going beyond the past-the-end index of the region space.
@@ -338,7 +342,7 @@
     }
     if (found) {
       // `right` points to the one region past the last free region.
-      DCHECK_EQ(left + num_regs, right);
+      DCHECK_EQ(left + num_regs_in_large_region, right);
       Region* first_reg = &regions_[left];
       DCHECK(first_reg->IsFree());
       first_reg->UnfreeLarge(this, time_);
@@ -347,7 +351,7 @@
       } else {
         ++num_non_free_regions_;
       }
-      size_t allocated = num_regs * kRegionSize;
+      size_t allocated = num_regs_in_large_region * kRegionSize;
       // We make 'top' all usable bytes, as the caller of this
       // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
       first_reg->SetTop(first_reg->Begin() + allocated);
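
The renamed num_regs_in_large_region makes the window scan above easier to follow: AllocLargeInRange looks for that many consecutive free regions in [begin, end), and on a miss restarts just past the first non-free region rather than sliding the window by one. The core search as a standalone sketch (plain array, hypothetical names, n > 0 assumed):

  // Returns the start of the first run of n consecutive free slots in
  // [begin, end), or end if no such run exists.
  size_t FindFreeRun(const bool* is_free, size_t begin, size_t end, size_t n) {
    size_t left = begin;
    while (left + n - 1 < end) {        // The window [left, left + n) fits.
      size_t right = left;
      while (right < left + n && is_free[right]) {
        ++right;
      }
      if (right == left + n) {
        return left;                    // Found n consecutive free slots.
      }
      left = right + 1;                 // Skip past the blocking slot.
    }
    return end;
  }
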
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index a129171..fa33a8a 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -586,17 +586,17 @@
   Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);
 
   // Scan region range [`begin`, `end`) in increasing order to try to
-  // allocate a large region having a size of `num_regs` regions. If
-  // there is no space in the region space to allocate this large
-  // region, return null.
+  // allocate a large region having a size of `num_regs_in_large_region`
+  // regions. If there is no space in the region space to allocate this
+  // large region, return null.
   //
   // If argument `next_region` is not null, use `*next_region` to
   // return the index to the region next to the allocated large region
   // returned by this method.
   template<bool kForEvac>
-  mirror::Object* AllocLargeInRange(size_t num_regs,
-                                    size_t begin,
+  mirror::Object* AllocLargeInRange(size_t begin,
                                     size_t end,
+                                    size_t num_regs_in_large_region,
                                     /* out */ size_t* bytes_allocated,
                                     /* out */ size_t* usable_size,
                                     /* out */ size_t* bytes_tl_bulk_allocated,
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index dc42cfa..3f44928 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1590,7 +1590,7 @@
   if (obj == nullptr) {
     return;
   }
-  MarkRootObject(obj, 0, xlate[info.GetType()], info.GetThreadId());
+  MarkRootObject(obj, nullptr, xlate[info.GetType()], info.GetThreadId());
 }
 
 // If "direct_to_ddms" is true, the other arguments are ignored, and data is
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 22a6e9d..74aa787 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1077,6 +1077,8 @@
     Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
   if (CheckCallers(shadow_frame,
                    { "void java.lang.Thread.init(java.lang.ThreadGroup, java.lang.Runnable, "
+                         "java.lang.String, long, java.security.AccessControlContext)",
+                     "void java.lang.Thread.init(java.lang.ThreadGroup, java.lang.Runnable, "
                          "java.lang.String, long)",
                      "void java.lang.Thread.<init>()",
                      "void java.util.logging.LogManager$Cleaner.<init>("
@@ -1111,6 +1113,8 @@
                    { "java.lang.Thread$State java.lang.Thread.getState()",
                      "java.lang.ThreadGroup java.lang.Thread.getThreadGroup()",
                      "void java.lang.Thread.init(java.lang.ThreadGroup, java.lang.Runnable, "
+                         "java.lang.String, long, java.security.AccessControlContext)",
+                     "void java.lang.Thread.init(java.lang.ThreadGroup, java.lang.Runnable, "
                          "java.lang.String, long)",
                      "void java.lang.Thread.<init>()",
                      "void java.util.logging.LogManager$Cleaner.<init>("
@@ -1965,7 +1969,7 @@
   const auto& iter = invoke_handlers_.find(name);
   if (iter != invoke_handlers_.end()) {
     // Clear out the result in case it's not zeroed out.
-    result->SetL(0);
+    result->SetL(nullptr);
 
     // Push the shadow frame. This is so the failing method can be seen in abort dumps.
     self->PushShadowFrame(shadow_frame);
@@ -1986,7 +1990,7 @@
   const auto& iter = jni_handlers_.find(name);
   if (iter != jni_handlers_.end()) {
     // Clear out the result in case it's not zeroed out.
-    result->SetL(0);
+    result->SetL(nullptr);
     (*iter->second)(self, method, receiver, args, result);
   } else if (Runtime::Current()->IsActiveTransaction()) {
     AbortTransactionF(self, "Attempt to invoke native method in non-started runtime: %s",
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 9409b76..0353ea7 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -1159,7 +1159,7 @@
   }
   basket.className = Dbg::GetClassName(basket.locationClass.Get());
   basket.exceptionClass.Assign(exception_object->GetClass());
-  basket.caught = (pCatchLoc->method != 0);
+  basket.caught = (pCatchLoc->method != nullptr);
   basket.thisPtr.Assign(thisPtr);
 
   /* don't try to post an exception caused by the debugger */
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index 66bd74b..c5e8830 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -1462,7 +1462,7 @@
         break;
       }
     }
-    return 0;
+    return nullptr;
   }
 
   void AbortF(const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) {
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 01a32a2..570fc48 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -484,7 +484,7 @@
                                                       first_dest_reg,
                                                       new_shadow_frame)) {
           DCHECK(self->IsExceptionPending());
-          result->SetL(0);
+          result->SetL(nullptr);
           return false;
         }
       } else {
@@ -500,7 +500,7 @@
                                                     operands,
                                                     new_shadow_frame)) {
           DCHECK(self->IsExceptionPending());
-          result->SetL(0);
+          result->SetL(nullptr);
           return false;
         }
       }
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index cdba6b2..b598df3 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -233,13 +233,13 @@
   if (base_address == nullptr) {
     ScopedObjectAccess soa(env);
     ThrowWrappedIOException("dexFileBuffer not direct");
-    return 0;
+    return nullptr;
   }
 
   std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
   if (dex_mem_map == nullptr) {
     DCHECK(Thread::Current()->IsExceptionPending());
-    return 0;
+    return nullptr;
   }
 
   size_t length = static_cast<size_t>(end - start);
@@ -255,7 +255,7 @@
   std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
   if (dex_mem_map == nullptr) {
     DCHECK(Thread::Current()->IsExceptionPending());
-    return 0;
+    return nullptr;
   }
 
   auto destination = reinterpret_cast<jbyte*>(dex_mem_map.get()->Begin());
@@ -273,7 +273,7 @@
                                          jobjectArray dex_elements) {
   ScopedUtfChars sourceName(env, javaSourceName);
   if (sourceName.c_str() == nullptr) {
-    return 0;
+    return nullptr;
   }
 
   Runtime* const runtime = Runtime::Current();
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index f1e267b..7ac4086 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -388,7 +388,7 @@
 // as PSS, private/shared dirty/shared data are available via
 // /proc/<pid>/smaps.
 static void VMDebug_getHeapSpaceStats(JNIEnv* env, jclass, jlongArray data) {
-  jlong* arr = reinterpret_cast<jlong*>(env->GetPrimitiveArrayCritical(data, 0));
+  jlong* arr = reinterpret_cast<jlong*>(env->GetPrimitiveArrayCritical(data, nullptr));
   if (arr == nullptr || env->GetArrayLength(data) < 9) {
     return;
   }
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index e54674f..4b4d6e3 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -121,7 +121,7 @@
 static jobject Constructor_newInstanceFromSerialization(JNIEnv* env, jclass unused ATTRIBUTE_UNUSED,
                                                         jclass ctorClass, jclass allocClass) {
     jmethodID ctor = env->GetMethodID(ctorClass, "<init>", "()V");
-    DCHECK(ctor != NULL);
+    DCHECK(ctor != nullptr);
     return env->NewObject(allocClass, ctor);
 }
 
diff --git a/runtime/oat.h b/runtime/oat.h
index 69aaceb..037c8f9 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: Move MethodInfo into CodeInfo.
-  static constexpr uint8_t kOatVersion[] = { '1', '5', '8', '\0' };
+  // Last oat version changed reason: Add stack map fast path for GC.
+  static constexpr uint8_t kOatVersion[] = { '1', '6', '1', '\0' };
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 5f87bf0..4ed26fa 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -550,7 +550,7 @@
   const IndexBssMapping* const method_bss_mapping_ = nullptr;
   const IndexBssMapping* const type_bss_mapping_ = nullptr;
   const IndexBssMapping* const string_bss_mapping_ = nullptr;
-  const uint32_t* const oat_class_offsets_pointer_ = 0u;
+  const uint32_t* const oat_class_offsets_pointer_ = nullptr;
   TypeLookupTable lookup_table_;
   const DexLayoutSections* const dex_layout_sections_ = nullptr;
 
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 7383d47..a44e5a4 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -333,6 +333,9 @@
           .IntoKey(M::UseStderrLogger)
       .Define("-Xonly-use-system-oat-files")
           .IntoKey(M::OnlyUseSystemOatFiles)
+      .Define("-Xverifier-logging-threshold=_")
+          .WithType<unsigned int>()
+          .IntoKey(M::VerifierLoggingThreshold)
       .Ignore({
           "-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
           "-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a81c4d0..facebda 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -273,7 +273,8 @@
       pruned_dalvik_cache_(false),
       // Initially assume we perceive jank in case the process state is never updated.
       process_state_(kProcessStateJankPerceptible),
-      zygote_no_threads_(false) {
+      zygote_no_threads_(false),
+      verifier_logging_threshold_ms_(100) {
   static_assert(Runtime::kCalleeSaveSize ==
                     static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
 
@@ -1438,6 +1439,8 @@
     }
   }
 
+  verifier_logging_threshold_ms_ = runtime_options.GetOrDefault(Opt::VerifierLoggingThreshold);
+
   std::string error_msg;
   java_vm_ = JavaVMExt::Create(this, runtime_options, &error_msg);
   if (java_vm_.get() == nullptr) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f413733..ca93e24 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -767,6 +767,10 @@
 
   static constexpr int32_t kUnsetSdkVersion = 0u;
 
+  uint32_t GetVerifierLoggingThresholdMs() const {
+    return verifier_logging_threshold_ms_;
+  }
+
  private:
   static void InitPlatformSignalHandlers();
 
@@ -1073,6 +1077,8 @@
 
   std::unique_ptr<MemMap> protected_fault_page_;
 
+  uint32_t verifier_logging_threshold_ms_;
+
   DISALLOW_COPY_AND_ASSIGN(Runtime);
 };
 
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 3f9a322..ef21f9f 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -152,5 +152,6 @@
 RUNTIME_OPTIONS_KEY (Unit,                UseStderrLogger)
 
 RUNTIME_OPTIONS_KEY (Unit,                OnlyUseSystemOatFiles)
+RUNTIME_OPTIONS_KEY (unsigned int,        VerifierLoggingThreshold,       100)
 
 #undef RUNTIME_OPTIONS_KEY
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index cd82284..d1000c5 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -50,24 +50,26 @@
 }
 
 void CodeInfo::Decode(const uint8_t* data, DecodeFlags flags) {
-  const uint8_t* begin = data;
-  frame_size_in_bytes_ = DecodeUnsignedLeb128(&data);
-  core_spill_mask_ = DecodeUnsignedLeb128(&data);
-  fp_spill_mask_ = DecodeUnsignedLeb128(&data);
-  number_of_dex_registers_ = DecodeUnsignedLeb128(&data);
-  BitMemoryReader reader(data, /* bit_offset */ 0);
+  BitMemoryReader reader(data);
+  packed_frame_size_ = DecodeVarintBits(reader);
+  core_spill_mask_ = DecodeVarintBits(reader);
+  fp_spill_mask_ = DecodeVarintBits(reader);
+  number_of_dex_registers_ = DecodeVarintBits(reader);
   DecodeTable(stack_maps_, reader, data);
+  DecodeTable(register_masks_, reader, data);
+  DecodeTable(stack_masks_, reader, data);
+  if (flags & DecodeFlags::GcMasksOnly) {
+    return;
+  }
   DecodeTable(inline_infos_, reader, data);
   DecodeTable(method_infos_, reader, data);
   if (flags & DecodeFlags::InlineInfoOnly) {
     return;
   }
-  DecodeTable(register_masks_, reader, data);
-  DecodeTable(stack_masks_, reader, data);
   DecodeTable(dex_register_masks_, reader, data);
   DecodeTable(dex_register_maps_, reader, data);
   DecodeTable(dex_register_catalog_, reader, data);
-  size_in_bits_ = (data - begin) * kBitsPerByte + reader.GetBitOffset();
+  size_in_bits_ = reader.GetBitOffset();
 }
 
 template<typename Accessor>
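The four QuickMethodFrameInfo fields previously sat in front of the bit-packed tables as byte-aligned LEB128 values, which is why Decode had to track `begin` separately; now everything, header included, flows through a single BitMemoryReader, so `size_in_bits_` falls directly out of the reader at the end. As a rough sketch of the kind of small-value-biased varint such a bit reader typically uses (an assumed layout for illustration, not necessarily ART's exact DecodeVarintBits format):

    #include <cstddef>
    #include <cstdint>

    struct BitReader {  // stand-in for art::BitMemoryReader
      const uint8_t* data;
      size_t bit_offset = 0;
      uint32_t ReadBits(size_t n) {  // read n bits, LSB-first
        uint32_t value = 0;
        for (size_t i = 0; i < n; ++i, ++bit_offset) {
          value |= static_cast<uint32_t>((data[bit_offset / 8] >> (bit_offset % 8)) & 1u) << i;
        }
        return value;
      }
    };

    // Values 0..11 fit directly in the 4-bit header; headers 12..15 say how
    // many whole bytes of payload follow (assumed scheme).
    uint32_t DecodeVarint(BitReader& reader) {
      uint32_t header = reader.ReadBits(4);
      if (header < 12u) {
        return header;
      }
      size_t num_bytes = header - 12u + 1u;  // headers 12..15 -> 1..4 payload bytes
      return reader.ReadBits(num_bytes * 8u);
    }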
@@ -91,18 +93,17 @@
 size_t CodeInfo::Dedupe(std::vector<uint8_t>* out, const uint8_t* in, DedupeMap* dedupe_map) {
   // Remember the current offset in the output buffer so that we can return it later.
   const size_t result = out->size();
-  // Copy the header which encodes QuickMethodFrameInfo.
-  EncodeUnsignedLeb128(out, DecodeUnsignedLeb128(&in));
-  EncodeUnsignedLeb128(out, DecodeUnsignedLeb128(&in));
-  EncodeUnsignedLeb128(out, DecodeUnsignedLeb128(&in));
-  EncodeUnsignedLeb128(out, DecodeUnsignedLeb128(&in));
-  BitMemoryReader reader(in, /* bit_offset */ 0);
+  BitMemoryReader reader(in);
   BitMemoryWriter<std::vector<uint8_t>> writer(out, /* bit_offset */ out->size() * kBitsPerByte);
+  EncodeVarintBits(writer, DecodeVarintBits(reader));  // packed_frame_size_.
+  EncodeVarintBits(writer, DecodeVarintBits(reader));  // core_spill_mask_.
+  EncodeVarintBits(writer, DecodeVarintBits(reader));  // fp_spill_mask_.
+  EncodeVarintBits(writer, DecodeVarintBits(reader));  // number_of_dex_registers_.
   DedupeTable<StackMap>(writer, reader, dedupe_map);
-  DedupeTable<InlineInfo>(writer, reader, dedupe_map);
-  DedupeTable<MethodInfo>(writer, reader, dedupe_map);
   DedupeTable<RegisterMask>(writer, reader, dedupe_map);
   DedupeTable<MaskInfo>(writer, reader, dedupe_map);
+  DedupeTable<InlineInfo>(writer, reader, dedupe_map);
+  DedupeTable<MethodInfo>(writer, reader, dedupe_map);
   DedupeTable<MaskInfo>(writer, reader, dedupe_map);
   DedupeTable<DexRegisterMapInfo>(writer, reader, dedupe_map);
   DedupeTable<DexRegisterInfo>(writer, reader, dedupe_map);
@@ -213,10 +214,10 @@
   Stats* stats = parent->Child("CodeInfo");
   stats->AddBytes(Size());
   AddTableSizeStats<StackMap>("StackMaps", stack_maps_, stats);
-  AddTableSizeStats<InlineInfo>("InlineInfos", inline_infos_, stats);
-  AddTableSizeStats<MethodInfo>("MethodInfo", method_infos_, stats);
   AddTableSizeStats<RegisterMask>("RegisterMasks", register_masks_, stats);
   AddTableSizeStats<MaskInfo>("StackMasks", stack_masks_, stats);
+  AddTableSizeStats<InlineInfo>("InlineInfos", inline_infos_, stats);
+  AddTableSizeStats<MethodInfo>("MethodInfo", method_infos_, stats);
   AddTableSizeStats<MaskInfo>("DexRegisterMasks", dex_register_masks_, stats);
   AddTableSizeStats<DexRegisterMapInfo>("DexRegisterMaps", dex_register_maps_, stats);
   AddTableSizeStats<DexRegisterInfo>("DexRegisterCatalog", dex_register_catalog_, stats);
@@ -278,10 +279,10 @@
   vios->Stream() << "CodeInfo\n";
   ScopedIndentation indent1(vios);
   DumpTable<StackMap>(vios, "StackMaps", stack_maps_, verbose);
-  DumpTable<InlineInfo>(vios, "InlineInfos", inline_infos_, verbose);
-  DumpTable<MethodInfo>(vios, "MethodInfo", method_infos_, verbose);
   DumpTable<RegisterMask>(vios, "RegisterMasks", register_masks_, verbose);
   DumpTable<MaskInfo>(vios, "StackMasks", stack_masks_, verbose, true /* is_mask */);
+  DumpTable<InlineInfo>(vios, "InlineInfos", inline_infos_, verbose);
+  DumpTable<MethodInfo>(vios, "MethodInfo", method_infos_, verbose);
   DumpTable<MaskInfo>(vios, "DexRegisterMasks", dex_register_masks_, verbose, true /* is_mask */);
   DumpTable<DexRegisterMapInfo>(vios, "DexRegisterMaps", dex_register_maps_, verbose);
   DumpTable<DexRegisterInfo>(vios, "DexRegisterCatalog", dex_register_catalog_, verbose);
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 8bfae7c..d6db05a 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -271,19 +271,17 @@
  public:
   enum DecodeFlags {
     Default = 0,
+    // Limits the decoding only to the data needed by GC.
+    GcMasksOnly = 1,
     // Limits the decoding only to the main stack map table and inline info table.
     // This is sufficient for many use cases and makes the header decoding faster.
-    InlineInfoOnly = 1,
+    InlineInfoOnly = 2,
   };
 
   explicit CodeInfo(const uint8_t* data, DecodeFlags flags = DecodeFlags::Default) {
     Decode(reinterpret_cast<const uint8_t*>(data), flags);
   }
 
-  explicit CodeInfo(MemoryRegion region) : CodeInfo(region.begin()) {
-    DCHECK_EQ(Size(), region.size());
-  }
-
   explicit CodeInfo(const OatQuickMethodHeader* header, DecodeFlags flags = DecodeFlags::Default);
 
   size_t Size() const {
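Note that the tables were reordered so the register and stack masks come right after the stack maps; that ordering is what lets GcMasksOnly stop decoding early, and it is why the Dedupe and stats/dump code above had to be reshuffled identically. A minimal sketch of a call site choosing a decode depth, mirroring the thread.cc hunk below (the helper name and boolean are made up):

    // Hypothetical helper: decode only as much CodeInfo as the caller needs.
    CodeInfo DecodeForVisit(const OatQuickMethodHeader* header, bool needs_dex_registers) {
      return CodeInfo(header,
                      needs_dex_registers
                          ? CodeInfo::DecodeFlags::Default        // full decode, incl. dex register maps
                          : CodeInfo::DecodeFlags::GcMasksOnly);  // stop after register/stack masks
    }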
@@ -416,10 +414,11 @@
   void AddSizeStats(/*out*/ Stats* parent) const;
 
   ALWAYS_INLINE static QuickMethodFrameInfo DecodeFrameInfo(const uint8_t* data) {
+    BitMemoryReader reader(data);
     return QuickMethodFrameInfo(
-        DecodeUnsignedLeb128(&data),
-        DecodeUnsignedLeb128(&data),
-        DecodeUnsignedLeb128(&data));
+        DecodeVarintBits(reader) * kStackAlignment,  // Decode packed_frame_size_ and unpack.
+        DecodeVarintBits(reader),  // core_spill_mask_.
+        DecodeVarintBits(reader));  // fp_spill_mask_.
   }
 
   typedef std::map<BitMemoryRegion, uint32_t, BitMemoryRegion::Less> DedupeMap;
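The rename from frame_size_in_bytes_ to packed_frame_size_ reflects that the stored value is now divided by kStackAlignment: frames are always stack-aligned, so the low bits carry no information and dropping them keeps the varint in the small inline range. A sketch of the pack/unpack pair (kStackAlignment is 16 on common ART targets; treat the exact value as an assumption):

    constexpr uint32_t kStackAlignment = 16;  // assumed; matches common targets

    constexpr uint32_t PackFrameSize(uint32_t frame_size_in_bytes) {
      return frame_size_in_bytes / kStackAlignment;  // e.g. 96 -> 6
    }

    constexpr uint32_t UnpackFrameSize(uint32_t packed_frame_size) {
      return packed_frame_size * kStackAlignment;    // e.g. 6 -> 96, as in DecodeFrameInfo
    }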
@@ -444,15 +443,15 @@
 
   void Decode(const uint8_t* data, DecodeFlags flags);
 
-  uint32_t frame_size_in_bytes_;
+  uint32_t packed_frame_size_;  // Frame size in kStackAlignment units.
   uint32_t core_spill_mask_;
   uint32_t fp_spill_mask_;
   uint32_t number_of_dex_registers_;
   BitTable<StackMap> stack_maps_;
-  BitTable<InlineInfo> inline_infos_;
-  BitTable<MethodInfo> method_infos_;
   BitTable<RegisterMask> register_masks_;
   BitTable<MaskInfo> stack_masks_;
+  BitTable<InlineInfo> inline_infos_;
+  BitTable<MethodInfo> method_infos_;
   BitTable<MaskInfo> dex_register_masks_;
   BitTable<DexRegisterMapInfo> dex_register_maps_;
   BitTable<DexRegisterInfo> dex_register_catalog_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 18dc0e8..0703a07 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3223,8 +3223,8 @@
   QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
   QUICK_ENTRY_POINT_INFO(pCheckInstanceOf)
   QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
-  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
-  QUICK_ENTRY_POINT_INFO(pInitializeType)
+  QUICK_ENTRY_POINT_INFO(pResolveTypeAndVerifyAccess)
+  QUICK_ENTRY_POINT_INFO(pResolveType)
   QUICK_ENTRY_POINT_INFO(pResolveString)
   QUICK_ENTRY_POINT_INFO(pSet8Instance)
   QUICK_ENTRY_POINT_INFO(pSet8Static)
@@ -3604,7 +3604,9 @@
       StackReference<mirror::Object>* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
           reinterpret_cast<uintptr_t>(cur_quick_frame));
       uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
-      CodeInfo code_info(method_header);
+      CodeInfo code_info(method_header, kPrecise
+          ? CodeInfo::DecodeFlags::Default  // We will need dex register maps.
+          : CodeInfo::DecodeFlags::GcMasksOnly);
       StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
       DCHECK(map.IsValid());
 
@@ -3621,7 +3623,7 @@
             vreg_info.VisitStack(&new_ref, i, this);
             if (ref != new_ref) {
               ref_addr->Assign(new_ref);
-           }
+            }
           }
         }
       }
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 01b6bf8..a1b8938 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -439,7 +439,7 @@
   }
   if (kTimeVerifyMethod) {
     uint64_t duration_ns = NanoTime() - start_ns;
-    if (duration_ns > MsToNs(100)) {
+    if (duration_ns > MsToNs(Runtime::Current()->GetVerifierLoggingThresholdMs())) {
       LOG(WARNING) << "Verification of " << dex_file->PrettyMethod(method_idx)
                    << " took " << PrettyDuration(duration_ns)
                    << (IsLargeMethod(verifier.CodeItem()) ? " (large method)" : "");
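Together with the parsed_options.cc, runtime_options.def, runtime.cc and runtime.h hunks above, this makes the previously hard-coded 100 ms slow-verification threshold configurable while keeping 100 ms as the default. A sample invocation (the flag comes from this patch; the rest of the command line is illustrative):

    dalvikvm -Xverifier-logging-threshold=500 -cp classes.dex Main

Only methods whose verification exceeds 500 ms would then produce the warning above.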
diff --git a/test/008-exceptions/multidex.jpp b/test/008-exceptions/multidex.jpp
deleted file mode 100644
index a3746f5..0000000
--- a/test/008-exceptions/multidex.jpp
+++ /dev/null
@@ -1,27 +0,0 @@
-BadError:
-  @@com.android.jack.annotations.ForceInMainDex
-  class BadError
-BadInit:
-  @@com.android.jack.annotations.ForceInMainDex
-  class BadInit
-BadErrorNoStringInit:
-  @@com.android.jack.annotations.ForceInMainDex
-  class BadErrorNoStringInit
-BadInitNoStringInit:
-  @@com.android.jack.annotations.ForceInMainDex
-  class BadInitNoStringInit
-BadSuperClass:
-  @@com.android.jack.annotations.ForceInMainDex
-  class BadSuperClass
-DerivedFromBadSuperClass:
-  @@com.android.jack.annotations.ForceInMainDex
-  class DerivedFromBadSuperClass
-Main:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Main
-MultiDexBadInit:
-  @@com.android.jack.annotations.ForceInMainDex
-  class MultiDexBadInit
-MultiDexBadInitWrapper1:
-  @@com.android.jack.annotations.ForceInMainDex
-  class MultiDexBadInitWrapper1
diff --git a/test/162-method-resolution/multidex.jpp b/test/162-method-resolution/multidex.jpp
deleted file mode 100644
index 5722f7f..0000000
--- a/test/162-method-resolution/multidex.jpp
+++ /dev/null
@@ -1,127 +0,0 @@
-Test1Base:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test1Base
-Test1Derived:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test1Derived
-Test1User2:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test1User2
-
-Test2Base:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test2Base
-Test2Derived:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test2Derived
-Test2Interface:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test2Interface
-Test2User:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test2User
-Test2User2:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test2User2
-
-Test3Base:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test3Base
-Test3Derived:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test3Derived
-Test3Interface:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test3Interface
-
-Test4Interface:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test4Interface
-Test4Derived:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test4Derived
-Test4User:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test4User
-
-Test5Interface:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test5Interface
-Test5Base:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test5Base
-Test5Derived:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test5Derived
-Test5User:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test5User
-Test5User2:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test5User2
-
-Test6Interface:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test6Interface
-Test6Derived:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test6Derived
-Test6User:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test6User
-Test6User2:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test6User2
-
-Test7Base:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test7Base
-Test7Interface:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test7Interface
-Test7Derived:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test7Derived
-Test7User:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test7User
-
-Test8Base:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test8Base
-Test8Derived:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test8Derived
-Test8User:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test8User
-Test8User2:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test8User2
-
-Test9Base:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test9Base
-Test9Derived:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test9Derived
-Test9User:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test9User
-Test9User2:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test9User2
-
-Test10Base:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test10Base
-Test10Interface:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test10Interface
-Test10User:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Test10User
-
-Main:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Main
diff --git a/test/1917-get-stack-frame/src/art/Test1917.java b/test/1917-get-stack-frame/src/art/Test1917.java
index def7530..75af43b 100644
--- a/test/1917-get-stack-frame/src/art/Test1917.java
+++ b/test/1917-get-stack-frame/src/art/Test1917.java
@@ -134,13 +134,20 @@
 
     System.out.println("Recurring 5 times on another thread");
     Thread thr = new Thread(
-        new RecurCount(5, new StackTraceGenerator(makePrintStackFramesConsumer())));
+        Thread.currentThread().getThreadGroup(),
+        new RecurCount(5, new StackTraceGenerator(makePrintStackFramesConsumer())),
+        "Recurring Thread 1",
+        10 * 1000000 /* 10 MB */);
     thr.start();
     thr.join();
 
     System.out.println("Recurring 5 times on another thread. Stack trace from main thread!");
     ThreadPauser pause = new ThreadPauser();
-    Thread thr2 = new Thread(new RecurCount(5, pause));
+    Thread thr2 = new Thread(
+        Thread.currentThread().getThreadGroup(),
+        new RecurCount(5, pause),
+        "Recurring Thread 2",
+        10 * 1000000 /* 10 MB */);
     thr2.start();
     pause.waitForOtherThreadToPause();
     new StackTraceGenerator(thr2, makePrintStackFramesConsumer()).run();
diff --git a/test/1934-jvmti-signal-thread/src/art/Test1934.java b/test/1934-jvmti-signal-thread/src/art/Test1934.java
index 308f17b..c71090b 100644
--- a/test/1934-jvmti-signal-thread/src/art/Test1934.java
+++ b/test/1934-jvmti-signal-thread/src/art/Test1934.java
@@ -71,10 +71,14 @@
     ensureInitialized(java.util.concurrent.locks.LockSupport.class);
   }
 
+  public static Thread createThread(Runnable r, String name) {
+    return new Thread(Thread.currentThread().getThreadGroup(), r, name, /* 10 MB */ 10 * 1000000);
+  }
+
   public static void testStopBeforeStart() throws Exception {
     final Throwable[] out_err = new Throwable[] { null, };
     final Object tst = new Object();
-    Thread target = new Thread(() -> { while (true) { } }, "waiting thread!");
+    Thread target = createThread(() -> { while (true) { } }, "waiting thread!");
     target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
     System.out.println("stopping other thread before starting");
     try {
@@ -93,7 +97,7 @@
   public static void testInterruptBeforeStart() throws Exception {
     final Throwable[] out_err = new Throwable[] { null, };
     final Object tst = new Object();
-    Thread target = new Thread(() -> { while (true) { } }, "waiting thread!");
+    Thread target = createThread(() -> { while (true) { } }, "waiting thread!");
     target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
     System.out.println("interrupting other thread before starting");
     try {
@@ -113,7 +117,7 @@
     final Throwable[] out_err = new Throwable[] { null, };
     final Object tst = new Object();
     final Semaphore sem = new Semaphore(0);
-    Thread target = new Thread(() -> {
+    Thread target = createThread(() -> {
       sem.release();
       while (true) {
         try {
@@ -140,7 +144,7 @@
     final Throwable[] out_err = new Throwable[] { null, };
     final Object tst = new Object();
     final Semaphore sem = new Semaphore(0);
-    Thread target = new Thread(() -> {
+    Thread target = createThread(() -> {
       sem.release();
       while (true) {
         try {
@@ -172,7 +176,7 @@
     final Throwable[] out_err = new Throwable[] { null, };
     final long native_monitor_id = allocNativeMonitor();
     final Semaphore sem = new Semaphore(0);
-    Thread target = new Thread(() -> {
+    Thread target = createThread(() -> {
       sem.release();
       nativeWaitForOtherThread(native_monitor_id);
       // We need to make sure we do something that can get the exception to be actually noticed.
@@ -214,7 +218,7 @@
   public static void testStopRecur() throws Exception {
     final Throwable[] out_err = new Throwable[] { null, };
     final Semaphore sem = new Semaphore(0);
-    Thread target = new Thread(() -> {
+    Thread target = createThread(() -> {
       sem.release();
       while (true) {
         doRecurCnt(null, 50);
@@ -235,7 +239,7 @@
   public static void testInterruptRecur() throws Exception {
     final Throwable[] out_err = new Throwable[] { null, };
     final Semaphore sem = new Semaphore(0);
-    Thread target = new Thread(() -> {
+    Thread target = createThread(() -> {
       sem.release();
       while (true) {
         doRecurCnt(() -> {
@@ -258,7 +262,7 @@
   public static void testStopSpinning() throws Exception {
     final Throwable[] out_err = new Throwable[] { null, };
     final Semaphore sem = new Semaphore(0);
-    Thread target = new Thread(() -> { sem.release(); while (true) {} }, "Spinning thread!");
+    Thread target = createThread(() -> { sem.release(); while (true) {} }, "Spinning thread!");
     target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
     target.start();
     sem.acquire();
@@ -273,7 +277,7 @@
 
   public static void testInterruptSpinning() throws Exception {
     final Semaphore sem = new Semaphore(0);
-    Thread target = new Thread(() -> {
+    Thread target = createThread(() -> {
       sem.release();
       while (!Thread.currentThread().isInterrupted()) { }
     }, "Spinning thread!");
diff --git a/test/462-checker-inlining-dex-files/multidex.jpp b/test/462-checker-inlining-dex-files/multidex.jpp
deleted file mode 100644
index ae55456..0000000
--- a/test/462-checker-inlining-dex-files/multidex.jpp
+++ /dev/null
@@ -1,8 +0,0 @@
-Main:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Main
-
-AAA:
-  @@com.android.jack.annotations.ForceInMainDex
-  class AAA
-
diff --git a/test/556-invoke-super/multidex.jpp b/test/556-invoke-super/multidex.jpp
deleted file mode 100644
index fe01801..0000000
--- a/test/556-invoke-super/multidex.jpp
+++ /dev/null
@@ -1,4 +0,0 @@
-Main:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Main*
-
diff --git a/test/563-checker-fakestring/smali/TestCase.smali b/test/563-checker-fakestring/smali/TestCase.smali
index 9d10bd7..0fe39ee 100644
--- a/test/563-checker-fakestring/smali/TestCase.smali
+++ b/test/563-checker-fakestring/smali/TestCase.smali
@@ -305,3 +305,35 @@
    return-object v0
 
 .end method
+
+## CHECK-START: java.lang.String TestCase.loopAndStringInitAndPhi(byte[], boolean) register (after)
+## CHECK:                        NewInstance
+## CHECK-NOT:                    NewInstance
+## CHECK-DAG:   <<Invoke1:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
+## CHECK-DAG:   <<Invoke2:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
+## CHECK-DAG:   <<Phi:l\d+>>     Phi [<<Invoke2>>,<<Invoke1>>]
+## CHECK-DAG:                    Return [<<Phi>>]
+.method public static loopAndStringInitAndPhi([BZ)Ljava/lang/String;
+   .registers 4
+
+   if-nez p1, :allocate_other
+   new-instance v0, Ljava/lang/String;
+
+   # Loop
+   :loop_header
+   if-eqz p1, :loop_exit
+   goto :loop_header
+
+   :loop_exit
+   const-string v1, "UTF8"
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   goto :exit
+
+   :allocate_other
+   const-string v1, "UTF8"
+   new-instance v0, Ljava/lang/String;
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   :exit
+   return-object v0
+
+.end method
diff --git a/test/563-checker-fakestring/src/Main.java b/test/563-checker-fakestring/src/Main.java
index 3639d59..df9e9dc 100644
--- a/test/563-checker-fakestring/src/Main.java
+++ b/test/563-checker-fakestring/src/Main.java
@@ -113,7 +113,6 @@
       result = (String) m.invoke(null, new Object[] { testData, false });
       assertEqual(testString, result);
     }
-
     {
       Method m = c.getMethod(
           "deoptimizeNewInstanceAfterLoop", int[].class, byte[].class, int.class);
@@ -127,6 +126,13 @@
         }
       }
     }
+    {
+      Method m = c.getMethod("loopAndStringInitAndPhi", byte[].class, boolean.class);
+      String result = (String) m.invoke(null, new Object[] { testData, true });
+      assertEqual(testString, result);
+      result = (String) m.invoke(null, new Object[] { testData, false });
+      assertEqual(testString, result);
+    }
   }
 
   public static boolean doThrow = false;
diff --git a/test/569-checker-pattern-replacement/multidex.jpp b/test/569-checker-pattern-replacement/multidex.jpp
deleted file mode 100644
index cfc8ad1..0000000
--- a/test/569-checker-pattern-replacement/multidex.jpp
+++ /dev/null
@@ -1,8 +0,0 @@
-Main:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Main
-
-BaseInMainDex:
-  @@com.android.jack.annotations.ForceInMainDex
-  class BaseInMainDex
-
diff --git a/test/616-cha-interface-default/multidex.jpp b/test/616-cha-interface-default/multidex.jpp
deleted file mode 100644
index b0d200e..0000000
--- a/test/616-cha-interface-default/multidex.jpp
+++ /dev/null
@@ -1,3 +0,0 @@
-Main:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Main
diff --git a/test/616-cha-proxy-method-inline/multidex.jpp b/test/616-cha-proxy-method-inline/multidex.jpp
deleted file mode 100644
index b0d200e..0000000
--- a/test/616-cha-proxy-method-inline/multidex.jpp
+++ /dev/null
@@ -1,3 +0,0 @@
-Main:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Main
diff --git a/test/626-const-class-linking/multidex.jpp b/test/626-const-class-linking/multidex.jpp
deleted file mode 100644
index c7a6648..0000000
--- a/test/626-const-class-linking/multidex.jpp
+++ /dev/null
@@ -1,27 +0,0 @@
-ClassPair:
-  @@com.android.jack.annotations.ForceInMainDex
-  class ClassPair
-DefiningLoader:
-  @@com.android.jack.annotations.ForceInMainDex
-  class DefiningLoader
-DelegatingLoader:
-  @@com.android.jack.annotations.ForceInMainDex
-  class DelegatingLoader
-Helper1:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Helper1
-Main:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Main
-MisbehavingLoader:
-  @@com.android.jack.annotations.ForceInMainDex
-  class MisbehavingLoader
-RacyLoader:
-  @@com.android.jack.annotations.ForceInMainDex
-  class RacyLoader
-RacyMisbehavingHelper:
-  @@com.android.jack.annotations.ForceInMainDex
-  class RacyMisbehavingHelper
-RacyMisbehavingLoader:
-  @@com.android.jack.annotations.ForceInMainDex
-  class RacyMisbehavingLoader
diff --git a/test/638-checker-inline-caches/multidex.jpp b/test/638-checker-inline-caches/multidex.jpp
deleted file mode 100644
index 69a2cc1..0000000
--- a/test/638-checker-inline-caches/multidex.jpp
+++ /dev/null
@@ -1,12 +0,0 @@
-Main:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Main
-Super:
-  @@com.android.jack.annotations.ForceInMainDex
-  class Super
-SubA:
-  @@com.android.jack.annotations.ForceInMainDex
-  class SubA
-SubB
-  @@com.android.jack.annotations.ForceInMainDex
-  class SubB
diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc
index 04c3fbf..96754c3 100644
--- a/test/674-hiddenapi/hiddenapi.cc
+++ b/test/674-hiddenapi/hiddenapi.cc
@@ -63,8 +63,8 @@
 
 static jobject NewInstance(JNIEnv* env, jclass klass) {
   jmethodID constructor = env->GetMethodID(klass, "<init>", "()V");
-  if (constructor == NULL) {
-    return NULL;
+  if (constructor == nullptr) {
+    return nullptr;
   }
   return env->NewObject(klass, constructor);
 }
@@ -74,7 +74,7 @@
   ScopedUtfChars utf_name(env, name);
   jfieldID field = is_static ? env->GetStaticFieldID(klass, utf_name.c_str(), "I")
                              : env->GetFieldID(klass, utf_name.c_str(), "I");
-  if (field == NULL) {
+  if (field == nullptr) {
     env->ExceptionClear();
     return JNI_FALSE;
   }
@@ -87,7 +87,7 @@
   ScopedUtfChars utf_name(env, name);
   jfieldID field = is_static ? env->GetStaticFieldID(klass, utf_name.c_str(), "I")
                              : env->GetFieldID(klass, utf_name.c_str(), "I");
-  if (field == NULL) {
+  if (field == nullptr) {
     env->ExceptionClear();
     return JNI_FALSE;
   }
@@ -95,7 +95,7 @@
     env->GetStaticIntField(klass, field);
   } else {
     jobject obj = NewInstance(env, klass);
-    if (obj == NULL) {
+    if (obj == nullptr) {
       env->ExceptionDescribe();
       env->ExceptionClear();
       return JNI_FALSE;
@@ -117,7 +117,7 @@
   ScopedUtfChars utf_name(env, name);
   jfieldID field = is_static ? env->GetStaticFieldID(klass, utf_name.c_str(), "I")
                              : env->GetFieldID(klass, utf_name.c_str(), "I");
-  if (field == NULL) {
+  if (field == nullptr) {
     env->ExceptionClear();
     return JNI_FALSE;
   }
@@ -125,7 +125,7 @@
     env->SetStaticIntField(klass, field, 42);
   } else {
     jobject obj = NewInstance(env, klass);
-    if (obj == NULL) {
+    if (obj == nullptr) {
       env->ExceptionDescribe();
       env->ExceptionClear();
       return JNI_FALSE;
@@ -147,7 +147,7 @@
   ScopedUtfChars utf_name(env, name);
   jmethodID method = is_static ? env->GetStaticMethodID(klass, utf_name.c_str(), "()I")
                                : env->GetMethodID(klass, utf_name.c_str(), "()I");
-  if (method == NULL) {
+  if (method == nullptr) {
     env->ExceptionClear();
     return JNI_FALSE;
   }
@@ -160,7 +160,7 @@
   ScopedUtfChars utf_name(env, name);
   jmethodID method = is_static ? env->GetStaticMethodID(klass, utf_name.c_str(), "()I")
                                : env->GetMethodID(klass, utf_name.c_str(), "()I");
-  if (method == NULL) {
+  if (method == nullptr) {
     env->ExceptionClear();
     return JNI_FALSE;
   }
@@ -169,7 +169,7 @@
     env->CallStaticIntMethodA(klass, method, nullptr);
   } else {
     jobject obj = NewInstance(env, klass);
-    if (obj == NULL) {
+    if (obj == nullptr) {
       env->ExceptionDescribe();
       env->ExceptionClear();
       return JNI_FALSE;
@@ -191,7 +191,7 @@
   ScopedUtfChars utf_name(env, name);
   jmethodID method = is_static ? env->GetStaticMethodID(klass, utf_name.c_str(), "()I")
                                : env->GetMethodID(klass, utf_name.c_str(), "()I");
-  if (method == NULL) {
+  if (method == nullptr) {
     env->ExceptionClear();
     return JNI_FALSE;
   }
@@ -200,7 +200,7 @@
     env->CallStaticIntMethod(klass, method);
   } else {
     jobject obj = NewInstance(env, klass);
-    if (obj == NULL) {
+    if (obj == nullptr) {
       env->ExceptionDescribe();
       env->ExceptionClear();
       return JNI_FALSE;
@@ -224,7 +224,7 @@
     JNIEnv* env, jclass, jclass klass, jstring args) {
   ScopedUtfChars utf_args(env, args);
   jmethodID constructor = env->GetMethodID(klass, "<init>", utf_args.c_str());
-  if (constructor == NULL) {
+  if (constructor == nullptr) {
     env->ExceptionClear();
     return JNI_FALSE;
   }
@@ -236,7 +236,7 @@
     JNIEnv* env, jclass, jclass klass, jstring args) {
   ScopedUtfChars utf_args(env, args);
   jmethodID constructor = env->GetMethodID(klass, "<init>", utf_args.c_str());
-  if (constructor == NULL) {
+  if (constructor == nullptr) {
     env->ExceptionClear();
     return JNI_FALSE;
   }
@@ -261,7 +261,7 @@
     JNIEnv* env, jclass, jclass klass, jstring args) {
   ScopedUtfChars utf_args(env, args);
   jmethodID constructor = env->GetMethodID(klass, "<init>", utf_args.c_str());
-  if (constructor == NULL) {
+  if (constructor == nullptr) {
     env->ExceptionClear();
     return JNI_FALSE;
   }
diff --git a/test/683-clinit-inline-static-invoke/expected.txt b/test/683-clinit-inline-static-invoke/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/683-clinit-inline-static-invoke/expected.txt
diff --git a/test/683-clinit-inline-static-invoke/info.txt b/test/683-clinit-inline-static-invoke/info.txt
new file mode 100644
index 0000000..32e5cdc
--- /dev/null
+++ b/test/683-clinit-inline-static-invoke/info.txt
@@ -0,0 +1,3 @@
+Regression test for a bug where the class initialization check for an inlined
+call to a static method used a type index from the wrong dex file because the
+current dex file does not have a TypeId for it. This was likely to cause a crash.
diff --git a/test/683-clinit-inline-static-invoke/src-multidex/MyTimeZone.java b/test/683-clinit-inline-static-invoke/src-multidex/MyTimeZone.java
new file mode 100644
index 0000000..b74b310
--- /dev/null
+++ b/test/683-clinit-inline-static-invoke/src-multidex/MyTimeZone.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import android.icu.util.TimeZone;
+
+public abstract class MyTimeZone extends TimeZone {
+  // Reference to MyTimeZone.getDefaultTimeZoneType() shall resolve
+  // to TimeZone.getDefaultTimeZoneType(), which should be easily inlined.
+}
diff --git a/test/683-clinit-inline-static-invoke/src/Main.java b/test/683-clinit-inline-static-invoke/src/Main.java
new file mode 100644
index 0000000..b4ccfaa
--- /dev/null
+++ b/test/683-clinit-inline-static-invoke/src/Main.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    // The following is a simple static field getter that can be inlined, referenced
+    // through a subclass with the declaring class having no TypeId in current DexFile.
+    // When we inline this getter, we're left with HLoadClass+HClinitCheck, which cannot
+    // be merged back into the InvokeStaticOrDirect for an implicit class init check.
+    // The declaring class is in the boot image, so the LoadClass can load it using the
+    // .data.bimg.rel.ro section. However, the ClinitCheck entrypoint previously took
+    // a type index of the declaring class, and since we did not have a valid
+    // TypeId in the current DexFile, we erroneously provided the type index from the
+    // declaring DexFile and that caused a crash. This was fixed by changing the
+    // ClinitCheck entrypoint to take the Class reference from LoadClass.
+    int dummy = MyTimeZone.getDefaultTimeZoneType();
+  }
+}
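The comment above describes the actual fix only in prose. As a hedged sketch of what the entrypoint signature change looks like (names modeled on ART's quick entrypoints; the exact declarations may differ):

    // Before (sketch): the entrypoint received a type index that is only
    // meaningful relative to one particular dex file.
    //   extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self);

    // After (sketch): the compiler hands over the Class reference already
    // produced by HLoadClass, so no dex-file-relative index is involved.
    extern "C" mirror::Class* artInitializeStaticStorageFromCode(mirror::Class* klass, Thread* self);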
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index 01d374b..065b854 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -1,9 +1,9 @@
 ---
 true true
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780000, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780004, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780001, length=-1]
@@ -44,14 +44,14 @@
 ---
 root@root --(jni-global)--> 1@1000 [size=16, length=-1]
 root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
 root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780005, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780009, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780006, length=-1]
@@ -90,18 +90,18 @@
 5@1002 --(field@9)--> 6@1000 [size=16, length=-1]
 6@1000 --(class)--> 1000@0 [size=123456780005, length=-1]
 ---
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
 ---
 3@1001 --(class)--> 1001@0 [size=123456780011, length=-1]
 ---
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
 ---
 3@1001 --(class)--> 1001@0 [size=123456780016, length=-1]
 ---
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
 ---
 1001@0 --(superclass)--> 1000@0 [size=123456780020, length=-1]
 3@1001 --(class)--> 1001@0 [size=123456780021, length=-1]
@@ -110,14 +110,14 @@
 ---
 root@root --(jni-global)--> 1@1000 [size=16, length=-1]
 root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
 root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
 ---
 1001@0 --(superclass)--> 1000@0 [size=123456780025, length=-1]
 3@1001 --(class)--> 1001@0 [size=123456780026, length=-1]
@@ -198,10 +198,10 @@
 ---
 ---
 ---- untagged objects
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780050, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780054, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780051, length=-1]
@@ -242,14 +242,14 @@
 ---
 root@root --(jni-global)--> 1@1000 [size=16, length=-1]
 root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
 root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780055, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780059, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780056, length=-1]
@@ -289,9 +289,9 @@
 6@1000 --(class)--> 1000@0 [size=123456780055, length=-1]
 ---
 ---- tagged classes
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780060, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780064, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780061, length=-1]
@@ -316,9 +316,9 @@
 5@1002 --(field@8)--> 500@0 [size=20, length=2]
 6@1000 --(class)--> 1000@0 [size=123456780060, length=-1]
 ---
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780065, length=-1]
 1002@0 --(interface)--> 2001@0 [size=123456780069, length=-1]
 1002@0 --(superclass)--> 1001@0 [size=123456780066, length=-1]
diff --git a/test/knownfailures.json b/test/knownfailures.json
index a7c71a5..7322a35 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -548,6 +548,18 @@
         "bug": "b/33650497"
     },
     {
+        "tests": "1946-list-descriptors",
+        "description": "ASAN+interp-ac/switch interpreter means this is too slow to finish in the timeout",
+        "variant": "target & interp-ac",
+        "env_vars": {"SANITIZE_TARGET": "address"}
+    },
+    {
+        "tests": "1946-list-descriptors",
+        "description": "ASAN+interp-ac/switch interpreter means this is too slow to finish in the timeout",
+        "variant": "host & interp-ac",
+        "env_vars": {"SANITIZE_HOST": "address"}
+    },
+    {
         "tests": "202-thread-oome",
         "description": "ASAN aborts when large thread stacks are requested.",
         "variant": "host",
@@ -666,6 +678,11 @@
         "description": ["Requires zip, which isn't available on device"]
     },
     {
+        "tests": ["683-clinit-inline-static-invoke"],
+        "variant": "jvm",
+        "description": ["Uses android-specific boot image class."]
+    },
+    {
         "tests": ["1941-dispose-stress", "522-checker-regression-monitor-exit"],
         "variant": "jvm",
         "bug": "b/73888836",
@@ -1016,15 +1033,15 @@
     },
     {
         "tests": ["712-varhandle-invocations"],
-        "variant": "interpreter & gcstress",
+        "variant": "gcstress",
         "bug": "b/111630237",
         "description": ["Test timing out under gcstress possibly due to slower unwinding by libbacktrace"]
     },
     {
-        "tests": ["712-varhandle-invocations", "624-checker-stringops"],
+        "tests": ["624-checker-stringops"],
         "variant": "optimizing & gcstress | speed-profile & gcstress",
         "bug": "b/111545159",
-        "description": ["These tests seem to expose some error with our gc when run in these configurations"]
+        "description": ["Seem to expose some error with our gc when run in these configurations"]
     },
     {
         "tests": ["021-string2"],
@@ -1033,17 +1050,8 @@
         "description": ["Stack too big."]
     },
     {
-        "tests": ["1934-jvmti-signal-thread"],
-        "env_vars": {"SANITIZE_HOST": "address"},
-        "variant": "interp-ac",
-        "bug": "b/111837501",
-        "description": ["Unexpected exception thrown"]
-    },
-    {
-        "tests": ["1917-get-stack-frame"],
-        "env_vars": {"SANITIZE_HOST": "address"},
-        "variant": "interp-ac",
-        "bug": "b/112071036",
-        "description": ["Unexpected exception thrown"]
+        "tests": ["566-polymorphic-inlining"],
+        "variant": "jit & debuggable",
+        "description": ["We do not inline with debuggable."]
     }
 ]
diff --git a/tools/Android.mk b/tools/Android.mk
index 9ecf0cd..e90f5f5 100644
--- a/tools/Android.mk
+++ b/tools/Android.mk
@@ -32,3 +32,5 @@
 LOCAL_SRC_FILES := art
 LOCAL_MODULE_STEM := art
 include $(BUILD_PREBUILT)
+
+include $(LOCAL_PATH)/class2greylist/test/Android.mk
diff --git a/tools/art_verifier/Android.bp b/tools/art_verifier/Android.bp
new file mode 100644
index 0000000..afd52fb
--- /dev/null
+++ b/tools/art_verifier/Android.bp
@@ -0,0 +1,48 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+art_cc_defaults {
+    name: "art_verifier-defaults",
+    defaults: ["art_defaults"],
+    host_supported: true,
+    srcs: [
+        "art_verifier.cc",
+    ],
+    header_libs: [
+        "art_cmdlineparser_headers",
+    ],
+    static_libs: art_static_dependencies + [
+        "libart",
+        "libartbase",
+        "libdexfile",
+        "libprofile",
+    ],
+    target: {
+        android: {
+            static_libs: [
+                "libtombstoned_client_static",
+            ],
+        },
+        darwin: {
+            enabled: false,
+        },
+    },
+}
+
+art_cc_binary {
+    name: "art_verifier",
+    defaults: ["art_verifier-defaults"],
+}
diff --git a/tools/art_verifier/art_verifier.cc b/tools/art_verifier/art_verifier.cc
new file mode 100644
index 0000000..fc62410
--- /dev/null
+++ b/tools/art_verifier/art_verifier.cc
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+
+#include "base/logging.h"
+#include "base/os.h"
+#include "class_linker-inl.h"
+#include "dex/art_dex_file_loader.h"
+#include "dex/class_accessor-inl.h"
+#include "dex/dex_file-inl.h"
+#include "interpreter/unstarted_runtime.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "verifier/method_verifier.h"
+#include "well_known_classes.h"
+
+#include <sys/stat.h>
+#include "cmdline.h"
+
+namespace art {
+
+namespace {
+
+bool LoadDexFile(const std::string& dex_filename,
+                 std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+  const ArtDexFileLoader dex_file_loader;
+  std::string error_msg;
+  if (!dex_file_loader.Open(dex_filename.c_str(),
+                            dex_filename.c_str(),
+                            /* verify */ true,
+                            /* verify_checksum */ true,
+                            &error_msg,
+                            dex_files)) {
+    LOG(ERROR) << error_msg;
+    return false;
+  }
+  return true;
+}
+
+jobject Install(Runtime* runtime,
+                std::vector<std::unique_ptr<const DexFile>>& in,
+                std::vector<const DexFile*>* out)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  Thread* self = Thread::Current();
+  CHECK(self != nullptr);
+
+  // Need well-known-classes.
+  WellKnownClasses::Init(self->GetJniEnv());
+  // Need a class loader. Fake that we're a compiler.
+  // Note: this will run initializers through the unstarted runtime, so make sure it's
+  //       initialized.
+  interpreter::UnstartedRuntime::Initialize();
+
+  for (std::unique_ptr<const DexFile>& dex_file : in) {
+    out->push_back(dex_file.release());
+  }
+
+  ClassLinker* class_linker = runtime->GetClassLinker();
+
+  jobject class_loader = class_linker->CreatePathClassLoader(self, *out);
+
+  // Need to register dex files to get a working dex cache.
+  for (const DexFile* dex_file : *out) {
+    ObjPtr<mirror::DexCache> dex_cache = class_linker->RegisterDexFile(
+        *dex_file, self->DecodeJObject(class_loader)->AsClassLoader());
+    CHECK(dex_cache != nullptr);
+  }
+
+  return class_loader;
+}
+
+struct MethodVerifierArgs : public CmdlineArgs {
+ protected:
+  using Base = CmdlineArgs;
+
+  virtual ParseStatus ParseCustom(const StringPiece& option,
+                                  std::string* error_msg) OVERRIDE {
+    {
+      ParseStatus base_parse = Base::ParseCustom(option, error_msg);
+      if (base_parse != kParseUnknownArgument) {
+        return base_parse;
+      }
+    }
+
+    if (option.starts_with("--dex-file=")) {
+      dex_filename_ = option.substr(strlen("--dex-file=")).data();
+    } else if (option == "--dex-file-verifier") {
+      dex_file_verifier_ = true;
+    } else if (option == "--verbose") {
+      method_verifier_verbose_ = true;
+    } else if (option == "--verbose-debug") {
+      method_verifier_verbose_debug_ = true;
+    } else if (option.starts_with("--repetitions=")) {
+      char* end;
+      repetitions_ = strtoul(option.substr(strlen("--repetitions=")).data(), &end, 10);
+    } else {
+      return kParseUnknownArgument;
+    }
+
+    return kParseOk;
+  }
+
+  virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+    // Perform the parent checks.
+    ParseStatus parent_checks = Base::ParseChecks(error_msg);
+    if (parent_checks != kParseOk) {
+      return parent_checks;
+    }
+
+    // Perform our own checks.
+    if (dex_filename_ == nullptr) {
+      *error_msg = "--dex-filename not set";
+      return kParseError;
+    }
+
+    return kParseOk;
+  }
+
+  virtual std::string GetUsage() const {
+    std::string usage;
+
+    usage +=
+        "Usage: method_verifier_cmd [options] ...\n"
+        // Dex file is required.
+        "  --dex-file=<file.dex>: specifies an input dex file.\n"
+        "      Example: --dex-file=app.apk\n"
+        "  --dex-file-verifier: only run dex file verifier.\n"
+        "  --verbose: use verbose verifier mode.\n"
+        "  --verbose-debug: use verbose verifier debug mode.\n"
+        "  --repetitions=<count>: repeat the verification count times.\n"
+        "\n";
+
+    usage += Base::GetUsage();
+
+    return usage;
+  }
+
+ public:
+  const char* dex_filename_ = nullptr;
+
+  bool dex_file_verifier_ = false;
+
+  bool method_verifier_verbose_ = false;
+  bool method_verifier_verbose_debug_ = false;
+
+  size_t repetitions_ = 0u;
+};
+
+struct MethodVerifierMain : public CmdlineMain<MethodVerifierArgs> {
+  bool NeedsRuntime() OVERRIDE {
+    return true;
+  }
+
+  bool ExecuteWithoutRuntime() OVERRIDE {
+    LOG(FATAL) << "Unreachable";
+    UNREACHABLE();
+  }
+
+  bool ExecuteWithRuntime(Runtime* runtime) OVERRIDE {
+    CHECK(args_ != nullptr);
+
+    const size_t dex_reps = args_->dex_file_verifier_
+                                // If we're focused on the dex file verifier, use the
+                                // repetitions parameter.
+                                ? std::max(static_cast<size_t>(1u), args_->repetitions_)
+                                // Otherwise just load the dex files once.
+                                : 1;
+
+    std::vector<std::unique_ptr<const DexFile>> unique_dex_files;
+    for (size_t i = 0; i != dex_reps; ++i) {
+      if (args_->dex_file_verifier_ && args_->repetitions_ != 0) {
+        LOG(INFO) << "Repetition " << (i + 1);
+      }
+      unique_dex_files.clear();
+      if (!LoadDexFile(args_->dex_filename_, &unique_dex_files)) {
+        return false;
+      }
+    }
+    if (args_->dex_file_verifier_) {
+      // We're done here.
+      return true;
+    }
+
+    ScopedObjectAccess soa(Thread::Current());
+    std::vector<const DexFile*> dex_files;
+    jobject class_loader = Install(runtime, unique_dex_files, &dex_files);
+    CHECK(class_loader != nullptr);
+
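+    // Use handles so the loader and the class under test remain valid across
+    // suspension points if the GC moves objects.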
+    StackHandleScope<2> scope(soa.Self());
+    Handle<mirror::ClassLoader> h_loader = scope.NewHandle(
+        soa.Decode<mirror::ClassLoader>(class_loader));
+    MutableHandle<mirror::Class> h_klass(scope.NewHandle<mirror::Class>(nullptr));
+
+    if (args_->method_verifier_verbose_) {
+      gLogVerbosity.verifier = true;
+    }
+    if (args_->method_verifier_verbose_debug_) {
+      gLogVerbosity.verifier_debug = true;
+    }
+
+    const size_t verifier_reps = std::max(static_cast<size_t>(1u), args_->repetitions_);
+
+    ClassLinker* class_linker = runtime->GetClassLinker();
+    for (size_t i = 0; i != verifier_reps; ++i) {
+      if (args_->repetitions_ != 0) {
+        LOG(INFO) << "Repetition " << (i + 1);
+      }
+      for (const DexFile* dex_file : dex_files) {
+        for (ClassAccessor accessor : dex_file->GetClasses()) {
+          const char* descriptor = accessor.GetDescriptor();
+          h_klass.Assign(class_linker->FindClass(soa.Self(), descriptor, h_loader));
+          if (h_klass == nullptr || h_klass->IsErroneous()) {
+            if (args_->repetitions_ == 0) {
+              LOG(WARNING) << "Could not load " << descriptor;
+            }
+            soa.Self()->ClearException();
+            continue;
+          }
+          std::string error_msg;
+          verifier::FailureKind res =
+            verifier::MethodVerifier::VerifyClass(soa.Self(),
+                                                  h_klass.Get(),
+                                                  runtime->GetCompilerCallbacks(),
+                                                  true,
+                                                  verifier::HardFailLogMode::kLogWarning,
+                                                  &error_msg);
+          if (args_->repetitions_ == 0) {
+            LOG(INFO) << descriptor << ": " << res << " " << error_msg;
+          }
+        }
+      }
+    }
+
+    return true;
+  }
+};
+
+}  // namespace
+
+}  // namespace art
+
+int main(int argc, char** argv) {
+  // Output all logging to stderr.
+  android::base::SetLogger(android::base::StderrLogger);
+
+  art::MethodVerifierMain main;
+  return main.Main(argc, argv);
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
index 6685752..6b9ef16 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
@@ -81,14 +81,19 @@
         mStatus.debug("Visit member %s : %s", member.getName(), member.getSignature());
         for (AnnotationEntry a : member.getAnnotationEntries()) {
             if (mAnnotationType.equals(a.getAnnotationType())) {
-                mStatus.debug("Method has annotation %s", mAnnotationType);
+                mStatus.debug("Member has annotation %s", mAnnotationType);
+                boolean bridge = (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
+                if (bridge) {
+                    mStatus.debug("Member is a bridge", mAnnotationType);
+                }
                 String signature = String.format(Locale.US, signatureFormatString,
                         getClassDescriptor(definingClass), member.getName(), member.getSignature());
                 for (ElementValuePair property : a.getElementValuePairs()) {
                     switch (property.getNameString()) {
                         case EXPECTED_SIGNATURE:
                             String expected = property.getValue().stringifyValue();
-                            if (!signature.equals(expected)) {
+                            // Don't enforce for bridge methods; they're generated so won't match.
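+                            // (For a generic override such as method(T), javac also emits a
+                            // bridge like method(Ljava/lang/Object;)V; its signature cannot
+                            // match the erased signature named in the annotation.)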
+                            if (!bridge && !signature.equals(expected)) {
                                 error(definingClass, member,
                                         "Expected signature does not match generated:\n"
                                                 + "Expected:  %s\n"
diff --git a/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java b/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java
index 2d97218..a4ad20c 100644
--- a/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java
+++ b/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java
@@ -19,20 +19,22 @@
 import static com.google.common.truth.Truth.assertThat;
 
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
-import static org.mockito.MockitoAnnotations.initMocks;
+import static org.mockito.Mockito.withSettings;
 
-import com.android.class2greylist.Status;
 import com.android.class2greylist.AnnotationVisitor;
+import com.android.class2greylist.Status;
 
 import com.google.common.base.Joiner;
 
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TestName;
 import org.mockito.ArgumentCaptor;
-import org.mockito.Mock;
 
 import java.io.IOException;
 
@@ -40,13 +42,17 @@
 
     private static final String ANNOTATION = "Lannotation/Anno;";
 
+    @Rule
+    public TestName mTestName = new TestName();
+
     private Javac mJavac;
-    @Mock
     private Status mStatus;
 
     @Before
     public void setup() throws IOException {
-        initMocks(this);
+        System.out.println(String.format("\n============== STARTING TEST: %s ==============\n",
+                mTestName.getMethodName()));
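+        // verboseLogging() logs every interaction with the mock, which makes
+        // failures in these tests easier to diagnose.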
+        mStatus = mock(Status.class, withSettings().verboseLogging());
         mJavac = new Javac();
         mJavac.addSource("annotation.Anno", Joiner.on('\n').join(
                 "package annotation;",
@@ -199,4 +205,125 @@
         verify(mStatus, never()).greylistEntry(any(String.class));
     }
 
+    @Test
+    public void testMethodArgGenerics() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class<T extends String> {",
+                "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
+                "  public void method(T arg) {}",
+                "}"));
+        assertThat(mJavac.compile()).isTrue();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+                .visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        verify(mStatus, times(1)).greylistEntry(greylist.capture());
+        assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
+    }
+
+    @Test
+    public void testOverrideMethodWithBridge() throws IOException {
+        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+                "package a.b;",
+                "abstract class Base<T> {",
+                "  protected abstract void method(T arg);",
+                "}"));
+
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class<T extends String> extends Base<T> {",
+                "  @Override",
+                "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
+                "  public void method(T arg) {}",
+                "}"));
+        assertThat(mJavac.compile()).isTrue();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, mStatus)
+                .visit();
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+                .visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        // A bridge method is generated for the above, so we expect 2 greylist entries.
+        verify(mStatus, times(2)).greylistEntry(greylist.capture());
+        assertThat(greylist.getAllValues()).containsExactly(
+                "La/b/Class;->method(Ljava/lang/Object;)V",
+                "La/b/Class;->method(Ljava/lang/String;)V");
+    }
+
+    @Test
+    public void testOverridePublicMethodWithBridge() throws IOException {
+        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+                "package a.b;",
+                "public abstract class Base<T> {",
+                "  public void method(T arg) {}",
+                "}"));
+
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "public class Class<T extends String> extends Base<T> {",
+                "  @Override",
+                "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
+                "  public void method(T arg) {}",
+                "}"));
+        assertThat(mJavac.compile()).isTrue();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, mStatus)
+                .visit();
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+                .visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        // A bridge method is generated for the above, so we expect 2 greylist entries.
+        verify(mStatus, times(2)).greylistEntry(greylist.capture());
+        assertThat(greylist.getAllValues()).containsExactly(
+                "La/b/Class;->method(Ljava/lang/Object;)V",
+                "La/b/Class;->method(Ljava/lang/String;)V");
+    }
+
+    @Test
+    public void testBridgeMethodsFromInterface() throws IOException {
+        mJavac.addSource("a.b.Interface", Joiner.on('\n').join(
+                "package a.b;",
+                "public interface Interface {",
+                "  public void method(Object arg);",
+                "}"));
+
+        mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Anno;",
+                "class Base {",
+                "  @Anno(expectedSignature=\"La/b/Base;->method(Ljava/lang/Object;)V\")",
+                "  public void method(Object arg) {}",
+                "}"));
+
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "public class Class extends Base implements Interface {",
+                "}"));
+        assertThat(mJavac.compile()).isTrue();
+
+        new AnnotationVisitor(
+                mJavac.getCompiledClass("a.b.Interface"), ANNOTATION, mStatus).visit();
+        new AnnotationVisitor(
+                mJavac.getCompiledClass("a.b.Base"), ANNOTATION, mStatus).visit();
+        new AnnotationVisitor(
+                mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        // A bridge method is generated in Class for the interface method, so we
+        // expect 2 greylist entries: the bridge in Class and the original in Base.
+        verify(mStatus, times(2)).greylistEntry(greylist.capture());
+        assertThat(greylist.getAllValues()).containsExactly(
+                "La/b/Class;->method(Ljava/lang/Object;)V",
+                "La/b/Base;->method(Ljava/lang/Object;)V");
+    }
 }
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index 0381381..e4bec06 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -75,7 +75,9 @@
   UsageError("");
   UsageError("  Command \"list\": dump lists of public and private API");
   UsageError("    --boot-dex=<filename>: dex file which belongs to boot class path");
-  UsageError("    --stub-dex=<filename>: dex/apk file which belongs to SDK API stubs");
+  UsageError("    --stub-classpath=<filenames>: colon-separated list of dex/apk files");
+  UsageError("        which form API stubs of boot class path. Multiple classpaths can");
+  UsageError("        be specified");
   UsageError("");
   UsageError("    --out-public=<filename>: output file for a list of all public APIs");
   UsageError("    --out-private=<filename>: output file for a list of all private APIs");
@@ -121,7 +123,7 @@
     return list;
   }
 
-  inline bool IsVisible() const { return HasAccessFlags(kAccPublic); }
+  inline bool IsPublic() const { return HasAccessFlags(kAccPublic); }
 
   inline bool Equals(const DexClass& other) const {
     bool equals = GetDescriptor() == other.GetDescriptor();
@@ -178,11 +180,10 @@
 
   inline bool IsMethod() const { return it_.IsAtMethod(); }
   inline bool IsVirtualMethod() const { return it_.IsAtVirtualMethod(); }
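+  // kAccConstructor is a dex-only access flag marking <init> and <clinit> methods.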
+  inline bool IsConstructor() const { return IsMethod() && HasAccessFlags(kAccConstructor); }
 
-  // Returns true if the member is public/protected and is in a public class.
-  inline bool IsVisible() const {
-    return GetDeclaringClass().IsVisible() &&
-           (HasAccessFlags(kAccPublic) || HasAccessFlags(kAccProtected));
+  inline bool IsPublicOrProtected() const {
+    return HasAccessFlags(kAccPublic) || HasAccessFlags(kAccProtected);
   }
 
   // Constructs a string with a unique signature of this class member.
@@ -344,6 +345,24 @@
     return ForEachResolvableMember_Impl(other, fn) != ResolutionResult::kNotFound;
   }
 
+  // Returns true if this class contains at least one member matching `other`.
+  bool HasMatchingMember(const DexMember& other) {
+    return ForEachMatchingMember(
+        other, [](const DexMember&) { return true; }) != ResolutionResult::kNotFound;
+  }
+
+  // Recursively iterates over all subclasses of this class and invokes `fn`
+  // on each one. If `fn` returns false for a particular subclass, exploring its
+  // subclasses is skipped.
+  template<typename Fn>
+  void ForEachSubClass(Fn fn) {
+    for (HierarchyClass* subclass : extended_by_) {
+      if (fn(subclass)) {
+        subclass->ForEachSubClass(fn);
+      }
+    }
+  }
+
  private:
   // Result of resolution which takes into account whether the member was found
   // for the first time or not. This is just a performance optimization to prevent
@@ -438,7 +457,7 @@
 
 class Hierarchy FINAL {
  public:
-  explicit Hierarchy(ClassPath& class_path) : class_path_(class_path) {
+  explicit Hierarchy(ClassPath& classpath) : classpath_(classpath) {
     BuildClassHierarchy();
   }
 
@@ -454,6 +473,48 @@
     return (klass != nullptr) && klass->ForEachResolvableMember(other, fn);
   }
 
+  // Returns true if `member`, which belongs to this classpath, is visible to
+  // code in child class loaders.
+  bool IsMemberVisible(const DexMember& member) {
+    if (!member.IsPublicOrProtected()) {
+      // Member is private or package-private. Cannot be visible.
+      return false;
+    } else if (member.GetDeclaringClass().IsPublic()) {
+      // Member is public or protected, and class is public. It must be visible.
+      return true;
+    } else if (member.IsConstructor()) {
+      // Member is public or protected constructor and class is not public.
+      // Must be hidden because it cannot be implicitly exposed by a subclass.
+      return false;
+    } else {
+      // Member is public or protected method, but class is not public. Check if
+      // it is exposed through a public subclass.
+      // Example code (`foo` exposed by ClassB):
+      //   class ClassA { public void foo() { ... } }
+      //   public class ClassB extends ClassA {}
+      HierarchyClass* klass = FindClass(member.GetDeclaringClass().GetDescriptor());
+      CHECK(klass != nullptr);
+      bool visible = false;
+      klass->ForEachSubClass([&visible, &member](HierarchyClass* subclass) {
+        if (subclass->HasMatchingMember(member)) {
+          // There is a member which matches `member` in `subclass`, either
+          // a virtual method overriding `member` or a field overshadowing
+          // `member`. In either case, `member` remains hidden.
+          CHECK(member.IsVirtualMethod() || !member.IsMethod());
+          return false;  // do not explore deeper
+        } else if (subclass->GetOneDexClass().IsPublic()) {
+          // `subclass` inherits and exposes `member`.
+          visible = true;
+          return false;  // do not explore deeper
+        } else {
+          // `subclass` inherits `member` but does not expose it.
+          return true;   // explore deeper
+        }
+      });
+      return visible;
+    }
+  }
+
  private:
   HierarchyClass* FindClass(const std::string& descriptor) {
     auto it = classes_.find(descriptor);
@@ -467,7 +528,7 @@
   void BuildClassHierarchy() {
     // Create one HierarchyClass entry in `classes_` per class descriptor
     // and add all DexClass objects with the same descriptor to that entry.
-    class_path_.ForEachDexClass([this](DexClass& klass) {
+    classpath_.ForEachDexClass([this](DexClass& klass) {
       classes_[klass.GetDescriptor()].AddDexClass(klass);
     });
 
@@ -494,7 +555,7 @@
     }
   }
 
-  ClassPath& class_path_;
+  ClassPath& classpath_;
   std::map<std::string, HierarchyClass> classes_;
 };
 
@@ -547,8 +608,9 @@
           const StringPiece option(argv[i]);
           if (option.starts_with("--boot-dex=")) {
             boot_dex_paths_.push_back(option.substr(strlen("--boot-dex=")).ToString());
-          } else if (option.starts_with("--stub-dex=")) {
-            stub_dex_paths_.push_back(option.substr(strlen("--stub-dex=")).ToString());
+          } else if (option.starts_with("--stub-classpath=")) {
+            stub_classpaths_.push_back(android::base::Split(
+                option.substr(strlen("--stub-classpath=")).ToString(), ":"));
           } else if (option.starts_with("--out-public=")) {
             out_public_path_ = option.substr(strlen("--out-public=")).ToString();
           } else if (option.starts_with("--out-private=")) {
@@ -578,10 +640,10 @@
     OpenApiFile(blacklist_path_, api_list, HiddenApiAccessFlags::kBlacklist);
 
     // Open all dex files.
-    ClassPath boot_class_path(boot_dex_paths_, /* open_writable */ true);
+    ClassPath boot_classpath(boot_dex_paths_, /* open_writable */ true);
 
     // Set access flags of all members.
-    boot_class_path.ForEachDexMember([&api_list](DexMember& boot_member) {
+    boot_classpath.ForEachDexMember([&api_list](DexMember& boot_member) {
       auto it = api_list.find(boot_member.GetApiEntry());
       if (it == api_list.end()) {
         boot_member.SetHidden(HiddenApiAccessFlags::kWhitelist);
@@ -590,7 +652,7 @@
       }
     });
 
-    boot_class_path.UpdateDexChecksums();
+    boot_classpath.UpdateDexChecksums();
   }
 
   void OpenApiFile(const std::string& path,
@@ -614,7 +676,7 @@
   void ListApi() {
     if (boot_dex_paths_.empty()) {
       Usage("No boot DEX files specified");
-    } else if (stub_dex_paths_.empty()) {
+    } else if (stub_classpaths_.empty()) {
       Usage("No stub DEX files specified");
     } else if (out_public_path_.empty()) {
       Usage("No public API output path specified");
@@ -630,39 +692,42 @@
     std::set<std::string> unresolved;
 
     // Open all dex files.
-    ClassPath stub_class_path(stub_dex_paths_, /* open_writable */ false);
-    ClassPath boot_class_path(boot_dex_paths_, /* open_writable */ false);
-    Hierarchy boot_hierarchy(boot_class_path);
+    ClassPath boot_classpath(boot_dex_paths_, /* open_writable */ false);
+    Hierarchy boot_hierarchy(boot_classpath);
 
     // Mark all boot dex members private.
-    boot_class_path.ForEachDexMember([&boot_members](DexMember& boot_member) {
+    boot_classpath.ForEachDexMember([&boot_members](DexMember& boot_member) {
       boot_members[boot_member.GetApiEntry()] = false;
     });
 
     // Resolve each SDK dex member against the framework and mark it white.
-    stub_class_path.ForEachDexMember(
-        [&boot_hierarchy, &boot_members, &unresolved](DexMember& stub_member) {
-          if (!stub_member.IsVisible()) {
-            // Typically fake constructors and inner-class `this` fields.
-            return;
-          }
-          bool resolved = boot_hierarchy.ForEachResolvableMember(
-              stub_member,
-              [&boot_members](DexMember& boot_member) {
-                std::string entry = boot_member.GetApiEntry();
-                auto it = boot_members.find(entry);
-                CHECK(it != boot_members.end());
-                if (it->second) {
-                  return false;  // has been marked before
-                } else {
-                  it->second = true;
-                  return true;  // marked for the first time
-                }
-              });
-          if (!resolved) {
-            unresolved.insert(stub_member.GetApiEntry());
-          }
-        });
+    for (const std::vector<std::string>& stub_classpath_dex : stub_classpaths_) {
+      ClassPath stub_classpath(stub_classpath_dex, /* open_writable */ false);
+      Hierarchy stub_hierarchy(stub_classpath);
+      stub_classpath.ForEachDexMember(
+          [&stub_hierarchy, &boot_hierarchy, &boot_members, &unresolved](DexMember& stub_member) {
+            if (!stub_hierarchy.IsMemberVisible(stub_member)) {
+              // Typically fake constructors and inner-class `this` fields.
+              return;
+            }
+            bool resolved = boot_hierarchy.ForEachResolvableMember(
+                stub_member,
+                [&boot_members](DexMember& boot_member) {
+                  std::string entry = boot_member.GetApiEntry();
+                  auto it = boot_members.find(entry);
+                  CHECK(it != boot_members.end());
+                  if (it->second) {
+                    return false;  // has been marked before
+                  } else {
+                    it->second = true;
+                    return true;  // marked for the first time
+                  }
+                });
+            if (!resolved) {
+              unresolved.insert(stub_member.GetApiEntry());
+            }
+          });
+    }
 
     // Print errors.
     for (const std::string& str : unresolved) {
@@ -685,7 +750,10 @@
 
   // Paths to DEX files which should be processed.
   std::vector<std::string> boot_dex_paths_;
-  std::vector<std::string> stub_dex_paths_;
+
+  // Set of public API stub classpaths. Each classpath is formed by a list
+  // of DEX/APK files in the order they appear on the classpath.
+  std::vector<std::vector<std::string>> stub_classpaths_;
 
   // Paths to text files which contain the lists of API members.
   std::string light_greylist_path_;
diff --git a/tools/jfuzz/jfuzz.cc b/tools/jfuzz/jfuzz.cc
index 60c6275..a97a99c 100644
--- a/tools/jfuzz/jfuzz.cc
+++ b/tools/jfuzz/jfuzz.cc
@@ -1302,7 +1302,7 @@
 int32_t main(int32_t argc, char** argv) {
   // Time-based seed.
   struct timeval tp;
-  gettimeofday(&tp, NULL);
+  gettimeofday(&tp, nullptr);
 
   // Defaults.
   uint32_t seed = (tp.tv_sec * 1000000 + tp.tv_usec);
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
index 06f6339..942a4e0 100644
--- a/tools/libcore_gcstress_debug_failures.txt
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -9,13 +9,14 @@
   result: EXEC_FAILED,
   modes: [device],
   names: ["jsr166.CompletableFutureTest#testCompleteOnTimeout_completed",
+          "jsr166.CompletableFutureTest#testDelayedExecutor",
           "libcore.libcore.icu.TransliteratorTest#testAll",
           "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_bug25821045",
           "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_bug25883157",
           "libcore.java.lang.ref.ReferenceQueueTest#testRemoveWithDelayedResultAndTimeout",
-          "libcore.java.lang.ref.ReferenceQueueTest#testRemoveWithDelayedResultAndNoTimeout",
           "libcore.java.util.TimeZoneTest#testSetDefaultDeadlock",
           "libcore.javax.crypto.CipherBasicsTest#testBasicEncryption",
+          "org.apache.harmony.tests.java.text.MessageFormatTest#test_parseLjava_lang_String",
           "org.apache.harmony.tests.java.util.TimerTest#testThrowingTaskKillsTimerThread"]
 },
 {
diff --git a/tools/libcore_gcstress_failures.txt b/tools/libcore_gcstress_failures.txt
index e049cb3..6840f9e 100644
--- a/tools/libcore_gcstress_failures.txt
+++ b/tools/libcore_gcstress_failures.txt
@@ -9,5 +9,39 @@
   result: EXEC_FAILED,
   modes: [device],
   names: ["libcore.javax.crypto.CipherBasicsTest#testGcmEncryption"]
+},
+{
+  description: "Timeouts.",
+  result: EXEC_FAILED,
+  names: ["libcore.java.lang.ref.ReferenceQueueTest#testRemoveWithDelayedResultAndNoTimeout"]
+},
+{
+  description: "Timeouts.",
+  result: EXEC_FAILED,
+  modes: [host],
+  names: ["libcore.java.lang.ref.ReferenceQueueTest#testRemoveWithDelayedResultAndTimeout",
+          "org.apache.harmony.luni.tests.internal.net.www.protocol.http.HttpURLConnectionTest#testGetOutputStream",
+          "org.apache.harmony.luni.tests.internal.net.www.protocol.http.HttpURLConnectionTest#testProxyAuthorization"]
+},
+{
+  description: "Timeouts.",
+  result: EXEC_FAILED,
+  modes: [device],
+  names: ["libcore.java.lang.StringTest#testFastPathString_wellFormedUtf8Sequence",
+          "org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_remove",
+          "org.apache.harmony.tests.java.util.TimerTest#testOverdueTaskExecutesImmediately",
+          "org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext",
+          "libcore.java.text.DecimalFormatTest#testCurrencySymbolSpacing",
+          "libcore.java.text.SimpleDateFormatTest#testLocales"]
+},
+{
+  description: "GC crash",
+  result: EXEC_FAILED,
+  bug: 111545159,
+  names: ["org.apache.harmony.tests.java.util.AbstractSequentialListTest#test_addAllILjava_util_Collection",
+          "org.apache.harmony.tests.java.util.HashtableTest#test_putLjava_lang_ObjectLjava_lang_Object",
+          "org.apache.harmony.tests.java.util.VectorTest#test_addAllILjava_util_Collection",
+          "org.apache.harmony.tests.java.util.VectorTest#test_addAllLjava_util_Collection",
+          "org.apache.harmony.tests.java.io.BufferedWriterTest#test_write_LStringII_Exception"]
 }
 ]
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index aff009a..240d63c 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -156,7 +156,11 @@
 # Increase the timeout, as vogar cannot set individual test
 # timeout when being asked to run packages, and some tests go above
 # the default timeout.
-vogar_args="$vogar_args --timeout 480"
+if $gcstress && $debug && $device_mode; then
+  vogar_args="$vogar_args --timeout 960"
+else
+  vogar_args="$vogar_args --timeout 480"
+fi
 
 # set the toolchain to use.
 vogar_args="$vogar_args --toolchain d8 --language CUR"