Merge "Remove unnecessary `explicit` qualifiers on constructors."
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 7fc6fa2..42b792c 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -56,6 +56,7 @@
     false,  // kIntrinsicReferenceGetReferent
     false,  // kIntrinsicCharAt
     false,  // kIntrinsicCompareTo
+    false,  // kIntrinsicEquals
     false,  // kIntrinsicGetCharsNoCheck
     false,  // kIntrinsicIsEmptyOrLength
     false,  // kIntrinsicIndexOf
@@ -95,6 +96,7 @@
 static_assert(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], "Get must not be static");
 static_assert(!kIntrinsicIsStatic[kIntrinsicCharAt], "CharAt must not be static");
 static_assert(!kIntrinsicIsStatic[kIntrinsicCompareTo], "CompareTo must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicEquals], "String equals must not be static");
 static_assert(!kIntrinsicIsStatic[kIntrinsicGetCharsNoCheck], "GetCharsNoCheck must not be static");
 static_assert(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], "IsEmptyOrLength must not be static");
 static_assert(!kIntrinsicIsStatic[kIntrinsicIndexOf], "IndexOf must not be static");
@@ -192,6 +194,7 @@
     "getReferent",           // kNameCacheReferenceGet
     "charAt",                // kNameCacheCharAt
     "compareTo",             // kNameCacheCompareTo
+    "equals",                // kNameCacheEquals
     "getCharsNoCheck",       // kNameCacheGetCharsNoCheck
     "isEmpty",               // kNameCacheIsEmpty
     "indexOf",               // kNameCacheIndexOf
@@ -284,6 +287,8 @@
     { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheLong } },
     // kProtoCacheJS_V
     { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheShort } },
+    // kProtoCacheObject_Z
+    { kClassCacheBoolean, 1, { kClassCacheJavaLangObject } },
     // kProtoCacheObjectJII_Z
     { kClassCacheBoolean, 4, { kClassCacheJavaLangObject, kClassCacheLong,
         kClassCacheInt, kClassCacheInt } },
@@ -418,6 +423,7 @@
 
     INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
     INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
+    INTRINSIC(JavaLangString, Equals, Object_Z, kIntrinsicEquals, 0),
     INTRINSIC(JavaLangString, GetCharsNoCheck, IICharArrayI_V, kIntrinsicGetCharsNoCheck, 0),
     INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty),
     INTRINSIC(JavaLangString, IndexOf, II_I, kIntrinsicIndexOf, kIntrinsicFlagNone),
@@ -588,6 +594,9 @@
       return backend->GenInlinedCharAt(info);
     case kIntrinsicCompareTo:
       return backend->GenInlinedStringCompareTo(info);
+    case kIntrinsicEquals:
+      // Quick does not implement this intrinsic.
+      return false;
     case kIntrinsicGetCharsNoCheck:
       return backend->GenInlinedStringGetCharsNoCheck(info);
     case kIntrinsicIsEmptyOrLength:
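
For orientation, the proto cache entry added above encodes the dex signature of
the method being recognized: boolean java.lang.String.equals(Object), i.e. the
descriptor "(Ljava/lang/Object;)Z". A minimal standalone sketch of that
correspondence, using only standard dex descriptor rules:

#include <cassert>
#include <string>

// kProtoCacheObject_Z: boolean return ("Z"), one java.lang.Object parameter.
// This is the signature the inliner matches for String.equals.
int main() {
  const std::string return_type = "Z";                 // kClassCacheBoolean
  const std::string parameter = "Ljava/lang/Object;";  // kClassCacheJavaLangObject
  assert("(" + parameter + ")" + return_type == "(Ljava/lang/Object;)Z");
  return 0;
}
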
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index bcb9ee5..d6c8bfb 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -170,6 +170,7 @@
       kNameCacheReferenceGetReferent,
       kNameCacheCharAt,
       kNameCacheCompareTo,
+      kNameCacheEquals,
       kNameCacheGetCharsNoCheck,
       kNameCacheIsEmpty,
       kNameCacheIndexOf,
@@ -243,6 +244,7 @@
       kProtoCacheJJ_J,
       kProtoCacheJJ_V,
       kProtoCacheJS_V,
+      kProtoCacheObject_Z,
       kProtoCacheObjectJII_Z,
       kProtoCacheObjectJJJ_Z,
       kProtoCacheObjectJObjectObject_Z,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 299b995..fa4667e 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -829,6 +829,18 @@
       std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
      : exceptions_to_resolve_(exceptions_to_resolve) {}
 
+  virtual bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+    for (auto& m : c->GetVirtualMethods(pointer_size)) {
+      ResolveExceptionsForMethod(&m);
+    }
+    for (auto& m : c->GetDirectMethods(pointer_size)) {
+      ResolveExceptionsForMethod(&m);
+    }
+    return true;
+  }
+
+ private:
   void ResolveExceptionsForMethod(ArtMethod* method_handle) SHARED_REQUIRES(Locks::mutator_lock_) {
     const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
     if (code_item == nullptr) {
@@ -864,18 +876,6 @@
     }
   }
 
-  virtual bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
-    const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
-    for (auto& m : c->GetVirtualMethods(pointer_size)) {
-      ResolveExceptionsForMethod(&m);
-    }
-    for (auto& m : c->GetDirectMethods(pointer_size)) {
-      ResolveExceptionsForMethod(&m);
-    }
-    return true;
-  }
-
- private:
   std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve_;
 };
 
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 4607ebe..77d6628 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1005,6 +1005,31 @@
   GetMoveResolver()->EmitNativeCode(&parallel_move);
 }
 
+void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path) {
+  // Ensure that the call kind indication given to the register allocator is
+  // coherent with the runtime call generated, and that the GC side effect is
+  // set when required.
+  if (slow_path == nullptr) {
+    DCHECK(instruction->GetLocations()->WillCall());
+    DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()));
+  } else {
+    DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+    DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
+           // Control flow would not come back into the code if a fatal slow
+           // path is taken, so we do not care if it triggers GC.
+           slow_path->IsFatal() ||
+           // HDeoptimize is a special case: we know we are not coming back from
+           // it into the code.
+           instruction->IsDeoptimize());
+  }
+
+  // Check the coherency of leaf information.
+  DCHECK(instruction->IsSuspendCheck()
+         || ((slow_path != nullptr) && slow_path->IsFatal())
+         || instruction->GetLocations()->CanCall()
+         || !IsLeafMethod());
+}
+
 void SlowPathCode::RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc) {
   codegen->RecordPcInfo(instruction, dex_pc, this);
 }
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 540da1c..2582444 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -292,6 +292,8 @@
     return type == Primitive::kPrimNot && !value->IsNullConstant();
   }
 
+  void ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path);
+
   void AddAllocatedRegister(Location location) {
     allocated_registers_.Add(location);
   }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 6c0292c..1bd4216 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -953,23 +953,10 @@
                                      HInstruction* instruction,
                                      uint32_t dex_pc,
                                      SlowPathCode* slow_path) {
-  // Ensure that the call kind indication given to the register allocator is
-  // coherent with the runtime call generated.
-  if (slow_path == nullptr) {
-    DCHECK(instruction->GetLocations()->WillCall());
-  } else {
-    DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
-  }
-
+  ValidateInvokeRuntime(instruction, slow_path);
   __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
   __ blx(LR);
   RecordPcInfo(instruction, dex_pc, slow_path);
-  DCHECK(instruction->IsSuspendCheck()
-      || instruction->IsBoundsCheck()
-      || instruction->IsNullCheck()
-      || instruction->IsDivZeroCheck()
-      || instruction->GetLocations()->CanCall()
-      || !IsLeafMethod());
 }
 
 void InstructionCodeGeneratorARM::HandleGoto(HInstruction* got, HBasicBlock* successor) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d93ef1b..b8ac421 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1102,23 +1102,11 @@
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
-  // Ensure that the call kind indication given to the register allocator is
-  // coherent with the runtime call generated.
-  if (slow_path == nullptr) {
-    DCHECK(instruction->GetLocations()->WillCall());
-  } else {
-    DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
-  }
-
+  ValidateInvokeRuntime(instruction, slow_path);
   BlockPoolsScope block_pools(GetVIXLAssembler());
   __ Ldr(lr, MemOperand(tr, entry_point_offset));
   __ Blr(lr);
   RecordPcInfo(instruction, dex_pc, slow_path);
-  DCHECK(instruction->IsSuspendCheck()
-         || instruction->IsBoundsCheck()
-         || instruction->IsNullCheck()
-         || instruction->IsDivZeroCheck()
-         || !IsLeafMethod());
 }
 
 void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 4cee4a3..167e025 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -976,23 +976,11 @@
                                         HInstruction* instruction,
                                         uint32_t dex_pc,
                                         SlowPathCode* slow_path) {
-  // Ensure that the call kind indication given to the register allocator is
-  // coherent with the runtime call generated.
-  if (slow_path == nullptr) {
-    DCHECK(instruction->GetLocations()->WillCall());
-  } else {
-    DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
-  }
-
+  ValidateInvokeRuntime(instruction, slow_path);
   // TODO: anything related to T9/GP/GOT/PIC/.so's?
   __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
   __ Jalr(T9);
   RecordPcInfo(instruction, dex_pc, slow_path);
-  DCHECK(instruction->IsSuspendCheck()
-      || instruction->IsBoundsCheck()
-      || instruction->IsNullCheck()
-      || instruction->IsDivZeroCheck()
-      || !IsLeafMethod());
 }
 
 void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 38a7c53..091a3e5 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -429,21 +429,9 @@
                                      HInstruction* instruction,
                                      uint32_t dex_pc,
                                      SlowPathCode* slow_path) {
-  // Ensure that the call kind indication given to the register allocator is
-  // coherent with the runtime call generated.
-  if (slow_path == nullptr) {
-    DCHECK(instruction->GetLocations()->WillCall());
-  } else {
-    DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
-  }
-
+  ValidateInvokeRuntime(instruction, slow_path);
   __ fs()->call(entry_point);
   RecordPcInfo(instruction, dex_pc, slow_path);
-  DCHECK(instruction->IsSuspendCheck()
-         || instruction->IsBoundsCheck()
-         || instruction->IsNullCheck()
-         || instruction->IsDivZeroCheck()
-         || !IsLeafMethod());
 }
 
 CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 18ab595..2c5cef3 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -484,21 +484,9 @@
                                         HInstruction* instruction,
                                         uint32_t dex_pc,
                                         SlowPathCode* slow_path) {
-  // Ensure that the call kind indication given to the register allocator is
-  // coherent with the runtime call generated.
-  if (slow_path == nullptr) {
-    DCHECK(instruction->GetLocations()->WillCall());
-  } else {
-    DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
-  }
-
+  ValidateInvokeRuntime(instruction, slow_path);
   __ gs()->call(entry_point);
   RecordPcInfo(instruction, dex_pc, slow_path);
-  DCHECK(instruction->IsSuspendCheck()
-         || instruction->IsBoundsCheck()
-         || instruction->IsNullCheck()
-         || instruction->IsDivZeroCheck()
-         || !IsLeafMethod());
 }
 
 static constexpr int kNumberOfCpuRegisterPairs = 0;
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 6269d16..5de629d 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -128,7 +128,7 @@
     for (i.Advance(); !i.Done(); i.Advance()) {
       HInstruction* inst = i.Current();
       DCHECK(!inst->IsControlFlow());
-      if (!inst->DoesAnyWrite()
+      if (!inst->HasSideEffects()
           && !inst->CanThrow()
           && !inst->IsSuspendCheck()
           // If we added an explicit barrier then we should keep it.
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 3900646..833dfb0 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -264,7 +264,7 @@
   // odd buckets to speed up deletion.
   size_t HashCode(HInstruction* instruction) const {
     size_t hash_code = instruction->ComputeHashCode();
-    if (instruction->GetSideEffects().DoesAnyRead()) {
+    if (instruction->GetSideEffects().HasDependencies()) {
       return (hash_code << 1) | 0;
     } else {
       return (hash_code << 1) | 1;
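
The predicate change above widens the even-bucket class from "reads memory" to
"has any dependency", which now also covers DependsOnGC. A standalone sketch of
the bucket-parity scheme (the helper name is hypothetical):

#include <cassert>
#include <cstddef>

// Instructions whose value has dependencies are tagged with an even hash so
// that invalidation after a side-effecting instruction only needs to visit
// half of the buckets; pure instructions get odd hashes and are never killed.
size_t TaggedHashCode(size_t hash_code, bool has_dependencies) {
  return has_dependencies ? ((hash_code << 1) | 0)   // even bucket
                          : ((hash_code << 1) | 1);  // odd bucket
}

int main() {
  assert(TaggedHashCode(42, /*has_dependencies=*/true) % 2 == 0);
  assert(TaggedHashCode(42, /*has_dependencies=*/false) % 2 == 1);
  return 0;
}
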
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 5c6239b..42ef3ff 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -266,6 +266,8 @@
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
 
+  static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC();
+
   HGraph* graph = CreateGraph(&allocator);
   HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
@@ -309,7 +311,7 @@
   ASSERT_TRUE(inner_loop_header->GetLoopInformation()->IsIn(
       *outer_loop_header->GetLoopInformation()));
 
-  // Check that the loops don't have side effects.
+  // Check that the only side effect of loops is to potentially trigger GC.
   {
     // Make one block with a side effect.
     entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
@@ -327,6 +329,8 @@
     ASSERT_FALSE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite());
     ASSERT_FALSE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite());
     ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite());
+    ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).Equals(kCanTriggerGC));
+    ASSERT_TRUE(side_effects.GetLoopEffects(inner_loop_header).Equals(kCanTriggerGC));
   }
 
  // Check that the side effects of the outer loop do not affect the inner loop.
@@ -348,6 +352,7 @@
     ASSERT_TRUE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite());
     ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite());
     ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite());
+    ASSERT_TRUE(side_effects.GetLoopEffects(inner_loop_header).Equals(kCanTriggerGC));
   }
 
  // Check that the side effects of the inner loop affect the outer loop.
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 55e964e..3db9816 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -31,7 +31,7 @@
   switch (i) {
     case Intrinsics::kNone:
       return kInterface;  // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \
     case Intrinsics::k ## Name:               \
       return IsStatic;
 #include "intrinsics_list.h"
@@ -42,7 +42,21 @@
   return kInterface;
 }
 
-
+// Returns whether an intrinsic needs an environment.
+static inline IntrinsicNeedsEnvironment IntrinsicNeedsEnvironment(Intrinsics i) {
+  switch (i) {
+    case Intrinsics::kNone:
+      return kNeedsEnvironment;  // Non-sensical for intrinsic.
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \
+    case Intrinsics::k ## Name:               \
+      return NeedsEnvironment;
+#include "intrinsics_list.h"
+INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+  }
+  return kNeedsEnvironment;
+}
 
 static Primitive::Type GetType(uint64_t data, bool is_op_size) {
   if (is_op_size) {
@@ -70,7 +84,10 @@
   }
 }
 
-static Intrinsics GetIntrinsic(InlineMethod method) {
+static Intrinsics GetIntrinsic(InlineMethod method, InstructionSet instruction_set) {
+  if (instruction_set == kMips || instruction_set == kMips64) {
+    return Intrinsics::kNone;
+  }
   switch (method.opcode) {
     // Floating-point conversions.
     case kIntrinsicDoubleCvt:
@@ -197,6 +214,8 @@
       return Intrinsics::kStringCharAt;
     case kIntrinsicCompareTo:
       return Intrinsics::kStringCompareTo;
+    case kIntrinsicEquals:
+      return Intrinsics::kStringEquals;
     case kIntrinsicGetCharsNoCheck:
       return Intrinsics::kStringGetCharsNoCheck;
     case kIntrinsicIsEmptyOrLength:
@@ -349,7 +368,7 @@
             driver_->GetMethodInlinerMap()->GetMethodInliner(&invoke->GetDexFile());
         DCHECK(inliner != nullptr);
         if (inliner->IsIntrinsic(invoke->GetDexMethodIndex(), &method)) {
-          Intrinsics intrinsic = GetIntrinsic(method);
+          Intrinsics intrinsic = GetIntrinsic(method, graph_->GetInstructionSet());
 
           if (intrinsic != Intrinsics::kNone) {
             if (!CheckInvokeType(intrinsic, invoke)) {
@@ -357,7 +376,7 @@
                            << intrinsic << " for "
                            << PrettyMethod(invoke->GetDexMethodIndex(), invoke->GetDexFile());
             } else {
-              invoke->SetIntrinsic(intrinsic);
+              invoke->SetIntrinsic(intrinsic, IntrinsicNeedsEnvironment(intrinsic));
             }
           }
         }
@@ -371,7 +390,7 @@
     case Intrinsics::kNone:
       os << "None";
       break;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \
     case Intrinsics::k ## Name: \
       os << # Name; \
       break;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 9044982..d1a17b6 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -54,7 +54,7 @@
     switch (invoke->GetIntrinsic()) {
       case Intrinsics::kNone:
         return;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \
       case Intrinsics::k ## Name:             \
         Visit ## Name(invoke);                \
         return;
@@ -69,7 +69,7 @@
 
   // Define visitor methods.
 
-#define OPTIMIZING_INTRINSICS(Name, IsStatic)                    \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment)                    \
   virtual void Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
   }
 #include "intrinsics_list.h"
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index a797654..1527a6a 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1110,6 +1110,7 @@
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
+UNIMPLEMENTED_INTRINSIC(StringEquals)
 
 #undef UNIMPLEMENTED_INTRINSIC
 
diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h
index 38c2ef4..f013bd6 100644
--- a/compiler/optimizing/intrinsics_arm.h
+++ b/compiler/optimizing/intrinsics_arm.h
@@ -38,7 +38,7 @@
 
   // Define visitor methods.
 
-#define OPTIMIZING_INTRINSICS(Name, IsStatic)   \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment)   \
   void Visit ## Name(HInvoke* invoke) OVERRIDE;
 #include "intrinsics_list.h"
 INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -64,7 +64,7 @@
 
   // Define visitor methods.
 
-#define OPTIMIZING_INTRINSICS(Name, IsStatic)   \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment)   \
   void Visit ## Name(HInvoke* invoke) OVERRIDE;
 #include "intrinsics_list.h"
 INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 2c93fea..5631373 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1229,6 +1229,7 @@
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
+UNIMPLEMENTED_INTRINSIC(StringEquals)
 
 #undef UNIMPLEMENTED_INTRINSIC
 
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index ba21889..ebaf5e5 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -41,7 +41,7 @@
 
   // Define visitor methods.
 
-#define OPTIMIZING_INTRINSICS(Name, IsStatic)   \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment)   \
   void Visit ## Name(HInvoke* invoke) OVERRIDE;
 #include "intrinsics_list.h"
 INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -65,7 +65,7 @@
 
   // Define visitor methods.
 
-#define OPTIMIZING_INTRINSICS(Name, IsStatic)   \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment)   \
   void Visit ## Name(HInvoke* invoke) OVERRIDE;
 #include "intrinsics_list.h"
 INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index d28c5a3..15ee5d4 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -18,75 +18,77 @@
 #define ART_COMPILER_OPTIMIZING_INTRINSICS_LIST_H_
 
 // All intrinsics supported by the optimizing compiler. Format is name, then whether it is expected
-// to be a HInvokeStaticOrDirect node (compared to HInvokeVirtual).
+// to be a HInvokeStaticOrDirect node (compared to HInvokeVirtual), then whether it requires an
+// environment.
 
 #define INTRINSICS_LIST(V) \
-  V(DoubleDoubleToRawLongBits, kStatic) \
-  V(DoubleLongBitsToDouble, kStatic) \
-  V(FloatFloatToRawIntBits, kStatic) \
-  V(FloatIntBitsToFloat, kStatic) \
-  V(IntegerReverse, kStatic) \
-  V(IntegerReverseBytes, kStatic) \
-  V(IntegerNumberOfLeadingZeros, kStatic) \
-  V(LongReverse, kStatic) \
-  V(LongReverseBytes, kStatic) \
-  V(LongNumberOfLeadingZeros, kStatic) \
-  V(ShortReverseBytes, kStatic) \
-  V(MathAbsDouble, kStatic) \
-  V(MathAbsFloat, kStatic) \
-  V(MathAbsLong, kStatic) \
-  V(MathAbsInt, kStatic) \
-  V(MathMinDoubleDouble, kStatic) \
-  V(MathMinFloatFloat, kStatic) \
-  V(MathMinLongLong, kStatic) \
-  V(MathMinIntInt, kStatic) \
-  V(MathMaxDoubleDouble, kStatic) \
-  V(MathMaxFloatFloat, kStatic) \
-  V(MathMaxLongLong, kStatic) \
-  V(MathMaxIntInt, kStatic) \
-  V(MathSqrt, kStatic) \
-  V(MathCeil, kStatic) \
-  V(MathFloor, kStatic) \
-  V(MathRint, kStatic) \
-  V(MathRoundDouble, kStatic) \
-  V(MathRoundFloat, kStatic) \
-  V(SystemArrayCopyChar, kStatic) \
-  V(ThreadCurrentThread, kStatic) \
-  V(MemoryPeekByte, kStatic) \
-  V(MemoryPeekIntNative, kStatic) \
-  V(MemoryPeekLongNative, kStatic) \
-  V(MemoryPeekShortNative, kStatic) \
-  V(MemoryPokeByte, kStatic) \
-  V(MemoryPokeIntNative, kStatic) \
-  V(MemoryPokeLongNative, kStatic) \
-  V(MemoryPokeShortNative, kStatic) \
-  V(StringCharAt, kDirect) \
-  V(StringCompareTo, kDirect) \
-  V(StringGetCharsNoCheck, kDirect) \
-  V(StringIndexOf, kDirect) \
-  V(StringIndexOfAfter, kDirect) \
-  V(StringNewStringFromBytes, kStatic) \
-  V(StringNewStringFromChars, kStatic) \
-  V(StringNewStringFromString, kStatic) \
-  V(UnsafeCASInt, kDirect) \
-  V(UnsafeCASLong, kDirect) \
-  V(UnsafeCASObject, kDirect) \
-  V(UnsafeGet, kDirect) \
-  V(UnsafeGetVolatile, kDirect) \
-  V(UnsafeGetObject, kDirect) \
-  V(UnsafeGetObjectVolatile, kDirect) \
-  V(UnsafeGetLong, kDirect) \
-  V(UnsafeGetLongVolatile, kDirect) \
-  V(UnsafePut, kDirect) \
-  V(UnsafePutOrdered, kDirect) \
-  V(UnsafePutVolatile, kDirect) \
-  V(UnsafePutObject, kDirect) \
-  V(UnsafePutObjectOrdered, kDirect) \
-  V(UnsafePutObjectVolatile, kDirect) \
-  V(UnsafePutLong, kDirect) \
-  V(UnsafePutLongOrdered, kDirect) \
-  V(UnsafePutLongVolatile, kDirect) \
-  V(ReferenceGetReferent, kDirect)
+  V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironment) \
+  V(DoubleLongBitsToDouble, kStatic, kNeedsEnvironment) \
+  V(FloatFloatToRawIntBits, kStatic, kNeedsEnvironment) \
+  V(FloatIntBitsToFloat, kStatic, kNeedsEnvironment) \
+  V(IntegerReverse, kStatic, kNeedsEnvironment) \
+  V(IntegerReverseBytes, kStatic, kNeedsEnvironment) \
+  V(IntegerNumberOfLeadingZeros, kStatic, kNeedsEnvironment) \
+  V(LongReverse, kStatic, kNeedsEnvironment) \
+  V(LongReverseBytes, kStatic, kNeedsEnvironment) \
+  V(LongNumberOfLeadingZeros, kStatic, kNeedsEnvironment) \
+  V(ShortReverseBytes, kStatic, kNeedsEnvironment) \
+  V(MathAbsDouble, kStatic, kNeedsEnvironment) \
+  V(MathAbsFloat, kStatic, kNeedsEnvironment) \
+  V(MathAbsLong, kStatic, kNeedsEnvironment) \
+  V(MathAbsInt, kStatic, kNeedsEnvironment) \
+  V(MathMinDoubleDouble, kStatic, kNeedsEnvironment) \
+  V(MathMinFloatFloat, kStatic, kNeedsEnvironment) \
+  V(MathMinLongLong, kStatic, kNeedsEnvironment) \
+  V(MathMinIntInt, kStatic, kNeedsEnvironment) \
+  V(MathMaxDoubleDouble, kStatic, kNeedsEnvironment) \
+  V(MathMaxFloatFloat, kStatic, kNeedsEnvironment) \
+  V(MathMaxLongLong, kStatic, kNeedsEnvironment) \
+  V(MathMaxIntInt, kStatic, kNeedsEnvironment) \
+  V(MathSqrt, kStatic, kNeedsEnvironment) \
+  V(MathCeil, kStatic, kNeedsEnvironment) \
+  V(MathFloor, kStatic, kNeedsEnvironment) \
+  V(MathRint, kStatic, kNeedsEnvironment) \
+  V(MathRoundDouble, kStatic, kNeedsEnvironment) \
+  V(MathRoundFloat, kStatic, kNeedsEnvironment) \
+  V(SystemArrayCopyChar, kStatic, kNeedsEnvironment) \
+  V(ThreadCurrentThread, kStatic, kNeedsEnvironment) \
+  V(MemoryPeekByte, kStatic, kNeedsEnvironment) \
+  V(MemoryPeekIntNative, kStatic, kNeedsEnvironment) \
+  V(MemoryPeekLongNative, kStatic, kNeedsEnvironment) \
+  V(MemoryPeekShortNative, kStatic, kNeedsEnvironment) \
+  V(MemoryPokeByte, kStatic, kNeedsEnvironment) \
+  V(MemoryPokeIntNative, kStatic, kNeedsEnvironment) \
+  V(MemoryPokeLongNative, kStatic, kNeedsEnvironment) \
+  V(MemoryPokeShortNative, kStatic, kNeedsEnvironment) \
+  V(StringCharAt, kDirect, kNeedsEnvironment) \
+  V(StringCompareTo, kDirect, kNeedsEnvironment) \
+  V(StringEquals, kDirect, kNeedsEnvironment) \
+  V(StringGetCharsNoCheck, kDirect, kNeedsEnvironment) \
+  V(StringIndexOf, kDirect, kNeedsEnvironment) \
+  V(StringIndexOfAfter, kDirect, kNeedsEnvironment) \
+  V(StringNewStringFromBytes, kStatic, kNeedsEnvironment) \
+  V(StringNewStringFromChars, kStatic, kNeedsEnvironment) \
+  V(StringNewStringFromString, kStatic, kNeedsEnvironment) \
+  V(UnsafeCASInt, kDirect, kNeedsEnvironment) \
+  V(UnsafeCASLong, kDirect, kNeedsEnvironment) \
+  V(UnsafeCASObject, kDirect, kNeedsEnvironment) \
+  V(UnsafeGet, kDirect, kNeedsEnvironment) \
+  V(UnsafeGetVolatile, kDirect, kNeedsEnvironment) \
+  V(UnsafeGetObject, kDirect, kNeedsEnvironment) \
+  V(UnsafeGetObjectVolatile, kDirect, kNeedsEnvironment) \
+  V(UnsafeGetLong, kDirect, kNeedsEnvironment) \
+  V(UnsafeGetLongVolatile, kDirect, kNeedsEnvironment) \
+  V(UnsafePut, kDirect, kNeedsEnvironment) \
+  V(UnsafePutOrdered, kDirect, kNeedsEnvironment) \
+  V(UnsafePutVolatile, kDirect, kNeedsEnvironment) \
+  V(UnsafePutObject, kDirect, kNeedsEnvironment) \
+  V(UnsafePutObjectOrdered, kDirect, kNeedsEnvironment) \
+  V(UnsafePutObjectVolatile, kDirect, kNeedsEnvironment) \
+  V(UnsafePutLong, kDirect, kNeedsEnvironment) \
+  V(UnsafePutLongOrdered, kDirect, kNeedsEnvironment) \
+  V(UnsafePutLongVolatile, kDirect, kNeedsEnvironment) \
+  V(ReferenceGetReferent, kDirect, kNeedsEnvironment)
 
 #endif  // ART_COMPILER_OPTIMIZING_INTRINSICS_LIST_H_
 #undef ART_COMPILER_OPTIMIZING_INTRINSICS_LIST_H_   // #define is only for lint.
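
Because INTRINSICS_LIST is an X-macro, every OPTIMIZING_INTRINSICS definition
in the tree has to accept the new third parameter even when it ignores it,
which is why the header diffs above and below touch every visitor. A toy,
self-contained illustration of the pattern (not the real list):

// Each V(...) row now carries three arguments; consumers pick what they need.
#define TOY_INTRINSICS_LIST(V) \
  V(StringEquals, kDirect, kNeedsEnvironment) \
  V(MathSqrt, kStatic, kNeedsEnvironment)

// Generate enum values from the list, using only the first argument.
#define TOY_ENUM_ENTRY(Name, IsStatic, NeedsEnvironment) k ## Name,
enum class ToyIntrinsics {
  TOY_INTRINSICS_LIST(TOY_ENUM_ENTRY)
  kNone,
};
#undef TOY_ENUM_ENTRY

int main() {
  return ToyIntrinsics::kStringEquals == ToyIntrinsics::kNone ? 1 : 0;
}
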
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 993c005..3c8be27 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1758,6 +1758,7 @@
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(IntegerNumberOfLeadingZeros)
 UNIMPLEMENTED_INTRINSIC(LongNumberOfLeadingZeros)
+UNIMPLEMENTED_INTRINSIC(StringEquals)
 
 #undef UNIMPLEMENTED_INTRINSIC
 
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index 4292ec7..ac68f39 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -36,7 +36,7 @@
 
   // Define visitor methods.
 
-#define OPTIMIZING_INTRINSICS(Name, IsStatic)   \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment)   \
   void Visit ## Name(HInvoke* invoke) OVERRIDE;
 #include "intrinsics_list.h"
 INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -61,7 +61,7 @@
 
   // Define visitor methods.
 
-#define OPTIMIZING_INTRINSICS(Name, IsStatic)   \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment)   \
   void Visit ## Name(HInvoke* invoke) OVERRIDE;
 #include "intrinsics_list.h"
 INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 85d40d7..b4926c2 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1607,6 +1607,7 @@
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(IntegerNumberOfLeadingZeros)
 UNIMPLEMENTED_INTRINSIC(LongNumberOfLeadingZeros)
+UNIMPLEMENTED_INTRINSIC(StringEquals)
 
 #undef UNIMPLEMENTED_INTRINSIC
 
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index 0e0e72c..17293af 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -36,7 +36,7 @@
 
   // Define visitor methods.
 
-#define OPTIMIZING_INTRINSICS(Name, IsStatic)   \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment)   \
   void Visit ## Name(HInvoke* invoke) OVERRIDE;
 #include "intrinsics_list.h"
 INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -61,7 +61,7 @@
 
   // Define visitor methods.
 
-#define OPTIMIZING_INTRINSICS(Name, IsStatic)   \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment)   \
   void Visit ## Name(HInvoke* invoke) OVERRIDE;
 #include "intrinsics_list.h"
 INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 718469f..89709fa 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1215,23 +1215,33 @@
 };
 
 /**
- * Side-effects representation for write/read dependences on fields/arrays.
+ * Side-effects representation.
  *
- * The dependence analysis uses type disambiguation (e.g. a float field write
- * cannot modify the value of an integer field read) and the access type (e.g.
- * a reference array write cannot modify the value of a reference field read
- * [although it may modify the reference fetch prior to reading the field,
- * which is represented by its own write/read dependence]). The analysis
- * makes conservative points-to assumptions on reference types (e.g. two same
- * typed arrays are assumed to be the same, and any reference read depends
- * on any reference read without further regard of its type).
+ * For write/read dependences on fields/arrays, the dependence analysis uses
+ * type disambiguation (e.g. a float field write cannot modify the value of an
+ * integer field read) and the access type (e.g. a reference array write cannot
+ * modify the value of a reference field read [although it may modify the
+ * reference fetch prior to reading the field, which is represented by its own
+ * write/read dependence]). The analysis makes conservative points-to
+ * assumptions on reference types (e.g. two same typed arrays are assumed to be
+ * the same, and any reference read depends on any reference read without
+ * further regard of its type).
  *
- * The internal representation uses the following 36-bit flags assignments:
+ * The internal representation uses 38 bits and is described in the table below.
+ * The first line indicates the side effect, and for field/array accesses the
+ * second line indicates the type of the access (in the order of the
+ * Primitive::Type enum).
+ * The two numbered lines below indicate the bit position in the bitfield (read
+ * vertically).
  *
- *   |ARRAY-R  |FIELD-R  |ARRAY-W  |FIELD-W  |
- *   +---------+---------+---------+---------+
- *   |543210987|654321098|765432109|876543210|
- *   |DFJISCBZL|DFJISCBZL|DFJISCBZL|DFJISCBZL|
+ *   |Depends on GC|ARRAY-R  |FIELD-R  |Can trigger GC|ARRAY-W  |FIELD-W  |
+ *   +-------------+---------+---------+--------------+---------+---------+
+ *   |             |DFJISCBZL|DFJISCBZL|              |DFJISCBZL|DFJISCBZL|
+ *   |      3      |333333322|222222221|       1      |111111110|000000000|
+ *   |      7      |654321098|765432109|       8      |765432109|876543210|
+ *
+ * Note that, to ease the implementation, the 'changes' bits are the least
+ * significant bits, while the 'dependency' bits are the most significant bits.
  */
 class SideEffects : public ValueObject {
  public:
@@ -1242,6 +1252,22 @@
   }
 
   static SideEffects All() {
+    return SideEffects(kAllChangeBits | kAllDependOnBits);
+  }
+
+  static SideEffects AllChanges() {
+    return SideEffects(kAllChangeBits);
+  }
+
+  static SideEffects AllDependencies() {
+    return SideEffects(kAllDependOnBits);
+  }
+
+  static SideEffects AllExceptGCDependency() {
+    return AllWritesAndReads().Union(SideEffects::CanTriggerGC());
+  }
+
+  static SideEffects AllWritesAndReads() {
     return SideEffects(kAllWrites | kAllReads);
   }
 
@@ -1255,7 +1281,7 @@
 
   static SideEffects FieldWriteOfType(Primitive::Type type, bool is_volatile) {
     return is_volatile
-        ? All()
+        ? AllWritesAndReads()
         : SideEffects(TypeFlagWithAlias(type, kFieldWriteOffset));
   }
 
@@ -1265,7 +1291,7 @@
 
   static SideEffects FieldReadOfType(Primitive::Type type, bool is_volatile) {
     return is_volatile
-        ? All()
+        ? AllWritesAndReads()
         : SideEffects(TypeFlagWithAlias(type, kFieldReadOffset));
   }
 
@@ -1273,11 +1299,40 @@
     return SideEffects(TypeFlagWithAlias(type, kArrayReadOffset));
   }
 
+  static SideEffects CanTriggerGC() {
+    return SideEffects(1ULL << kCanTriggerGCBit);
+  }
+
+  static SideEffects DependsOnGC() {
+    return SideEffects(1ULL << kDependsOnGCBit);
+  }
+
   // Combines the side-effects of this and the other.
   SideEffects Union(SideEffects other) const {
     return SideEffects(flags_ | other.flags_);
   }
 
+  SideEffects Exclusion(SideEffects other) const {
+    return SideEffects(flags_ & ~other.flags_);
+  }
+
+  bool Includes(SideEffects other) const {
+    return (other.flags_ & flags_) == other.flags_;
+  }
+
+  bool HasSideEffects() const {
+    return (flags_ & kAllChangeBits);
+  }
+
+  bool HasDependencies() const {
+    return (flags_ & kAllDependOnBits);
+  }
+
+  // Returns true if there are no side effects or dependencies.
+  bool DoesNothing() const {
+    return flags_ == 0;
+  }
+
   // Returns true if something is written.
   bool DoesAnyWrite() const {
     return (flags_ & kAllWrites);
@@ -1288,47 +1343,81 @@
     return (flags_ & kAllReads);
   }
 
-  // Returns true if nothing is written or read.
-  bool DoesNothing() const {
-    return flags_ == 0;
-  }
-
   // Returns true if potentially everything is written and read
   // (every type and every kind of access).
+  bool DoesAllReadWrite() const {
+    return (flags_ & (kAllWrites | kAllReads)) == (kAllWrites | kAllReads);
+  }
+
   bool DoesAll() const {
-    return flags_ == (kAllWrites | kAllReads);
+    return flags_ == (kAllChangeBits | kAllDependOnBits);
   }
 
   // Returns true if this may read something written by other.
   bool MayDependOn(SideEffects other) const {
-    const uint64_t reads = (flags_ & kAllReads) >> kFieldReadOffset;
-    return (other.flags_ & reads);
+    const uint64_t depends_on_flags = (flags_ & kAllDependOnBits) >> kChangeBits;
+    return (other.flags_ & depends_on_flags);
   }
 
   // Returns string representation of flags (for debugging only).
-  // Format: |DFJISCBZL|DFJISCBZL|DFJISCBZL|DFJISCBZL|
+  // Format: |x|DFJISCBZL|DFJISCBZL|y|DFJISCBZL|DFJISCBZL|
   std::string ToString() const {
-    static const char *kDebug = "LZBCSIJFD";
     std::string flags = "|";
-    for (int s = 35; s >= 0; s--) {
-      const int t = s % kBits;
-      if ((flags_ >> s) & 1)
-        flags += kDebug[t];
-      if (t == 0)
+    for (int s = kLastBit; s >= 0; s--) {
+      bool current_bit_is_set = ((flags_ >> s) & 1) != 0;
+      if ((s == kDependsOnGCBit) || (s == kCanTriggerGCBit)) {
+        // This is a bit for the GC side effect.
+        if (current_bit_is_set) {
+          flags += "GC";
+        }
         flags += "|";
+      } else {
+        // This is a bit for the array/field analysis.
+        // The underscore character stands for the 'can trigger GC' bit.
+        static const char *kDebug = "LZBCSIJFDLZBCSIJFD_LZBCSIJFDLZBCSIJFD";
+        if (current_bit_is_set) {
+          flags += kDebug[s];
+        }
+        if ((s == kFieldWriteOffset) || (s == kArrayWriteOffset) ||
+            (s == kFieldReadOffset) || (s == kArrayReadOffset)) {
+          flags += "|";
+        }
+      }
     }
     return flags;
   }
 
- private:
-  static constexpr int kBits = 9;
-  static constexpr int kFieldWriteOffset = 0 * kBits;
-  static constexpr int kArrayWriteOffset = 1 * kBits;
-  static constexpr int kFieldReadOffset  = 2 * kBits;
-  static constexpr int kArrayReadOffset  = 3 * kBits;
+  bool Equals(const SideEffects& other) const { return flags_ == other.flags_; }
 
-  static constexpr uint64_t kAllWrites = 0x0003ffff;
-  static constexpr uint64_t kAllReads  = kAllWrites << kFieldReadOffset;
+ private:
+  static constexpr int kFieldArrayAnalysisBits = 9;
+
+  static constexpr int kFieldWriteOffset = 0;
+  static constexpr int kArrayWriteOffset = kFieldWriteOffset + kFieldArrayAnalysisBits;
+  static constexpr int kLastBitForWrites = kArrayWriteOffset + kFieldArrayAnalysisBits - 1;
+  static constexpr int kCanTriggerGCBit = kLastBitForWrites + 1;
+
+  static constexpr int kChangeBits = kCanTriggerGCBit + 1;
+
+  static constexpr int kFieldReadOffset = kCanTriggerGCBit + 1;
+  static constexpr int kArrayReadOffset = kFieldReadOffset + kFieldArrayAnalysisBits;
+  static constexpr int kLastBitForReads = kArrayReadOffset + kFieldArrayAnalysisBits - 1;
+  static constexpr int kDependsOnGCBit = kLastBitForReads + 1;
+
+  static constexpr int kLastBit = kDependsOnGCBit;
+  static constexpr int kDependOnBits = kLastBit + 1 - kChangeBits;
+
+  // Aliases.
+
+  static_assert(kChangeBits == kDependOnBits,
+                "the 'change' bits should match the 'depend on' bits.");
+
+  static constexpr uint64_t kAllChangeBits = ((1ULL << kChangeBits) - 1);
+  static constexpr uint64_t kAllDependOnBits = ((1ULL << kDependOnBits) - 1) << kChangeBits;
+  static constexpr uint64_t kAllWrites =
+      ((1ULL << (kLastBitForWrites + 1 - kFieldWriteOffset)) - 1) << kFieldWriteOffset;
+  static constexpr uint64_t kAllReads =
+      ((1ULL << (kLastBitForReads + 1 - kFieldReadOffset)) - 1) << kFieldReadOffset;
 
   // Work around the fact that HIR aliases I/F and J/D.
   // TODO: remove this interceptor once HIR types are clean
@@ -1610,6 +1699,7 @@
   virtual bool IsControlFlow() const { return false; }
   virtual bool CanThrow() const { return false; }
 
+  bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
   bool DoesAnyWrite() const { return side_effects_.DoesAnyWrite(); }
 
   // Does not apply for all instructions, but having this at top level greatly
@@ -2302,7 +2392,9 @@
  public:
   HBinaryOperation(Primitive::Type result_type,
                    HInstruction* left,
-                   HInstruction* right) : HExpression(result_type, SideEffects::None()) {
+                   HInstruction* right,
+                   SideEffects side_effects = SideEffects::None())
+      : HExpression(result_type, side_effects) {
     SetRawInputAt(0, left);
     SetRawInputAt(1, right);
   }
@@ -2626,7 +2718,9 @@
            HInstruction* second,
            ComparisonBias bias,
            uint32_t dex_pc)
-      : HBinaryOperation(Primitive::kPrimInt, first, second), bias_(bias), dex_pc_(dex_pc) {
+      : HBinaryOperation(Primitive::kPrimInt, first, second, SideEffectsForArchRuntimeCalls(type)),
+        bias_(bias),
+        dex_pc_(dex_pc) {
     DCHECK_EQ(type, first->GetType());
     DCHECK_EQ(type, second->GetType());
   }
@@ -2649,7 +2743,12 @@
 
   bool IsGtBias() { return bias_ == ComparisonBias::kGtBias; }
 
-  uint32_t GetDexPc() const { return dex_pc_; }
+  uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
+
+  static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type type) {
+    // MIPS64 uses a runtime call for FP comparisons.
+    return Primitive::IsFloatingPointType(type) ? SideEffects::CanTriggerGC() : SideEffects::None();
+  }
 
   DECLARE_INSTRUCTION(Compare);
 
@@ -2791,7 +2890,7 @@
 };
 
 enum class Intrinsics {
-#define OPTIMIZING_INTRINSICS(Name, IsStatic) k ## Name,
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) k ## Name,
 #include "intrinsics_list.h"
   kNone,
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -2800,13 +2899,18 @@
 };
 std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic);
 
+enum IntrinsicNeedsEnvironment {
+  kNoEnvironment,        // Intrinsic does not require an environment.
+  kNeedsEnvironment      // Intrinsic requires an environment.
+};
+
 class HInvoke : public HInstruction {
  public:
   size_t InputCount() const OVERRIDE { return inputs_.Size(); }
 
   // Runtime needs to walk the stack, so Dex -> Dex calls need to
   // know their environment.
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const OVERRIDE { return needs_environment_ == kNeedsEnvironment; }
 
   void SetArgumentAt(size_t index, HInstruction* argument) {
     SetRawInputAt(index, argument);
@@ -2831,8 +2935,9 @@
     return intrinsic_;
   }
 
-  void SetIntrinsic(Intrinsics intrinsic) {
+  void SetIntrinsic(Intrinsics intrinsic, IntrinsicNeedsEnvironment needs_environment) {
     intrinsic_ = intrinsic;
+    needs_environment_ = needs_environment;
   }
 
   bool IsFromInlinedInvoke() const {
@@ -2851,14 +2956,16 @@
           uint32_t dex_pc,
           uint32_t dex_method_index,
           InvokeType original_invoke_type)
-    : HInstruction(SideEffects::All()),  // assume write/read on all fields/arrays
+    : HInstruction(
+          SideEffects::AllExceptGCDependency()),  // Assume write/read on all fields/arrays.
       number_of_arguments_(number_of_arguments),
       inputs_(arena, number_of_arguments),
       return_type_(return_type),
       dex_pc_(dex_pc),
       dex_method_index_(dex_method_index),
       original_invoke_type_(original_invoke_type),
-      intrinsic_(Intrinsics::kNone) {
+      intrinsic_(Intrinsics::kNone),
+      needs_environment_(kNeedsEnvironment) {
     uint32_t number_of_inputs = number_of_arguments + number_of_other_inputs;
     inputs_.SetSize(number_of_inputs);
   }
@@ -2875,6 +2982,7 @@
   const uint32_t dex_method_index_;
   const InvokeType original_invoke_type_;
   Intrinsics intrinsic_;
+  IntrinsicNeedsEnvironment needs_environment_;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HInvoke);
@@ -3068,7 +3176,7 @@
                uint16_t type_index,
                const DexFile& dex_file,
                QuickEntrypointEnum entrypoint)
-      : HExpression(Primitive::kPrimNot, SideEffects::None()),
+      : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC()),
         dex_pc_(dex_pc),
         type_index_(type_index),
         dex_file_(dex_file),
@@ -3131,7 +3239,7 @@
             uint16_t type_index,
             const DexFile& dex_file,
             QuickEntrypointEnum entrypoint)
-      : HExpression(Primitive::kPrimNot, SideEffects::None()),
+      : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC()),
         dex_pc_(dex_pc),
         type_index_(type_index),
         dex_file_(dex_file),
@@ -3232,7 +3340,8 @@
 class HDiv : public HBinaryOperation {
  public:
   HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
-      : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
+      : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls()),
+        dex_pc_(dex_pc) {}
 
   template <typename T>
   T Compute(T x, T y) const {
@@ -3252,6 +3361,11 @@
 
   uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
 
+  static SideEffects SideEffectsForArchRuntimeCalls() {
+    // The generated code can use a runtime call.
+    return SideEffects::CanTriggerGC();
+  }
+
   DECLARE_INSTRUCTION(Div);
 
  private:
@@ -3263,7 +3377,8 @@
 class HRem : public HBinaryOperation {
  public:
   HRem(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
-      : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
+      : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls()),
+        dex_pc_(dex_pc) {}
 
   template <typename T>
   T Compute(T x, T y) const {
@@ -3283,6 +3398,10 @@
 
   uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
 
+  static SideEffects SideEffectsForArchRuntimeCalls() {
+    return SideEffects::CanTriggerGC();
+  }
+
   DECLARE_INSTRUCTION(Rem);
 
  private:
@@ -3593,7 +3712,8 @@
  public:
   // Instantiate a type conversion of `input` to `result_type`.
   HTypeConversion(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc)
-      : HExpression(result_type, SideEffects::None()), dex_pc_(dex_pc) {
+      : HExpression(result_type, SideEffectsForArchRuntimeCalls(input->GetType(), result_type)),
+        dex_pc_(dex_pc) {
     SetRawInputAt(0, input);
     DCHECK_NE(input->GetType(), result_type);
   }
@@ -3613,6 +3733,19 @@
   // containing the result.  If the input cannot be converted, return nullptr.
   HConstant* TryStaticEvaluation() const;
 
+  static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type input_type,
+                                                    Primitive::Type result_type) {
+    // Some architectures may not require the 'GC' side effects, but at this point
+    // in the compilation process we do not know what architecture we will
+    // generate code for, so we must be conservative.
+    if (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
+         && result_type == Primitive::kPrimLong)
+        || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat)) {
+      return SideEffects::CanTriggerGC();
+    }
+    return SideEffects::None();
+  }
+
   DECLARE_INSTRUCTION(TypeConversion);
 
  private:
@@ -3879,7 +4012,9 @@
             HInstruction* value,
             Primitive::Type expected_component_type,
             uint32_t dex_pc)
-      : HTemplateInstruction(SideEffects::ArrayWriteOfType(expected_component_type)),
+      : HTemplateInstruction(
+            SideEffects::ArrayWriteOfType(expected_component_type).Union(
+                SideEffectsForArchRuntimeCalls(value->GetType()))),
         dex_pc_(dex_pc),
         expected_component_type_(expected_component_type),
         needs_type_check_(value->GetType() == Primitive::kPrimNot),
@@ -3932,6 +4067,10 @@
         : expected_component_type_;
   }
 
+  static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type value_type) {
+    return (value_type == Primitive::kPrimNot) ? SideEffects::CanTriggerGC() : SideEffects::None();
+  }
+
   DECLARE_INSTRUCTION(ArraySet);
 
  private:
@@ -4026,7 +4165,7 @@
 class HSuspendCheck : public HTemplateInstruction<0> {
  public:
   explicit HSuspendCheck(uint32_t dex_pc)
-      : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc), slow_path_(nullptr) {}
+      : HTemplateInstruction(SideEffects::CanTriggerGC()), dex_pc_(dex_pc), slow_path_(nullptr) {}
 
   bool NeedsEnvironment() const OVERRIDE {
     return true;
@@ -4058,7 +4197,7 @@
              const DexFile& dex_file,
              bool is_referrers_class,
              uint32_t dex_pc)
-      : HExpression(Primitive::kPrimNot, SideEffects::None()),
+      : HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls()),
         type_index_(type_index),
         dex_file_(dex_file),
         is_referrers_class_(is_referrers_class),
@@ -4119,6 +4258,10 @@
 
   bool NeedsDexCache() const OVERRIDE { return !is_referrers_class_; }
 
+  static SideEffects SideEffectsForArchRuntimeCalls() {
+    return SideEffects::CanTriggerGC();
+  }
+
   DECLARE_INSTRUCTION(LoadClass);
 
  private:
@@ -4138,7 +4281,7 @@
 class HLoadString : public HExpression<1> {
  public:
   HLoadString(HCurrentMethod* current_method, uint32_t string_index, uint32_t dex_pc)
-      : HExpression(Primitive::kPrimNot, SideEffects::None()),
+      : HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls()),
         string_index_(string_index),
         dex_pc_(dex_pc) {
     SetRawInputAt(0, current_method);
@@ -4159,6 +4302,10 @@
   bool NeedsEnvironment() const OVERRIDE { return false; }
   bool NeedsDexCache() const OVERRIDE { return true; }
 
+  static SideEffects SideEffectsForArchRuntimeCalls() {
+    return SideEffects::CanTriggerGC();
+  }
+
   DECLARE_INSTRUCTION(LoadString);
 
  private:
@@ -4175,8 +4322,8 @@
  public:
   HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
       : HExpression(
-          Primitive::kPrimNot,
-          SideEffects::AllWrites()),  // assume write on all fields/arrays
+            Primitive::kPrimNot,
+            SideEffects::AllChanges()),  // Assume write on all fields/arrays, and can trigger GC.
         dex_pc_(dex_pc) {
     SetRawInputAt(0, constant);
   }
@@ -4305,7 +4452,7 @@
 class HThrow : public HTemplateInstruction<1> {
  public:
   HThrow(HInstruction* exception, uint32_t dex_pc)
-      : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc) {
+      : HTemplateInstruction(SideEffects::CanTriggerGC()), dex_pc_(dex_pc) {
     SetRawInputAt(0, exception);
   }
 
@@ -4331,7 +4478,7 @@
               HLoadClass* constant,
               bool class_is_final,
               uint32_t dex_pc)
-      : HExpression(Primitive::kPrimBoolean, SideEffects::None()),
+      : HExpression(Primitive::kPrimBoolean, SideEffectsForArchRuntimeCalls(class_is_final)),
         class_is_final_(class_is_final),
         must_do_null_check_(true),
         dex_pc_(dex_pc) {
@@ -4357,6 +4504,10 @@
   bool MustDoNullCheck() const { return must_do_null_check_; }
   void ClearMustDoNullCheck() { must_do_null_check_ = false; }
 
+  static SideEffects SideEffectsForArchRuntimeCalls(bool class_is_final) {
+    return class_is_final ? SideEffects::None() : SideEffects::CanTriggerGC();
+  }
+
   DECLARE_INSTRUCTION(InstanceOf);
 
  private:
@@ -4416,7 +4567,7 @@
              HLoadClass* constant,
              bool class_is_final,
              uint32_t dex_pc)
-      : HTemplateInstruction(SideEffects::None()),
+      : HTemplateInstruction(SideEffects::CanTriggerGC()),
         class_is_final_(class_is_final),
         must_do_null_check_(true),
         dex_pc_(dex_pc) {
@@ -4458,7 +4609,7 @@
  public:
   explicit HMemoryBarrier(MemBarrierKind barrier_kind)
       : HTemplateInstruction(
-          SideEffects::All()),  // assume write/read on all fields/arrays
+            SideEffects::AllWritesAndReads()),  // Assume write/read on all fields/arrays.
         barrier_kind_(barrier_kind) {}
 
   MemBarrierKind GetBarrierKind() { return barrier_kind_; }
@@ -4479,7 +4630,8 @@
   };
 
   HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
-    : HTemplateInstruction(SideEffects::All()),  // assume write/read on all fields/arrays
+    : HTemplateInstruction(
+          SideEffects::AllExceptGCDependency()),  // Assume write/read on all fields/arrays.
       kind_(kind), dex_pc_(dex_pc) {
     SetRawInputAt(0, object);
   }
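
The constants in the diff above are arranged so that MayDependOn() is a single
shift-and-mask: moving the "depend on" half down by kChangeBits lands each
dependency bit exactly on the side effect that can produce it. A minimal
standalone check of that alignment, mirroring the constants from the diff:

// Sketch only; the real definitions live in the private section of SideEffects.
constexpr int kFieldArrayAnalysisBits = 9;
constexpr int kFieldWriteOffset = 0;
constexpr int kArrayWriteOffset = kFieldWriteOffset + kFieldArrayAnalysisBits;  // 9
constexpr int kCanTriggerGCBit  = kArrayWriteOffset + kFieldArrayAnalysisBits;  // 18
constexpr int kChangeBits       = kCanTriggerGCBit + 1;                         // 19
constexpr int kFieldReadOffset  = kCanTriggerGCBit + 1;                         // 19
constexpr int kArrayReadOffset  = kFieldReadOffset + kFieldArrayAnalysisBits;   // 28
constexpr int kDependsOnGCBit   = kArrayReadOffset + kFieldArrayAnalysisBits;   // 37

// Each read/dependency bit, shifted down by kChangeBits, hits its writer.
static_assert(kFieldReadOffset - kChangeBits == kFieldWriteOffset, "field bits align");
static_assert(kArrayReadOffset - kChangeBits == kArrayWriteOffset, "array bits align");
static_assert(kDependsOnGCBit  - kChangeBits == kCanTriggerGCBit,  "GC bits align");

int main() { return 0; }
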
diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc
index 9dbf638..1c3e255 100644
--- a/compiler/optimizing/side_effects_analysis.cc
+++ b/compiler/optimizing/side_effects_analysis.cc
@@ -47,8 +47,8 @@
          inst_it.Advance()) {
       HInstruction* instruction = inst_it.Current();
       effects = effects.Union(instruction->GetSideEffects());
-      // If every possible write/read is represented, scanning further
-      // will not add any more information to side-effects of this block.
+      // If all side effects are represented, scanning further will not add any
+      // more information to side-effects of this block.
       if (effects.DoesAll()) {
         break;
       }
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
index 8db5a8a..ec45d6b 100644
--- a/compiler/optimizing/side_effects_test.cc
+++ b/compiler/optimizing/side_effects_test.cc
@@ -77,7 +77,7 @@
   EXPECT_TRUE(all.DoesAnyWrite());
   EXPECT_TRUE(all.DoesAnyRead());
   EXPECT_FALSE(all.DoesNothing());
-  EXPECT_TRUE(all.DoesAll());
+  EXPECT_TRUE(all.DoesAllReadWrite());
 }
 
 TEST(SideEffectsTest, None) {
@@ -85,7 +85,7 @@
   EXPECT_FALSE(none.DoesAnyWrite());
   EXPECT_FALSE(none.DoesAnyRead());
   EXPECT_TRUE(none.DoesNothing());
-  EXPECT_FALSE(none.DoesAll());
+  EXPECT_FALSE(none.DoesAllReadWrite());
 }
 
 TEST(SideEffectsTest, DependencesAndNoDependences) {
@@ -176,33 +176,53 @@
     s = s.Union(SideEffects::FieldReadOfType(type, false));
     s = s.Union(SideEffects::ArrayReadOfType(type));
   }
-  EXPECT_TRUE(s.DoesAll());
+  EXPECT_TRUE(s.DoesAllReadWrite());
+}
+
+TEST(SideEffectsTest, GC) {
+  SideEffects can_trigger_gc = SideEffects::CanTriggerGC();
+  SideEffects depends_on_gc = SideEffects::DependsOnGC();
+  SideEffects all_changes = SideEffects::AllChanges();
+  SideEffects all_dependencies = SideEffects::AllDependencies();
+
+  EXPECT_TRUE(depends_on_gc.MayDependOn(can_trigger_gc));
+  EXPECT_TRUE(depends_on_gc.Union(can_trigger_gc).MayDependOn(can_trigger_gc));
+  EXPECT_FALSE(can_trigger_gc.MayDependOn(depends_on_gc));
+
+  EXPECT_TRUE(depends_on_gc.MayDependOn(all_changes));
+  EXPECT_TRUE(depends_on_gc.Union(can_trigger_gc).MayDependOn(all_changes));
+  EXPECT_FALSE(can_trigger_gc.MayDependOn(all_changes));
+
+  EXPECT_TRUE(all_changes.Includes(can_trigger_gc));
+  EXPECT_FALSE(all_changes.Includes(depends_on_gc));
+  EXPECT_TRUE(all_dependencies.Includes(depends_on_gc));
+  EXPECT_FALSE(all_dependencies.Includes(can_trigger_gc));
 }
 
 TEST(SideEffectsTest, BitStrings) {
   EXPECT_STREQ(
-      "|||||",
+      "|||||||",
       SideEffects::None().ToString().c_str());
   EXPECT_STREQ(
-      "|DFJISCBZL|DFJISCBZL|DFJISCBZL|DFJISCBZL|",
+      "|GC|DFJISCBZL|DFJISCBZL|GC|DFJISCBZL|DFJISCBZL|",
       SideEffects::All().ToString().c_str());
   EXPECT_STREQ(
-      "|||DFJISCBZL|DFJISCBZL|",
+      "|||||DFJISCBZL|DFJISCBZL|",
       SideEffects::AllWrites().ToString().c_str());
   EXPECT_STREQ(
-      "|DFJISCBZL|DFJISCBZL|||",
+      "||DFJISCBZL|DFJISCBZL||||",
       SideEffects::AllReads().ToString().c_str());
   EXPECT_STREQ(
-      "||||L|",
+      "||||||L|",
       SideEffects::FieldWriteOfType(Primitive::kPrimNot, false).ToString().c_str());
   EXPECT_STREQ(
-      "|||Z||",
+      "|||||Z||",
       SideEffects::ArrayWriteOfType(Primitive::kPrimBoolean).ToString().c_str());
   EXPECT_STREQ(
-      "||B|||",
+      "|||B||||",
       SideEffects::FieldReadOfType(Primitive::kPrimByte, false).ToString().c_str());
   EXPECT_STREQ(
-      "|DJ||||",  // note: DJ alias
+      "||DJ|||||",  // note: DJ alias
       SideEffects::ArrayReadOfType(Primitive::kPrimDouble).ToString().c_str());
   SideEffects s = SideEffects::None();
   s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimChar, false));
@@ -212,7 +232,7 @@
   s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimFloat));
   s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
   EXPECT_STREQ(
-      "|DFJI|FI|S|DJC|",   // note: DJ/FI alias.
+      "||DFJI|FI||S|DJC|",   // note: DJ/FI alias.
       s.ToString().c_str());
 }
 
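The widened expectations follow a fixed six-column layout, |dep-GC|array-read|field-read|change-GC|array-write|field-write|, where each nine-bit column prints one letter per primitive kind (D, F, J, I, S, C, B, Z, L). A small decoder that reproduces the format; the column order is inferred from the expected strings above, so treat it as an assumption:

    #include <cassert>
    #include <cstdint>
    #include <string>

    namespace {
    constexpr int kBitsPerGroup = 9;
    constexpr const char* kTypeChars = "DFJISCBZL";

    // Prints one letter per set bit, most significant (D) first.
    std::string GroupToString(uint32_t group) {
      std::string s;
      for (int i = 0; i < kBitsPerGroup; ++i) {
        if ((group >> (kBitsPerGroup - 1 - i)) & 1) {
          s += kTypeChars[i];
        }
      }
      return s;
    }
    }  // namespace

    std::string ToBitString(bool gc_dep, uint32_t array_read, uint32_t field_read,
                            bool gc_change, uint32_t array_write, uint32_t field_write) {
      std::string s = "|";
      s += (gc_dep ? "GC" : ""); s += "|";
      s += GroupToString(array_read); s += "|";
      s += GroupToString(field_read); s += "|";
      s += (gc_change ? "GC" : ""); s += "|";
      s += GroupToString(array_write); s += "|";
      s += GroupToString(field_write); s += "|";
      return s;
    }

    int main() {
      constexpr uint32_t kAllKinds = (1u << kBitsPerGroup) - 1;
      assert(ToBitString(false, 0, 0, false, 0, 0) == "|||||||");  // None()
      assert(ToBitString(true, kAllKinds, kAllKinds, true, kAllKinds, kAllKinds) ==
             "|GC|DFJISCBZL|DFJISCBZL|GC|DFJISCBZL|DFJISCBZL|");   // All()
      return 0;
    }
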
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0886e32..f19263d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -37,6 +37,7 @@
 #include "base/unix_file/fd_file.h"
 #include "base/value_object.h"
 #include "class_linker-inl.h"
+#include "class_table-inl.h"
 #include "compiler_callbacks.h"
 #include "debugger.h"
 #include "dex_file-inl.h"
@@ -582,6 +583,7 @@
 
   // Setup the ClassLoader, verifying the object_size_.
   class_root = FindSystemClass(self, "Ljava/lang/ClassLoader;");
+  class_root->SetClassLoaderClass();
   CHECK_EQ(class_root->GetObjectSize(), mirror::ClassLoader::InstanceSize());
   SetClassRoot(kJavaLangClassLoader, class_root);
 
@@ -1273,15 +1275,10 @@
     // Moving concurrent:
     // Need to make sure to not copy ArtMethods without doing read barriers since the roots are
     // marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy.
-    boot_class_table_.VisitRoots(visitor, flags);
+    boot_class_table_.VisitRoots(buffered_visitor);
     for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
       // May be null for boot ClassLoader.
       root.VisitRoot(visitor, RootInfo(kRootVMInternal));
-      ClassTable* const class_table = root.Read()->GetClassTable();
-      if (class_table != nullptr) {
-        // May be null if we have no classes.
-        class_table->VisitRoots(visitor, flags);
-      }
     }
   } else if ((flags & kVisitRootFlagNewRoots) != 0) {
     for (auto& root : new_class_roots_) {
@@ -2810,6 +2807,10 @@
   }
   VerifyObject(klass);
   class_table->InsertWithHash(klass, hash);
+  if (class_loader != nullptr) {
+    // This is necessary because we need to have the card dirtied for remembered sets.
+    Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+  }
   if (log_new_class_table_roots_) {
     new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
   }
@@ -4375,6 +4376,11 @@
     klass->SetFinalizable();
   }
 
+  // Inherit the class loader flag from the super class.
+  if (super->IsClassLoaderClass()) {
+    klass->SetClassLoaderClass();
+  }
+
   // Inherit reference flags (if any) from the superclass.
   int reference_flags = (super->GetAccessFlags() & kAccReferenceFlagsMask);
   if (reference_flags != 0) {
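
The WriteBarrierEveryFieldOf() call added in the hunk above is needed because the ClassTable lives in native memory: inserting a class makes it reachable through the loader without any reference-field store, so no automatic write barrier fires and the loader's card must be dirtied by hand for generational remembered sets. A toy card-table model of what that dirtying accomplishes; the card size, dirty value, and names are assumptions, not ART's:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Toy card table: one byte per 1 KiB of heap. A dirty byte tells the GC
    // to re-scan every reference in that region when updating remembered sets.
    class CardTable {
     public:
      static constexpr size_t kCardShift = 10;  // 1 KiB cards (assumption)
      static constexpr uint8_t kCardDirty = 1;

      explicit CardTable(uintptr_t heap_begin) : heap_begin_(heap_begin) {}

      // In spirit what WriteBarrierEveryFieldOf(obj) does: mark the object's
      // card so all of its outgoing references get re-examined.
      void MarkCard(uintptr_t obj_addr) {
        cards_[(obj_addr - heap_begin_) >> kCardShift] = kCardDirty;
      }
      bool IsDirty(uintptr_t obj_addr) const {
        return cards_[(obj_addr - heap_begin_) >> kCardShift] == kCardDirty;
      }

     private:
      uintptr_t heap_begin_;
      uint8_t cards_[64] = {};  // covers a 64 KiB toy heap
    };

    int main() {
      CardTable table(0x10000);
      const uintptr_t class_loader = 0x10400;  // pretend object address
      table.MarkCard(class_loader);            // after ClassTable::InsertWithHash
      assert(table.IsDirty(class_loader));
      return 0;
    }
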
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
new file mode 100644
index 0000000..dc60a2c
--- /dev/null
+++ b/runtime/class_table-inl.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CLASS_TABLE_INL_H_
+#define ART_RUNTIME_CLASS_TABLE_INL_H_
+
+#include "class_table.h"
+
+namespace art {
+
+template<class Visitor>
+void ClassTable::VisitRoots(Visitor& visitor) {
+  for (ClassSet& class_set : classes_) {
+    for (GcRoot<mirror::Class>& root : class_set) {
+      visitor.VisitRoot(root.AddressWithoutBarrier());
+    }
+  }
+}
+
+template<class Visitor>
+void ClassTable::VisitRoots(const Visitor& visitor) {
+  for (ClassSet& class_set : classes_) {
+    for (GcRoot<mirror::Class>& root : class_set) {
+      visitor.VisitRoot(root.AddressWithoutBarrier());
+    }
+  }
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_CLASS_TABLE_INL_H_
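
The two templated overloads replace the old virtual RootVisitor path: a stateful visitor (such as the BufferedRootVisitor now used by the class linker) binds to Visitor&, while a stateless const visitor binds to const Visitor&, and neither pays for a virtual call per root. A simplified, compilable model with stand-in types:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Root {};

    class ClassTable {
     public:
      template <class Visitor>
      void VisitRoots(Visitor& visitor) {
        for (Root& root : roots_) {
          visitor.VisitRoot(&root);
        }
      }
      template <class Visitor>
      void VisitRoots(const Visitor& visitor) {
        for (Root& root : roots_) {
          visitor.VisitRoot(&root);
        }
      }

      std::vector<Root> roots_;
    };

    struct CountingVisitor {
      void VisitRoot(Root*) { ++count; }
      size_t count = 0;
    };

    int main() {
      ClassTable table;
      table.roots_.resize(3);
      CountingVisitor visitor;
      table.VisitRoots(visitor);  // binds the non-const overload
      assert(visitor.count == 3);
      return 0;
    }
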
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index c245d4e..fc8e6c4 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -61,16 +61,6 @@
   return existing;
 }
 
-void ClassTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags ATTRIBUTE_UNUSED) {
-  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
-      visitor, RootInfo(kRootStickyClass));
-  for (ClassSet& class_set : classes_) {
-    for (GcRoot<mirror::Class>& root : class_set) {
-      buffered_visitor.VisitRoot(root);
-    }
-  }
-}
-
 bool ClassTable::Visit(ClassVisitor* visitor) {
   for (ClassSet& class_set : classes_) {
     for (GcRoot<mirror::Class>& root : class_set) {
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 4182954..6b18d90 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -67,8 +67,15 @@
   mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
       REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
-      REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+  // NO_THREAD_SAFETY_ANALYSIS because object marking may require the heap bitmap lock.
+  template<class Visitor>
+  void VisitRoots(Visitor& visitor)
+      SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_)
+      NO_THREAD_SAFETY_ANALYSIS;
+  template<class Visitor>
+  void VisitRoots(const Visitor& visitor)
+      SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_)
+      NO_THREAD_SAFETY_ANALYSIS;
 
   // Return false if the callback told us to exit.
   bool Visit(ClassVisitor* visitor)
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 3b6d2aa..b0a8a5b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1276,7 +1276,7 @@
   explicit MarkVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
   }
 
-  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
       ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_) {
     if (kCheckLocks) {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index b814432..efa065b 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -67,7 +67,6 @@
     // non moving space). This can happen if there is significant virtual address space
     // fragmentation.
   }
-  AllocationTimer alloc_timer(this, &obj);
   // bytes allocated for the (individual) object.
   size_t bytes_allocated;
   size_t usable_size;
@@ -386,22 +385,6 @@
   return ret;
 }
 
-inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
-    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr),
-      allocation_start_time_(kMeasureAllocationTime ? NanoTime() / kTimeAdjust : 0u) { }
-
-inline Heap::AllocationTimer::~AllocationTimer() {
-  if (kMeasureAllocationTime) {
-    mirror::Object* allocated_obj = *allocated_obj_ptr_;
-    // Only if the allocation succeeded, record the time.
-    if (allocated_obj != nullptr) {
-      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
-      heap_->total_allocation_time_.FetchAndAddSequentiallyConsistent(
-          allocation_end_time - allocation_start_time_);
-    }
-  }
-}
-
 inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
   // We need to have a zygote space or else our newly allocated large object can end up in the
   // Zygote resulting in it being prematurely freed.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 89773ce..e56351f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -205,7 +205,6 @@
       target_utilization_(target_utilization),
       foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
       total_wait_time_(0),
-      total_allocation_time_(0),
       verify_object_mode_(kVerifyObjectModeDisabled),
       disable_moving_gc_count_(0),
       is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
@@ -230,7 +229,8 @@
       alloc_tracking_enabled_(false),
       backtrace_lock_(nullptr),
       seen_backtrace_count_(0u),
-      unique_backtrace_count_(0u) {
+      unique_backtrace_count_(0u),
+      gc_disabled_for_shutdown_(false) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
@@ -981,8 +981,6 @@
     total_paused_time += collector->GetTotalPausedTimeNs();
     collector->DumpPerformanceInfo(os);
   }
-  uint64_t allocation_time =
-      static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
   if (total_duration != 0) {
     const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
     os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
@@ -1000,11 +998,6 @@
   os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
   os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
   os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
-  if (kMeasureAllocationTime) {
-    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
-    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
-       << "\n";
-  }
   if (HasZygoteSpace()) {
     os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
   }
@@ -1037,7 +1030,6 @@
   for (auto& collector : garbage_collectors_) {
     collector->ResetMeasurements();
   }
-  total_allocation_time_.StoreRelaxed(0);
   total_bytes_freed_ever_ = 0;
   total_objects_freed_ever_ = 0;
   total_wait_time_ = 0;
@@ -2443,6 +2435,9 @@
       LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
       return collector::kGcTypeNone;
     }
+    if (gc_disabled_for_shutdown_) {
+      return collector::kGcTypeNone;
+    }
     collector_type_running_ = collector_type_;
   }
   if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
@@ -3856,5 +3851,12 @@
   }
 }
 
+void Heap::DisableGCForShutdown() {
+  Thread* const self = Thread::Current();
+  CHECK(Runtime::Current()->IsShuttingDown(self));
+  MutexLock mu(self, *gc_complete_lock_);
+  gc_disabled_for_shutdown_ = true;
+}
+
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 055095d..d94f109 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -126,7 +126,5 @@
 class Heap {
  public:
-  // If true, measure the total allocation time.
-  static constexpr bool kMeasureAllocationTime = false;
   static constexpr size_t kDefaultStartingSize = kPageSize;
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 256 * MB;
@@ -746,6 +745,8 @@
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!Locks::alloc_tracker_lock_);
 
+  void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
+
  private:
   class ConcurrentGCTask;
   class CollectorTransitionTask;
@@ -1213,9 +1214,6 @@
   // Total time which mutators are paused or waiting for GC to complete.
   uint64_t total_wait_time_;
 
-  // Total number of objects allocated in microseconds.
-  AtomicInteger total_allocation_time_;
-
   // The current state of heap verification, may be enabled or disabled.
   VerifyObjectMode verify_object_mode_;
 
@@ -1297,6 +1295,10 @@
   // Stack trace hashes that we already saw,
   std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
 
+  // We disable GC when we are shutting down the runtime in case there are daemon threads still
+  // allocating.
+  bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
+
   friend class CollectorTransitionTask;
   friend class collector::GarbageCollector;
   friend class collector::MarkCompact;
@@ -1309,18 +1311,6 @@
   friend class VerifyObjectVisitor;
   friend class space::SpaceTest;
 
-  class AllocationTimer {
-   public:
-    ALWAYS_INLINE AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
-    ALWAYS_INLINE ~AllocationTimer();
-   private:
-    Heap* const heap_;
-    mirror::Object** allocated_obj_ptr_;
-    const uint64_t allocation_start_time_;
-
-    DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationTimer);
-  };
-
   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
 };
 
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index e67ea3f..713797f 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -883,6 +883,7 @@
                      gc::EqAllocRecordTypesPtr<gc::AllocRecordStackTraceElement>> frames_;
   std::unordered_map<const mirror::Object*, const gc::AllocRecordStackTrace*> allocation_records_;
 
+  friend class GcRootVisitor;
   DISALLOW_COPY_AND_ASSIGN(Hprof);
 };
 
@@ -1023,12 +1024,47 @@
   ++objects_in_segment_;
 }
 
+// Used to visit the GcRoots kept alive by ArtFields, ArtMethods, and ClassLoaders.
+class GcRootVisitor {
+ public:
+  explicit GcRootVisitor(Hprof* hprof) : hprof_(hprof) {}
+
+  void operator()(mirror::Object* obj ATTRIBUTE_UNUSED,
+                  MemberOffset offset ATTRIBUTE_UNUSED,
+                  bool is_static ATTRIBUTE_UNUSED) const {}
+
+  // Note that these don't have read barriers. That is OK, however, since the GC is guaranteed
+  // not to be running during the hprof dumping process.
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (!root->IsNull()) {
+      VisitRoot(root);
+    }
+  }
+
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    mirror::Object* obj = root->AsMirrorPtr();
+    // The root is either a class or a dex cache array. If it is a dex cache array, report it as
+    // VM internal. Otherwise the object is the declaring class of an ArtField or ArtMethod, or a
+    // class from a ClassLoader.
+    hprof_->VisitRoot(obj, RootInfo(obj->IsClass() ? kRootStickyClass : kRootVMInternal));
+  }
+
+
+ private:
+  Hprof* const hprof_;
+};
+
 void Hprof::DumpHeapObject(mirror::Object* obj) {
   // Ignore classes that are retired.
   if (obj->IsClass() && obj->AsClass()->IsRetired()) {
     return;
   }
 
+  GcRootVisitor visitor(this);
+  obj->VisitReferences<true>(visitor, VoidFunctor());
+
   gc::Heap* const heap = Runtime::Current()->GetHeap();
   const gc::space::ContinuousSpace* const space = heap->FindContinuousSpaceFromObject(obj, true);
   HprofHeapId heap_type = HPROF_HEAP_APP;
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 513ab37..dc60a38 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -236,6 +236,15 @@
     SetAccessFlags(flags | kAccClassIsStringClass);
   }
 
+  ALWAYS_INLINE bool IsClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+    return (GetField32(AccessFlagsOffset()) & kAccClassIsClassLoaderClass) != 0;
+  }
+
+  ALWAYS_INLINE void SetClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+    uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+    SetAccessFlags(flags | kAccClassIsClassLoaderClass);
+  }
+
   // Returns true if the class is abstract.
   ALWAYS_INLINE bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAbstract) != 0;
diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h
new file mode 100644
index 0000000..35f3664
--- /dev/null
+++ b/runtime/mirror/class_loader-inl.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_CLASS_LOADER_INL_H_
+#define ART_RUNTIME_MIRROR_CLASS_LOADER_INL_H_
+
+#include "class_loader.h"
+
+#include "base/mutex-inl.h"
+#include "class_table-inl.h"
+
+namespace art {
+namespace mirror {
+
+template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags, typename Visitor>
+inline void ClassLoader::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
+  // Visit instance fields first.
+  VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
+  // Then visit the classes loaded by this class loader.
+  ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+  ClassTable* const class_table = GetClassTable();
+  if (class_table != nullptr) {
+    class_table->VisitRoots(visitor);
+  }
+}
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_CLASS_LOADER_INL_H_
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 940aaa6..21c652a 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -26,6 +26,8 @@
 
 namespace mirror {
 
+class Class;
+
 // C++ mirror of java.lang.ClassLoader
 class MANAGED ClassLoader : public Object {
  public:
@@ -44,6 +46,12 @@
     SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_),
                       reinterpret_cast<uint64_t>(class_table));
   }
+  // Visit instance fields of the class loader as well as its associated classes.
+  // Null class loader is handled by ClassLinker::VisitClassRoots.
+  template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags, typename Visitor>
+  void VisitReferences(mirror::Class* klass, const Visitor& visitor)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::classlinker_classes_lock_);
 
  private:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index c5610b5..7b1660b 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -25,6 +25,7 @@
 #include "array-inl.h"
 #include "class.h"
 #include "class_linker.h"
+#include "class_loader-inl.h"
 #include "lock_word-inl.h"
 #include "monitor.h"
 #include "object_array-inl.h"
@@ -997,6 +998,18 @@
   klass->VisitFieldsReferences<kVisitClass, true>(0, visitor);
 }
 
+
+template<VerifyObjectFlags kVerifyFlags>
+inline bool Object::IsClassLoader() {
+  return GetClass<kVerifyFlags>()->IsClassLoaderClass();
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline mirror::ClassLoader* Object::AsClassLoader() {
+  DCHECK(IsClassLoader<kVerifyFlags>());
+  return down_cast<mirror::ClassLoader*>(this);
+}
+
 template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags, typename Visitor,
     typename JavaLangRefVisitor>
 inline void Object::VisitReferences(const Visitor& visitor,
@@ -1010,6 +1023,9 @@
     } else if (kVisitClass) {
       visitor(this, ClassOffset(), false);
     }
+  } else if (klass->IsClassLoaderClass()) {
+    mirror::ClassLoader* class_loader = AsClassLoader<kVerifyFlags>();
+    class_loader->VisitReferences<kVisitClass, kVerifyFlags>(klass, visitor);
   } else {
     DCHECK(!klass->IsVariableSize());
     VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
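
A compact model of the new branch: a class-loader instance visits its ordinary instance fields first and then reports every class in its table, which is how classes loaded by an app class loader stay reachable during marking. All types below are simplified stand-ins for the mirror:: classes:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Object {
      bool is_class_loader = false;
      std::vector<Object*> fields;       // instance reference fields
      std::vector<Object*> class_table;  // only used by class loaders

      template <typename Visitor>
      void VisitReferences(const Visitor& visitor) {
        for (Object* ref : fields) {
          visitor(ref);  // Visit instance fields first.
        }
        if (is_class_loader) {
          for (Object* klass : class_table) {
            visitor(klass);  // Then the classes this loader keeps alive.
          }
        }
      }
    };

    int main() {
      Object klass, field_value, loader;
      loader.is_class_loader = true;
      loader.fields.push_back(&field_value);
      loader.class_table.push_back(&klass);

      size_t visited = 0;
      loader.VisitReferences([&visited](Object*) { ++visited; });
      assert(visited == 2);  // one field plus one loaded class
      return 0;
    }
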
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index eea9f37..4967a14 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -37,6 +37,7 @@
 
 class Array;
 class Class;
+class ClassLoader;
 class FinalizerReference;
 template<class T> class ObjectArray;
 template<class T> class PrimitiveArray;
@@ -156,6 +157,11 @@
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ObjectArray<T>* AsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool IsClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ClassLoader* AsClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   bool IsArrayInstance() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 8586dd1..8b363a6 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -55,6 +55,9 @@
 // Special runtime-only flags.
 // Note: if only kAccClassIsReference is set, we have a soft reference.
 
+// class is ClassLoader or one of its subclasses
+static constexpr uint32_t kAccClassIsClassLoaderClass   = 0x10000000;
+
 // class/ancestor overrides finalize()
 static constexpr uint32_t kAccClassIsFinalizable        = 0x80000000;
 // class is a soft/weak/phantom ref
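
With the flag in place, "is this a class loader?" is a single bit test on the access flags, and class linking only has to copy the bit down from the superclass. A minimal sketch; Klass is a stand-in for mirror::Class, and only the flag constant is taken from the patch:

    #include <cassert>
    #include <cstdint>

    static constexpr uint32_t kAccClassIsClassLoaderClass = 0x10000000;

    struct Klass {
      uint32_t access_flags = 0;
      bool IsClassLoaderClass() const {
        return (access_flags & kAccClassIsClassLoaderClass) != 0;
      }
      void SetClassLoaderClass() { access_flags |= kAccClassIsClassLoaderClass; }
    };

    int main() {
      Klass class_loader;  // java.lang.ClassLoader, flagged once at startup
      class_loader.SetClassLoaderClass();

      Klass user_loader;   // a subclass being linked
      if (class_loader.IsClassLoaderClass()) {
        user_loader.SetClassLoaderClass();  // inherit the flag from the super class
      }
      assert(user_loader.IsClassLoaderClass());
      return 0;
    }
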
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 75ff27f..dd3703c 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -57,6 +57,7 @@
   kIntrinsicReferenceGetReferent,
   kIntrinsicCharAt,
   kIntrinsicCompareTo,
+  kIntrinsicEquals,
   kIntrinsicGetCharsNoCheck,
   kIntrinsicIsEmptyOrLength,
   kIntrinsicIndexOf,
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 62d1e84..d449f42 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -78,6 +78,13 @@
     Runtime::Current()->DetachCurrentThread();
   }
   WaitForOtherNonDaemonThreadsToExit();
+  // Disable GC and wait for GC to complete in case there are still daemon threads doing
+  // allocations.
+  gc::Heap* const heap = Runtime::Current()->GetHeap();
+  heap->DisableGCForShutdown();
+  // In case a GC is in progress, wait for it to finish.
+  heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current());
+
   // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
   //       Thread::Init.
   SuspendAllDaemonThreads();
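
The sequence above works because DisableGCForShutdown() sets a flag under gc_complete_lock_, making any collection requested afterwards (for example by a daemon thread that is still allocating) return kGcTypeNone. A simplified model of that interlock, with std::mutex standing in for the ART lock types:

    #include <cassert>
    #include <mutex>

    class Heap {
     public:
      void DisableGCForShutdown() {
        std::lock_guard<std::mutex> mu(gc_complete_lock_);
        gc_disabled_for_shutdown_ = true;
      }

      // Returns false (think collector::kGcTypeNone) when GC is disabled.
      bool CollectGarbage() {
        std::lock_guard<std::mutex> mu(gc_complete_lock_);
        if (gc_disabled_for_shutdown_) {
          return false;
        }
        // ... run the collection ...
        return true;
      }

     private:
      std::mutex gc_complete_lock_;
      bool gc_disabled_for_shutdown_ = false;
    };

    int main() {
      Heap heap;
      assert(heap.CollectGarbage());   // normal operation
      heap.DisableGCForShutdown();     // runtime is shutting down
      assert(!heap.CollectGarbage());  // later requests are no-ops
      return 0;
    }
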
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 0181e5b..1661534 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -107,7 +107,7 @@
 }
 
 static void SafelyMarkAllRegistersAsConflicts(MethodVerifier* verifier, RegisterLine* reg_line) {
-  if (verifier->IsConstructor()) {
+  if (verifier->IsInstanceConstructor()) {
     // Before we mark all regs as conflicts, check that we don't have an uninitialized this.
     reg_line->CheckConstructorReturn(verifier);
   }
@@ -1373,9 +1373,15 @@
     // argument as uninitialized. This restricts field access until the superclass constructor is
     // called.
     const RegType& declaring_class = GetDeclaringClass();
-    if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
-      reg_line->SetRegisterType(this, arg_start + cur_arg,
-                                reg_types_.UninitializedThisArgument(declaring_class));
+    if (IsConstructor()) {
+      if (declaring_class.IsJavaLangObject()) {
+        // "this" is implicitly initialized.
+        reg_line->SetThisInitialized();
+        reg_line->SetRegisterType(this, arg_start + cur_arg, declaring_class);
+      } else {
+        reg_line->SetRegisterType(this, arg_start + cur_arg,
+                                  reg_types_.UninitializedThisArgument(declaring_class));
+      }
     } else {
       reg_line->SetRegisterType(this, arg_start + cur_arg, declaring_class);
     }
@@ -1698,16 +1704,6 @@
   std::unique_ptr<RegisterLine> branch_line;
   std::unique_ptr<RegisterLine> fallthrough_line;
 
-  /*
-   * If we are in a constructor, and we currently have an UninitializedThis type
-   * in a register somewhere, we need to make sure it isn't overwritten.
-   */
-  bool track_uninitialized_this = false;
-  size_t uninitialized_this_loc = 0;
-  if (IsConstructor()) {
-    track_uninitialized_this = work_line_->GetUninitializedThisLoc(this, &uninitialized_this_loc);
-  }
-
   switch (inst->Opcode()) {
     case Instruction::NOP:
       /*
@@ -1785,14 +1781,14 @@
       break;
     }
     case Instruction::RETURN_VOID:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
+      if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) {
         if (!GetMethodReturnType().IsConflict()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void not expected";
         }
       }
       break;
     case Instruction::RETURN:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
+      if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) {
         /* check the method signature */
         const RegType& return_type = GetMethodReturnType();
         if (!return_type.IsCategory1Types()) {
@@ -1817,7 +1813,7 @@
       }
       break;
     case Instruction::RETURN_WIDE:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
+      if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) {
         /* check the method signature */
         const RegType& return_type = GetMethodReturnType();
         if (!return_type.IsCategory2Types()) {
@@ -1833,7 +1829,7 @@
       }
       break;
     case Instruction::RETURN_OBJECT:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
+      if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) {
         const RegType& return_type = GetMethodReturnType();
         if (!return_type.IsReferenceTypes()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
@@ -3003,20 +2999,6 @@
      */
   }  // end - switch (dec_insn.opcode)
 
-  /*
-   * If we are in a constructor, and we had an UninitializedThis type
-   * in a register somewhere, we need to make sure it wasn't overwritten.
-   */
-  if (track_uninitialized_this) {
-    bool was_invoke_direct = (inst->Opcode() == Instruction::INVOKE_DIRECT ||
-                              inst->Opcode() == Instruction::INVOKE_DIRECT_RANGE);
-    if (work_line_->WasUninitializedThisOverwritten(this, uninitialized_this_loc,
-                                                    was_invoke_direct)) {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD)
-          << "Constructor failed to initialize this object";
-    }
-  }
-
   if (have_pending_hard_failure_) {
     if (Runtime::Current()->IsAotCompiler()) {
       /* When AOT compiling, check that the last failure is a hard failure */
@@ -4378,6 +4360,10 @@
       const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn);
       Instruction::Code opcode = ret_inst->Opcode();
       if (opcode == Instruction::RETURN_VOID || opcode == Instruction::RETURN_VOID_NO_BARRIER) {
+        // Explicitly copy the this-initialized flag from the merge-line, as we didn't copy its
+        // state. Must be done before SafelyMarkAllRegistersAsConflicts as that will do the
+        // super-constructor-call checking.
+        target_line->CopyThisInitialized(*merge_line);
         SafelyMarkAllRegistersAsConflicts(this, target_line);
       } else {
         target_line->CopyFromLine(merge_line);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 3b59bba..21f8543 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -283,6 +283,10 @@
     return (method_access_flags_ & kAccStatic) != 0;
   }
 
+  bool IsInstanceConstructor() const {
+    return IsConstructor() && !IsStatic();
+  }
+
   SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() {
     return string_init_pc_reg_map_;
   }
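
The distinction matters because <clinit> carries kAccConstructor but is static and has no "this" to initialize, so the constructor-return checks must not apply to it. A self-contained illustration using the dex access-flag values; the struct is a stand-in for MethodVerifier:

    #include <cassert>
    #include <cstdint>

    static constexpr uint32_t kAccStatic      = 0x0008;   // dex access flag
    static constexpr uint32_t kAccConstructor = 0x10000;  // dex access flag

    struct MethodVerifier {
      uint32_t method_access_flags;
      bool IsConstructor() const { return (method_access_flags & kAccConstructor) != 0; }
      bool IsStatic() const { return (method_access_flags & kAccStatic) != 0; }
      bool IsInstanceConstructor() const { return IsConstructor() && !IsStatic(); }
    };

    int main() {
      MethodVerifier init{kAccConstructor};                 // <init>
      MethodVerifier clinit{kAccConstructor | kAccStatic};  // <clinit>
      assert(init.IsInstanceConstructor());
      assert(!clinit.IsInstanceConstructor());
      return 0;
    }
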
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 2838681..f286a45 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -18,66 +18,30 @@
 
 #include "base/stringprintf.h"
 #include "dex_instruction-inl.h"
-#include "method_verifier.h"
+#include "method_verifier-inl.h"
 #include "register_line-inl.h"
 #include "reg_type-inl.h"
 
 namespace art {
 namespace verifier {
 
-bool RegisterLine::WasUninitializedThisOverwritten(MethodVerifier* verifier,
-                                                   size_t this_loc,
-                                                   bool was_invoke_direct) const {
-  DCHECK(verifier->IsConstructor());
-
-  // Is the UnintializedThis type still there?
-  if (GetRegisterType(verifier, this_loc).IsUninitializedThisReference() ||
-      GetRegisterType(verifier, this_loc).IsUnresolvedAndUninitializedThisReference()) {
-    return false;
-  }
-
-  // If there is an initialized reference here now, did we just perform an invoke-direct? Note that
-  // this is the correct approach for dex bytecode: results of invoke-direct are stored in the
-  // result register. Overwriting "this_loc" can only be done by a constructor call.
-  if (GetRegisterType(verifier, this_loc).IsReferenceTypes() && was_invoke_direct) {
-    return false;
-    // Otherwise we could have just copied a different initialized reference to this location.
-  }
-
-  // The UnintializedThis in the register is gone, so check to see if it's somewhere else now.
-  for (size_t i = 0; i < num_regs_; i++) {
-    if (GetRegisterType(verifier, i).IsUninitializedThisReference() ||
-        GetRegisterType(verifier, i).IsUnresolvedAndUninitializedThisReference()) {
-      // We found it somewhere else...
-      return false;
-    }
-  }
-
-  // The UninitializedThis is gone from the original register, and now we can't find it.
-  return true;
-}
-
-bool RegisterLine::GetUninitializedThisLoc(MethodVerifier* verifier, size_t* vreg) const {
-  for (size_t i = 0; i < num_regs_; i++) {
-    if (GetRegisterType(verifier, i).IsUninitializedThisReference() ||
-        GetRegisterType(verifier, i).IsUnresolvedAndUninitializedThisReference()) {
-      *vreg = i;
-      return true;
-    }
-  }
-  return false;
-}
-
 bool RegisterLine::CheckConstructorReturn(MethodVerifier* verifier) const {
-  for (size_t i = 0; i < num_regs_; i++) {
-    if (GetRegisterType(verifier, i).IsUninitializedThisReference() ||
-        GetRegisterType(verifier, i).IsUnresolvedAndUninitializedThisReference()) {
-      verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
-          << "Constructor returning without calling superclass constructor";
-      return false;
+  if (kIsDebugBuild && this_initialized_) {
+    // Ensure that there is no UninitializedThisReference type anymore if this_initialized_ is true.
+    for (size_t i = 0; i < num_regs_; i++) {
+      const RegType& type = GetRegisterType(verifier, i);
+      CHECK(!type.IsUninitializedThisReference() &&
+            !type.IsUnresolvedAndUninitializedThisReference())
+          << i << ": " << type.IsUninitializedThisReference() << " in "
+          << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+                          *verifier->GetMethodReference().dex_file);
     }
   }
-  return true;
+  if (!this_initialized_) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+        << "Constructor returning without calling superclass constructor";
+  }
+  return this_initialized_;
 }
 
 const RegType& RegisterLine::GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
@@ -148,6 +112,11 @@
       }
     }
   }
+  // Is this initializing "this"?
+  if (uninit_type.IsUninitializedThisReference() ||
+      uninit_type.IsUnresolvedAndUninitializedThisReference()) {
+    this_initialized_ = true;
+  }
   DCHECK_GT(changed, 0u);
 }
 
@@ -432,6 +401,11 @@
       }
     }
   }
+  // Check whether "this" was initialized in both paths.
+  if (this_initialized_ && !incoming_line->this_initialized_) {
+    this_initialized_ = false;
+    changed = true;
+  }
   return changed;
 }
 
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 4fb3a2c..f61e51f 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -114,6 +114,7 @@
     memcpy(&line_, &src->line_, num_regs_ * sizeof(uint16_t));
     monitors_ = src->monitors_;
     reg_to_lock_depths_ = src->reg_to_lock_depths_;
+    this_initialized_ = src->this_initialized_;
   }
 
   std::string Dump(MethodVerifier* verifier) const SHARED_REQUIRES(Locks::mutator_lock_);
@@ -149,6 +150,14 @@
   void MarkAllRegistersAsConflictsExcept(MethodVerifier* verifier, uint32_t vsrc);
   void MarkAllRegistersAsConflictsExceptWide(MethodVerifier* verifier, uint32_t vsrc);
 
+  void SetThisInitialized() {
+    this_initialized_ = true;
+  }
+
+  void CopyThisInitialized(const RegisterLine& src) {
+    this_initialized_ = src.this_initialized_;
+  }
+
   /*
    * Check constraints on constructor return. Specifically, make sure that the "this" argument got
    * initialized.
@@ -158,18 +167,6 @@
    */
   bool CheckConstructorReturn(MethodVerifier* verifier) const;
 
-  /*
-   * Check if an UninitializedThis at the specified location has been overwritten before
-   * being correctly initialized.
-   */
-  bool WasUninitializedThisOverwritten(MethodVerifier* verifier, size_t this_loc,
-                                       bool was_invoke_direct) const;
-
-  /*
-   * Get the first location of an UninitializedThis type, or return kInvalidVreg if there are none.
-   */
-  bool GetUninitializedThisLoc(MethodVerifier* verifier, size_t* vreg) const;
-
   // Compare two register lines. Returns 0 if they match.
   // Using this for a sort is unwise, since the value can change based on machine endianness.
   int CompareLine(const RegisterLine* line2) const {
@@ -354,7 +351,7 @@
   }
 
   RegisterLine(size_t num_regs, MethodVerifier* verifier)
-      : num_regs_(num_regs) {
+      : num_regs_(num_regs), this_initialized_(false) {
     memset(&line_, 0, num_regs_ * sizeof(uint16_t));
     SetResultTypeToUnknown(verifier);
   }
@@ -372,6 +369,9 @@
   // monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5.
   AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_;
 
+  // Whether "this" initialization (a constructor supercall) has happened.
+  bool this_initialized_;
+
   // An array of RegType Ids associated with each dex register.
   uint16_t line_[0];
 
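The flag-based scheme replaces register scanning with a simple dataflow rule: the flag is set by the super-constructor call and, at a merge point, "this" stays initialized only if every incoming path initialized it. A minimal model of that merge rule:

    #include <cassert>

    struct RegisterLine {
      bool this_initialized = false;

      // Returns true if the merge changed this line, so the verifier knows
      // to revisit the block (mirrors the merge logic added above).
      bool MergeFrom(const RegisterLine& incoming) {
        if (this_initialized && !incoming.this_initialized) {
          this_initialized = false;
          return true;
        }
        return false;
      }
    };

    int main() {
      RegisterLine taken, fallthrough;
      taken.this_initialized = true;         // path that ran the supercall
      fallthrough.this_initialized = false;  // path that did not (e.g. handler)
      assert(taken.MergeFrom(fallthrough));  // flag cleared, line changed
      assert(!taken.this_initialized);
      return 0;
    }
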
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 884f280..e2101a6 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -38,4 +38,5 @@
 b/22411633 (5)
 b/22777307
 b/22881413
+b/20843113
 Done!
diff --git a/test/800-smali/smali/b_20843113.smali b/test/800-smali/smali/b_20843113.smali
new file mode 100644
index 0000000..ab3dc41
--- /dev/null
+++ b/test/800-smali/smali/b_20843113.smali
@@ -0,0 +1,34 @@
+.class public LB20843113;
+.super Ljava/lang/Object;
+
+
+.method public constructor <init>(I)V
+.registers 2
+
+:Label1
+       # An instruction that may throw, so as to pass UninitializedThis to the handler
+       div-int v1, v1, v1
+
+       # Call the super-constructor
+       invoke-direct {v0}, Ljava/lang/Object;-><init>()V
+
+       # Return normally.
+       return-void
+
+:Label2
+
+
+:Handler
+       move-exception v0                    # Overwrite the (last) "this" register. This should be
+                                            # allowed as we will terminate abnormally below.
+
+       throw v0                             # Terminate abnormally
+
+.catchall {:Label1 .. :Label2} :Handler
+.end method
+
+# Just a dummy method so the test harness has something to invoke.
+.method public static run()V
+.registers 1
+       return-void
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index e1ac749..3c88040 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -122,6 +122,7 @@
         testCases.add(new TestCase("b/22777307", "B22777307", "run", null, new InstantiationError(),
                 null));
         testCases.add(new TestCase("b/22881413", "B22881413", "run", null, null, null));
+        testCases.add(new TestCase("b/20843113", "B20843113", "run", null, null, null));
     }
 
     public void runTests() {