Merge "Compile time performance improvements focusing on interpret-only."
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 01c8f80..9f0a696 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -38,7 +38,7 @@
 #include "verifier/dex_gc_map.h"
 #include "verifier/method_verifier.h"
 #include "verifier/method_verifier-inl.h"
-#include "verifier/register_line.h"
+#include "verifier/reg_type-inl.h"
 #include "verifier/register_line-inl.h"
 
 namespace art {
@@ -127,7 +127,7 @@
         dex_gc_map_.push_back((i >> 8) & 0xFF);
       }
       verifier::RegisterLine* line = method_verifier->GetRegLine(i);
-      line->WriteReferenceBitMap(dex_gc_map_, ref_bitmap_bytes);
+      line->WriteReferenceBitMap(method_verifier, &dex_gc_map_, ref_bitmap_bytes);
     }
   }
   DCHECK_EQ(dex_gc_map_.size(), table_size);
@@ -151,7 +151,7 @@
       map_index++;
       verifier::RegisterLine* line = method_verifier->GetRegLine(i);
       for (size_t j = 0; j < code_item->registers_size_; j++) {
-        if (line->GetRegisterType(j).IsNonZeroReferenceTypes()) {
+        if (line->GetRegisterType(method_verifier, j).IsNonZeroReferenceTypes()) {
           DCHECK_LT(j / 8, map.RegWidth());
           DCHECK_EQ((reg_bitmap[j / 8] >> (j % 8)) & 1, 1);
         } else if ((j / 8) < map.RegWidth()) {
@@ -178,7 +178,7 @@
       local_gc_points++;
       max_insn = i;
       verifier::RegisterLine* line = method_verifier->GetRegLine(i);
-      max_ref_reg = line->GetMaxNonZeroReferenceReg(max_ref_reg);
+      max_ref_reg = line->GetMaxNonZeroReferenceReg(method_verifier, max_ref_reg);
     }
   }
   *gc_points = local_gc_points;
@@ -217,7 +217,8 @@
     bool is_range = (inst->Opcode() ==  Instruction::INVOKE_VIRTUAL_RANGE) ||
         (inst->Opcode() ==  Instruction::INVOKE_INTERFACE_RANGE);
     const verifier::RegType&
-        reg_type(line->GetRegisterType(is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
+        reg_type(line->GetRegisterType(method_verifier,
+                                       is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
 
     if (!reg_type.HasClass()) {
       // We will compute devirtualization information only when we know the Class of the reg type.
@@ -284,17 +285,20 @@
       const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
       bool is_safe_cast = false;
       if (code == Instruction::CHECK_CAST) {
-        const verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
+        const verifier::RegType& reg_type(line->GetRegisterType(method_verifier,
+                                                                inst->VRegA_21c()));
         const verifier::RegType& cast_type =
             method_verifier->ResolveCheckedClass(inst->VRegB_21c());
         is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type);
       } else {
-        const verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
+        const verifier::RegType& array_type(line->GetRegisterType(method_verifier,
+                                                                  inst->VRegB_23x()));
         // We only know it's safe to assign to an array if the array type is precise. For example,
         // an Object[] can have any type of object stored in it, but it may also be assigned a
         // String[] in which case the stores need to be of Strings.
         if (array_type.IsPreciseReference()) {
-          const verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
+          const verifier::RegType& value_type(line->GetRegisterType(method_verifier,
+                                                                    inst->VRegA_23x()));
           const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
               ->GetComponentType(array_type, method_verifier->GetClassLoader());
           is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index db6a01e..bbd1939 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1725,15 +1725,15 @@
      */
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
     std::string error_msg;
-    if (verifier::MethodVerifier::VerifyClass(&dex_file, dex_cache, class_loader, &class_def, true,
-                                              &error_msg) ==
+    if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
+                                              &class_def, true, &error_msg) ==
                                                   verifier::MethodVerifier::kHardFailure) {
       LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
                  << " because: " << error_msg;
     }
   } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
     CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
-    class_linker->VerifyClass(klass);
+    class_linker->VerifyClass(soa.Self(), klass);
 
     if (klass->IsErroneous()) {
       // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
@@ -1778,7 +1778,7 @@
     if (klass->IsVerified()) {
       // Attempt to initialize the class but bail if we either need to initialize the super-class
       // or static fields.
-      manager->GetClassLinker()->EnsureInitialized(klass, false, false);
+      manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
       if (!klass->IsInitialized()) {
         // We don't want non-trivial class initialization occurring on multiple threads due to
         // deadlock problems. For example, a parent class is initialized (holding its lock) that
@@ -1792,7 +1792,7 @@
         ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
         // Attempt to initialize allowing initialization of parent classes but still not static
         // fields.
-        manager->GetClassLinker()->EnsureInitialized(klass, false, true);
+        manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
         if (!klass->IsInitialized()) {
           // We need to initialize static fields, we only do this for image classes that aren't
           // marked with the $NoPreloadHolder (which implies this should not be initialized early).
@@ -1811,7 +1811,8 @@
             // Run the class initializer in transaction mode.
             runtime->EnterTransactionMode(&transaction);
             const mirror::Class::Status old_status = klass->GetStatus();
-            bool success = manager->GetClassLinker()->EnsureInitialized(klass, true, true);
+            bool success = manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
+                                                                        true);
             // TODO we detach transaction from runtime to indicate we quit the transactional
             // mode which prevents the GC from visiting objects modified during the transaction.
             // Ensure GC is not run so don't access freed objects when aborting transaction.
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 4cdf618..6ca0bcd 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -656,7 +656,8 @@
         StackHandleScope<1> hs(soa.Self());
         Handle<mirror::DexCache> dex_cache(
             hs.NewHandle(runtime->GetClassLinker()->FindDexCache(dex_file)));
-        verifier::MethodVerifier verifier(&dex_file, dex_cache, NullHandle<mirror::ClassLoader>(),
+        verifier::MethodVerifier verifier(soa.Self(), &dex_file, dex_cache,
+                                          NullHandle<mirror::ClassLoader>(),
                                           &class_def, code_item, dex_method_idx,
                                           NullHandle<mirror::ArtMethod>(), method_access_flags,
                                           true, true, true);
@@ -966,9 +967,10 @@
       StackHandleScope<1> hs(soa.Self());
       Handle<mirror::DexCache> dex_cache(
           hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file)));
-      verifier::MethodVerifier::VerifyMethodAndDump(os, dex_method_idx, dex_file, dex_cache,
-                                                    NullHandle<mirror::ClassLoader>(), &class_def,
-                                                    code_item, NullHandle<mirror::ArtMethod>(),
+      verifier::MethodVerifier::VerifyMethodAndDump(soa.Self(), os, dex_method_idx, dex_file,
+                                                    dex_cache, NullHandle<mirror::ClassLoader>(),
+                                                    &class_def, code_item,
+                                                    NullHandle<mirror::ArtMethod>(),
                                                     method_access_flags);
     }
   }
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 5f43bec..b8edad3 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -57,17 +57,19 @@
   }
 }
 
-void Barrier::Increment(Thread* self, int delta, uint32_t timeout_ms) {
+bool Barrier::Increment(Thread* self, int delta, uint32_t timeout_ms) {
   MutexLock mu(self, lock_);
   SetCountLocked(self, count_ + delta);
+  bool timed_out = false;
   if (count_ != 0) {
-    condition_.TimedWait(self, timeout_ms, 0);
+    timed_out = condition_.TimedWait(self, timeout_ms, 0);
   }
+  return timed_out;
 }
 
 void Barrier::SetCountLocked(Thread* self, int count) {
   count_ = count;
-  if (count_ == 0) {
+  if (count == 0) {
     condition_.Broadcast(self);
   }
 }
diff --git a/runtime/barrier.h b/runtime/barrier.h
index a433cac..167e1d6 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -38,10 +38,11 @@
   void Init(Thread* self, int count);
 
   // Increment the count by delta, wait on condition if count is non zero.
-  void Increment(Thread* self, int delta);
+  void Increment(Thread* self, int delta) LOCKS_EXCLUDED(lock_);
 
-  // Increment the count by delta, wait on condition if count is non zero, with a timeout
-  void Increment(Thread* self, int delta, uint32_t timeout_ms) LOCKS_EXCLUDED(lock_);
+  // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
+  // true if a timeout occurred.
+  bool Increment(Thread* self, int delta, uint32_t timeout_ms) LOCKS_EXCLUDED(lock_);
 
  private:
   void SetCountLocked(Thread* self, int count) EXCLUSIVE_LOCKS_REQUIRED(lock_);
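
With the timed Barrier::Increment now reporting whether the wait expired, callers can react to stragglers instead of blocking silently. A hypothetical usage sketch (WaitForWorkers and its parameters are illustrative, not part of this change):

    // The coordinator adds the number of workers it expects and waits, bounded by
    // timeout_ms, for each of them to decrement the barrier (e.g. via Barrier::Pass).
    static bool WaitForWorkers(Thread* self, Barrier* barrier, int pending_workers,
                               uint32_t timeout_ms) {
      bool timed_out = barrier->Increment(self, pending_workers, timeout_ms);
      if (timed_out) {
        LOG(WARNING) << "Timed out waiting for " << pending_workers << " worker(s)";
      }
      return !timed_out;
    }
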
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index f01ea0c..52a3dea 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -773,8 +773,9 @@
   guard_.recursion_count_ = old_recursion_count;
 }
 
-void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
+bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
   DCHECK(self == NULL || self == Thread::Current());
+  bool timed_out = false;
   guard_.AssertExclusiveHeld(self);
   guard_.CheckSafeToWait(self);
   unsigned int old_recursion_count = guard_.recursion_count_;
@@ -790,6 +791,7 @@
   if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
     if (errno == ETIMEDOUT) {
       // Timed out, we're done.
+      timed_out = true;
     } else if ((errno == EAGAIN) || (errno == EINTR)) {
       // A signal or ConditionVariable::Signal/Broadcast has come in.
     } else {
@@ -814,13 +816,16 @@
   timespec ts;
   InitTimeSpec(true, clock, ms, ns, &ts);
   int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
-  if (rc != 0 && rc != ETIMEDOUT) {
+  if (rc == ETIMEDOUT) {
+    timed_out = true;
+  } else if (rc != 0) {
     errno = rc;
     PLOG(FATAL) << "TimedWait failed for " << name_;
   }
   guard_.exclusive_owner_ = old_owner;
 #endif
   guard_.recursion_count_ = old_recursion_count;
+  return timed_out;
 }
 
 void Locks::Init() {
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 6642b1e..354298e 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -382,7 +382,7 @@
   // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
   //       pointer copy, thereby defeating annotalysis.
   void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
-  void TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
+  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
   // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
   // when waiting.
   // TODO: remove this.
diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h
index 91b83f6..2dde245 100644
--- a/runtime/base/stringpiece.h
+++ b/runtime/base/stringpiece.h
@@ -184,10 +184,22 @@
   return memcmp(p1, p2, len) == 0;
 }
 
+inline bool operator==(const StringPiece& x, const char* y) {
+  if (y == nullptr) {
+    return x.size() == 0;
+  } else {
+    return strncmp(x.data(), y, x.size()) == 0 && y[x.size()] == '\0';
+  }
+}
+
 inline bool operator!=(const StringPiece& x, const StringPiece& y) {
   return !(x == y);
 }
 
+inline bool operator!=(const StringPiece& x, const char* y) {
+  return !(x == y);
+}
+
 inline bool operator<(const StringPiece& x, const StringPiece& y) {
   const int r = memcmp(x.data(), y.data(),
                        std::min(x.size(), y.size()));
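
The new const char* overloads compare a StringPiece against a literal in one pass bounded by the StringPiece's own length, instead of implicitly constructing a temporary StringPiece (whose char* constructor runs strlen over the literal first). A small usage sketch (IsObjectDescriptor is illustrative, not from this change):

    inline bool IsObjectDescriptor(const StringPiece& descriptor) {
      // Resolves to operator==(const StringPiece&, const char*): strncmp up to
      // descriptor.size() plus a terminator check; no strlen, no temporary.
      return descriptor == "Ljava/lang/Object;";
    }
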
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 16cddd5..4474f1b 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -64,7 +64,7 @@
 #include "ScopedLocalRef.h"
 #include "scoped_thread_state_change.h"
 #include "handle_scope-inl.h"
-#include "thread.h"
+#include "thread-inl.h"
 #include "utils.h"
 #include "verifier/method_verifier.h"
 #include "well_known_classes.h"
@@ -89,21 +89,29 @@
   // a NoClassDefFoundError (v2 2.17.5).  The exception to this rule is if we
   // failed in verification, in which case v2 5.4.1 says we need to re-throw
   // the previous error.
-  if (!Runtime::Current()->IsCompiler()) {  // Give info if this occurs at runtime.
+  Runtime* runtime = Runtime::Current();
+  bool is_compiler = runtime->IsCompiler();
+  if (!is_compiler) {  // Give info if this occurs at runtime.
     LOG(INFO) << "Rejecting re-init on previously-failed class " << PrettyClass(c);
   }
 
   CHECK(c->IsErroneous()) << PrettyClass(c) << " " << c->GetStatus();
   Thread* self = Thread::Current();
-  ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-  if (c->GetVerifyErrorClass() != NULL) {
-    // TODO: change the verifier to store an _instance_, with a useful detail message?
-    std::string temp;
-    self->ThrowNewException(throw_location, c->GetVerifyErrorClass()->GetDescriptor(&temp),
-                            PrettyDescriptor(c).c_str());
+  if (is_compiler) {
+    // At compile time, accurate errors and NCDFE are disabled to speed compilation.
+    mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
+    self->SetException(ThrowLocation(), pre_allocated);
   } else {
-    self->ThrowNewException(throw_location, "Ljava/lang/NoClassDefFoundError;",
-                            PrettyDescriptor(c).c_str());
+    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+    if (c->GetVerifyErrorClass() != NULL) {
+      // TODO: change the verifier to store an _instance_, with a useful detail message?
+      std::string temp;
+      self->ThrowNewException(throw_location, c->GetVerifyErrorClass()->GetDescriptor(&temp),
+                              PrettyDescriptor(c).c_str());
+    } else {
+      self->ThrowNewException(throw_location, "Ljava/lang/NoClassDefFoundError;",
+                              PrettyDescriptor(c).c_str());
+    }
   }
 }
 
@@ -438,7 +446,7 @@
   for (size_t i = 0; i != boot_class_path.size(); ++i) {
     const DexFile* dex_file = boot_class_path[i];
     CHECK(dex_file != NULL);
-    AppendToBootClassPath(*dex_file);
+    AppendToBootClassPath(self, *dex_file);
   }
 
   // now we can use FindSystemClass
@@ -666,7 +674,7 @@
     if (!c->IsArrayClass() && !c->IsPrimitive()) {
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(GetClassRoot(ClassRoot(i))));
-      EnsureInitialized(h_class, true, true);
+      EnsureInitialized(self, h_class, true, true);
       self->AssertNoPendingException();
     }
   }
@@ -1880,7 +1888,7 @@
     while (!local_arg.success) {
       size_t class_table_size;
       {
-        ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+        ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
         class_table_size = class_table_.size();
       }
       mirror::Class* class_type = mirror::Class::GetJavaLangClass();
@@ -2024,7 +2032,7 @@
     }
     CHECK(h_class->IsRetired());
     // Get the updated class from class table.
-    klass = LookupClass(descriptor, h_class.Get()->GetClassLoader());
+    klass = LookupClass(self, descriptor, h_class.Get()->GetClassLoader());
   }
 
   // Wait for the class if it has not already been linked.
@@ -2083,11 +2091,11 @@
   ClassPathEntry pair = FindInClassPath(descriptor, boot_class_path_);
   // Check if this would be found in the parent boot class loader.
   if (pair.second != nullptr) {
-    mirror::Class* klass = LookupClass(descriptor, nullptr);
+    mirror::Class* klass = LookupClass(self, descriptor, nullptr);
     if (klass != nullptr) {
       return EnsureResolved(self, descriptor, klass);
     }
-    klass = DefineClass(descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
+    klass = DefineClass(self, descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
                         *pair.second);
     if (klass != nullptr) {
       return klass;
@@ -2139,7 +2147,7 @@
               if (dex_class_def != nullptr) {
                 RegisterDexFile(*dex_file);
                 mirror::Class* klass =
-                    DefineClass(descriptor, class_loader, *dex_file, *dex_class_def);
+                    DefineClass(self, descriptor, class_loader, *dex_file, *dex_class_def);
                 if (klass == nullptr) {
                   CHECK(self->IsExceptionPending()) << descriptor;
                   self->ClearException();
@@ -2167,7 +2175,7 @@
     return FindPrimitiveClass(descriptor[0]);
   }
   // Find the class in the loaded classes table.
-  mirror::Class* klass = LookupClass(descriptor, class_loader.Get());
+  mirror::Class* klass = LookupClass(self, descriptor, class_loader.Get());
   if (klass != nullptr) {
     return EnsureResolved(self, descriptor, klass);
   }
@@ -2178,7 +2186,8 @@
     // The boot class loader, search the boot class path.
     ClassPathEntry pair = FindInClassPath(descriptor, boot_class_path_);
     if (pair.second != nullptr) {
-      return DefineClass(descriptor, NullHandle<mirror::ClassLoader>(), *pair.first, *pair.second);
+      return DefineClass(self, descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
+                         *pair.second);
     } else {
       // The boot class loader is searched ahead of the application class loader, failures are
       // expected and will be wrapped in a ClassNotFoundException. Use the pre-allocated error to
@@ -2190,7 +2199,7 @@
   } else if (Runtime::Current()->UseCompileTimeClassPath()) {
     // First try with the bootstrap class loader.
     if (class_loader.Get() != nullptr) {
-      klass = LookupClass(descriptor, nullptr);
+      klass = LookupClass(self, descriptor, nullptr);
       if (klass != nullptr) {
         return EnsureResolved(self, descriptor, klass);
       }
@@ -2199,7 +2208,8 @@
     // a NoClassDefFoundError being allocated.
     ClassPathEntry pair = FindInClassPath(descriptor, boot_class_path_);
     if (pair.second != nullptr) {
-      return DefineClass(descriptor, NullHandle<mirror::ClassLoader>(), *pair.first, *pair.second);
+      return DefineClass(self, descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
+                         *pair.second);
     }
     // Next try the compile time class path.
     const std::vector<const DexFile*>* class_path;
@@ -2211,7 +2221,12 @@
     }
     pair = FindInClassPath(descriptor, *class_path);
     if (pair.second != nullptr) {
-      return DefineClass(descriptor, class_loader, *pair.first, *pair.second);
+      return DefineClass(self, descriptor, class_loader, *pair.first, *pair.second);
+    } else {
+      // Use the pre-allocated NCDFE at compile time to avoid wasting time constructing exceptions.
+      mirror::Throwable* pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+      self->SetException(ThrowLocation(), pre_allocated);
+      return nullptr;
     }
   } else {
     ScopedObjectAccessUnchecked soa(self);
@@ -2254,11 +2269,10 @@
   return nullptr;
 }
 
-mirror::Class* ClassLinker::DefineClass(const char* descriptor,
+mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor,
                                         ConstHandle<mirror::ClassLoader> class_loader,
                                         const DexFile& dex_file,
                                         const DexFile::ClassDef& dex_class_def) {
-  Thread* self = Thread::Current();
   StackHandleScope<3> hs(self);
   auto klass = hs.NewHandle<mirror::Class>(nullptr);
   bool should_allocate = false;
@@ -2299,7 +2313,7 @@
     return nullptr;
   }
   klass->SetDexCache(FindDexCache(dex_file));
-  LoadClass(dex_file, dex_class_def, klass, class_loader.Get());
+  LoadClass(self, dex_file, dex_class_def, klass, class_loader.Get());
   ObjectLock<mirror::Class> lock(self, klass);
   if (self->IsExceptionPending()) {
   // An exception occurred during load, set status to erroneous while holding klass' lock in case
@@ -2764,7 +2778,7 @@
 
 
 
-void ClassLinker::LoadClass(const DexFile& dex_file,
+void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
                             const DexFile::ClassDef& dex_class_def,
                             ConstHandle<mirror::Class> klass,
                             mirror::ClassLoader* class_loader) {
@@ -2800,22 +2814,21 @@
     OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
                                                &has_oat_class);
     if (has_oat_class) {
-      LoadClassMembers(dex_file, class_data, klass, class_loader, &oat_class);
+      LoadClassMembers(self, dex_file, class_data, klass, class_loader, &oat_class);
     }
   }
   if (!has_oat_class) {
-    LoadClassMembers(dex_file, class_data, klass, class_loader, nullptr);
+    LoadClassMembers(self, dex_file, class_data, klass, class_loader, nullptr);
   }
 }
 
-void ClassLinker::LoadClassMembers(const DexFile& dex_file,
+void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
                                    const byte* class_data,
                                    ConstHandle<mirror::Class> klass,
                                    mirror::ClassLoader* class_loader,
                                    const OatFile::OatClass* oat_class) {
   // Load fields.
   ClassDataItemIterator it(dex_file, class_data);
-  Thread* self = Thread::Current();
   if (it.NumStaticFields() != 0) {
     mirror::ObjectArray<mirror::ArtField>* statics = AllocArtFieldArray(self, it.NumStaticFields());
     if (UNLIKELY(statics == NULL)) {
@@ -2834,6 +2847,7 @@
     klass->SetIFields(fields);
   }
   for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
+    self->AllowThreadSuspension();
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtField> sfield(hs.NewHandle(AllocArtField(self)));
     if (UNLIKELY(sfield.Get() == NULL)) {
@@ -2844,6 +2858,7 @@
     LoadField(dex_file, it, klass, sfield);
   }
   for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) {
+    self->AllowThreadSuspension();
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtField> ifield(hs.NewHandle(AllocArtField(self)));
     if (UNLIKELY(ifield.Get() == NULL)) {
@@ -2879,6 +2894,7 @@
   uint32_t last_dex_method_index = DexFile::kDexNoIndex;
   size_t last_class_def_method_index = 0;
   for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
+    self->AllowThreadSuspension();
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
     if (UNLIKELY(method.Get() == NULL)) {
@@ -2899,6 +2915,7 @@
     class_def_method_index++;
   }
   for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) {
+    self->AllowThreadSuspension();
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
     if (UNLIKELY(method.Get() == NULL)) {
@@ -2987,8 +3004,7 @@
   return dst;
 }
 
-void ClassLinker::AppendToBootClassPath(const DexFile& dex_file) {
-  Thread* self = Thread::Current();
+void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
   StackHandleScope<1> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
   CHECK(dex_cache.Get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
@@ -3142,7 +3158,7 @@
   if (component_type.Get() == nullptr) {
     DCHECK(self->IsExceptionPending());
     // We need to accept erroneous classes as component types.
-    component_type.Assign(LookupClass(descriptor + 1, class_loader.Get()));
+    component_type.Assign(LookupClass(self, descriptor + 1, class_loader.Get()));
     if (component_type.Get() == nullptr) {
       DCHECK(self->IsExceptionPending());
       return nullptr;
@@ -3172,7 +3188,7 @@
   // class to the hash table --- necessary because of possible races with
   // other threads.)
   if (class_loader.Get() != component_type->GetClassLoader()) {
-    mirror::Class* new_class = LookupClass(descriptor, component_type->GetClassLoader());
+    mirror::Class* new_class = LookupClass(self, descriptor, component_type->GetClassLoader());
     if (new_class != NULL) {
       return new_class;
     }
@@ -3391,11 +3407,11 @@
   return false;
 }
 
-mirror::Class* ClassLinker::LookupClass(const char* descriptor,
+mirror::Class* ClassLinker::LookupClass(Thread* self, const char* descriptor,
                                         const mirror::ClassLoader* class_loader) {
   size_t hash = Hash(descriptor);
   {
-    ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+    ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
     mirror::Class* result = LookupClassFromTableLocked(descriptor, class_loader, hash);
     if (result != NULL) {
       return result;
@@ -3531,9 +3547,8 @@
   }
 }
 
-void ClassLinker::VerifyClass(ConstHandle<mirror::Class> klass) {
+void ClassLinker::VerifyClass(Thread* self, ConstHandle<mirror::Class> klass) {
   // TODO: assert that the monitor on the Class is held
-  Thread* self = Thread::Current();
   ObjectLock<mirror::Class> lock(self, klass);
 
   // Don't attempt to re-verify if already sufficiently verified.
@@ -3576,7 +3591,7 @@
     ObjectLock<mirror::Class> lock(self, super);
 
     if (!super->IsVerified() && !super->IsErroneous()) {
-      VerifyClass(super);
+      VerifyClass(self, super);
     }
     if (!super->IsCompileTimeVerified()) {
       std::string error_msg(
@@ -3617,7 +3632,7 @@
   verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
   std::string error_msg;
   if (!preverified) {
-    verifier_failure = verifier::MethodVerifier::VerifyClass(klass.Get(),
+    verifier_failure = verifier::MethodVerifier::VerifyClass(self, klass.Get(),
                                                              Runtime::Current()->IsCompiler(),
                                                              &error_msg);
   }
@@ -3652,7 +3667,7 @@
         klass->SetStatus(mirror::Class::kStatusVerified, self);
         // As this is a fake verified status, make sure the methods are _not_ marked preverified
         // later.
-        klass->SetAccessFlags(klass->GetAccessFlags() | kAccPreverified);
+        klass->SetPreverified();
       }
     }
   } else {
@@ -3675,9 +3690,9 @@
 }
 
 void ClassLinker::EnsurePreverifiedMethods(ConstHandle<mirror::Class> klass) {
-  if ((klass->GetAccessFlags() & kAccPreverified) == 0) {
+  if (!klass->IsPreverified()) {
     klass->SetPreverifiedFlagOnAllMethods();
-    klass->SetAccessFlags(klass->GetAccessFlags() | kAccPreverified);
+    klass->SetPreverified();
   }
 }
 
@@ -4108,12 +4123,8 @@
   return true;
 }
 
-bool ClassLinker::IsInitialized() const {
-  return init_done_;
-}
-
-bool ClassLinker::InitializeClass(ConstHandle<mirror::Class> klass, bool can_init_statics,
-                                  bool can_init_parents) {
+bool ClassLinker::InitializeClass(Thread* self, ConstHandle<mirror::Class> klass,
+                                  bool can_init_statics, bool can_init_parents) {
   // see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol
 
   // Are we already initialized and therefore done?
@@ -4128,7 +4139,7 @@
     return false;
   }
 
-  Thread* self = Thread::Current();
+  self->AllowThreadSuspension();
   uint64_t t0;
   {
     ObjectLock<mirror::Class> lock(self, klass);
@@ -4147,7 +4158,7 @@
     CHECK(klass->IsResolved()) << PrettyClass(klass.Get()) << ": state=" << klass->GetStatus();
 
     if (!klass->IsVerified()) {
-      VerifyClass(klass);
+      VerifyClass(self, klass);
       if (!klass->IsVerified()) {
         // We failed to verify, expect either the klass to be erroneous or verification failed at
         // compile time.
@@ -4186,6 +4197,7 @@
       klass->SetStatus(mirror::Class::kStatusError, self);
       return false;
     }
+    self->AllowThreadSuspension();
 
     CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusVerified) << PrettyClass(klass.Get());
 
@@ -4205,7 +4217,7 @@
       CHECK(can_init_parents);
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
-      bool super_initialized = InitializeClass(handle_scope_super, can_init_statics, true);
+      bool super_initialized = InitializeClass(self, handle_scope_super, can_init_statics, true);
       if (!super_initialized) {
         // The super class was verified ahead of entering initializing, we should only be here if
         // the super class became erroneous due to initialization.
@@ -4258,6 +4270,7 @@
     clinit->Invoke(self, NULL, 0, &result, "V");
   }
 
+  self->AllowThreadSuspension();
   uint64_t t1 = NanoTime();
 
   bool success = true;
@@ -4374,15 +4387,14 @@
   return true;
 }
 
-bool ClassLinker::EnsureInitialized(ConstHandle<mirror::Class> c, bool can_init_fields,
-                                    bool can_init_parents) {
+bool ClassLinker::EnsureInitialized(Thread* self, ConstHandle<mirror::Class> c,
+                                    bool can_init_fields, bool can_init_parents) {
   DCHECK(c.Get() != nullptr);
   if (c->IsInitialized()) {
     EnsurePreverifiedMethods(c);
     return true;
   }
-  const bool success = InitializeClass(c, can_init_fields, can_init_parents);
-  Thread* self = Thread::Current();
+  const bool success = InitializeClass(self, c, can_init_fields, can_init_parents);
   if (!success) {
     if (can_init_fields && can_init_parents) {
       CHECK(self->IsExceptionPending()) << PrettyClass(c.Get());
@@ -4442,11 +4454,11 @@
   if (!LinkMethods(self, klass, interfaces)) {
     return false;
   }
-  if (!LinkInstanceFields(klass)) {
+  if (!LinkInstanceFields(self, klass)) {
     return false;
   }
   size_t class_size;
-  if (!LinkStaticFields(klass, &class_size)) {
+  if (!LinkStaticFields(self, klass, &class_size)) {
     return false;
   }
   CreateReferenceInstanceOffsets(klass);
@@ -4603,6 +4615,7 @@
 // Populate the class vtable and itable. Compute return type indices.
 bool ClassLinker::LinkMethods(Thread* self, ConstHandle<mirror::Class> klass,
                               ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces) {
+  self->AllowThreadSuspension();
   if (klass->IsInterface()) {
     // No vtable.
     size_t count = klass->NumVirtualMethods();
@@ -4780,6 +4793,7 @@
       iftable->SetInterface(i, super_interface);
     }
   }
+  self->AllowThreadSuspension();
   // Flatten the interface inheritance hierarchy.
   size_t idx = super_ifcount;
   for (size_t i = 0; i < num_interfaces; i++) {
@@ -4823,6 +4837,7 @@
       }
     }
   }
+  self->AllowThreadSuspension();
   // Shrink iftable in case duplicates were found
   if (idx < ifcount) {
     iftable.Assign(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
@@ -4840,6 +4855,7 @@
   if (klass->IsInterface()) {
     return true;
   }
+  self->AllowThreadSuspension();
   // Allocate imtable
   bool imtable_changed = false;
   Handle<mirror::ObjectArray<mirror::ArtMethod>> imtable(
@@ -4858,6 +4874,7 @@
       miranda_list(hs.NewHandle(AllocArtMethodArray(self, max_miranda_methods)));
   size_t miranda_list_size = 0;  // The current size of miranda_list.
   for (size_t i = 0; i < ifcount; ++i) {
+    self->AllowThreadSuspension();
     size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
     if (num_methods > 0) {
       StackHandleScope<2> hs(self);
@@ -4984,19 +5001,19 @@
     CHECK(vtable->Get(i) != NULL);
   }
 
-//  klass->DumpClass(std::cerr, Class::kDumpClassFullDetail);
+  self->AllowThreadSuspension();
 
   return true;
 }
 
-bool ClassLinker::LinkInstanceFields(ConstHandle<mirror::Class> klass) {
+bool ClassLinker::LinkInstanceFields(Thread* self, ConstHandle<mirror::Class> klass) {
   CHECK(klass.Get() != NULL);
-  return LinkFields(klass, false, nullptr);
+  return LinkFields(self, klass, false, nullptr);
 }
 
-bool ClassLinker::LinkStaticFields(ConstHandle<mirror::Class> klass, size_t* class_size) {
+bool ClassLinker::LinkStaticFields(Thread* self, ConstHandle<mirror::Class> klass, size_t* class_size) {
   CHECK(klass.Get() != NULL);
-  return LinkFields(klass, true, class_size);
+  return LinkFields(self, klass, true, class_size);
 }
 
 struct LinkFieldsComparator {
@@ -5026,7 +5043,9 @@
   }
 };
 
-bool ClassLinker::LinkFields(ConstHandle<mirror::Class> klass, bool is_static, size_t* class_size) {
+bool ClassLinker::LinkFields(Thread* self, ConstHandle<mirror::Class> klass, bool is_static,
+                             size_t* class_size) {
+  self->AllowThreadSuspension();
   size_t num_fields =
       is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
 
@@ -5057,7 +5076,7 @@
   // we want a relatively stable order so that adding new fields
   // minimizes disruption of C++ version such as Class and Method.
   std::deque<mirror::ArtField*> grouped_and_sorted_fields;
-  const char* old_no_suspend_cause  = Thread::Current()->StartAssertNoThreadSuspension(
+  const char* old_no_suspend_cause  = self->StartAssertNoThreadSuspension(
       "Naked ArtField references in deque");
   for (size_t i = 0; i < num_fields; i++) {
     mirror::ArtField* f = fields->Get(i);
@@ -5103,8 +5122,7 @@
                     fields, &grouped_and_sorted_fields, &gaps);
   CHECK(grouped_and_sorted_fields.empty()) << "Missed " << grouped_and_sorted_fields.size() <<
       " fields.";
-
-  Thread::Current()->EndAssertNoThreadSuspension(old_no_suspend_cause);
+  self->EndAssertNoThreadSuspension(old_no_suspend_cause);
 
   // We lie to the GC about the java.lang.ref.Reference.referent field, so it doesn't scan it.
   if (!is_static && klass->DescriptorEquals("Ljava/lang/ref/Reference;")) {
@@ -5520,10 +5538,12 @@
 }
 
 void ClassLinker::DumpForSigQuit(std::ostream& os) {
+  Thread* self = Thread::Current();
   if (dex_cache_image_class_lookup_required_) {
+    ScopedObjectAccess soa(self);
     MoveImageClassesToClassTable();
   }
-  ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+  ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
   os << "Loaded classes: " << class_table_.size() << " allocated classes\n";
 }
 
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 11ac326..b6c62a9 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -92,17 +92,20 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns true if the class linker is initialized.
-  bool IsInitialized() const;
+  bool IsInitialized() const {
+    return init_done_;
+  }
 
   // Define a new a class based on a ClassDef from a DexFile
-  mirror::Class* DefineClass(const char* descriptor,
+  mirror::Class* DefineClass(Thread* self, const char* descriptor,
                              ConstHandle<mirror::ClassLoader> class_loader,
                              const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Finds a class by its descriptor, returning NULL if it wasn't loaded
   // by the given 'class_loader'.
-  mirror::Class* LookupClass(const char* descriptor, const mirror::ClassLoader* class_loader)
+  mirror::Class* LookupClass(Thread* self, const char* descriptor,
+                             const mirror::ClassLoader* class_loader)
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -124,8 +127,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void DumpForSigQuit(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
 
   size_t NumLoadedClasses()
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
@@ -221,7 +223,8 @@
   // Returns true on success, false if there's an exception pending.
   // can_run_clinit=false allows the compiler to attempt to init a class,
   // given the restriction that no <clinit> execution is possible.
-  bool EnsureInitialized(ConstHandle<mirror::Class> c, bool can_init_fields, bool can_init_parents)
+  bool EnsureInitialized(Thread* self, ConstHandle<mirror::Class> c, bool can_init_fields,
+                         bool can_init_parents)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Initializes classes that have instances in the image but that have
@@ -323,7 +326,8 @@
                                                                               size_t length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void VerifyClass(ConstHandle<mirror::Class> klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VerifyClass(Thread* self, ConstHandle<mirror::Class> klass)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
                                mirror::Class::Status& oat_file_class_status)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -444,7 +448,7 @@
                                   ConstHandle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void AppendToBootClassPath(const DexFile& dex_file)
+  void AppendToBootClassPath(Thread* self, const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void AppendToBootClassPath(const DexFile& dex_file, ConstHandle<mirror::DexCache> dex_cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -454,15 +458,11 @@
   uint32_t SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
                                             const DexFile::ClassDef& dex_class_def);
 
-  void LoadClass(const DexFile& dex_file,
-                 const DexFile::ClassDef& dex_class_def,
-                 ConstHandle<mirror::Class> klass,
-                 mirror::ClassLoader* class_loader)
+  void LoadClass(Thread* self, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
+                 ConstHandle<mirror::Class> klass, mirror::ClassLoader* class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void LoadClassMembers(const DexFile& dex_file,
-                        const byte* class_data,
-                        ConstHandle<mirror::Class> klass,
-                        mirror::ClassLoader* class_loader,
+  void LoadClassMembers(Thread* self, const DexFile& dex_file, const byte* class_data,
+                        ConstHandle<mirror::Class> klass, mirror::ClassLoader* class_loader,
                         const OatFile::OatClass* oat_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -488,7 +488,7 @@
   bool IsDexFileRegisteredLocked(const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_);
 
-  bool InitializeClass(ConstHandle<mirror::Class> klass, bool can_run_clinit,
+  bool InitializeClass(Thread* self, ConstHandle<mirror::Class> klass, bool can_run_clinit,
                        bool can_init_parents)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool WaitForInitializeClass(ConstHandle<mirror::Class> klass, Thread* self,
@@ -528,11 +528,12 @@
                             ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkStaticFields(ConstHandle<mirror::Class> klass, size_t* class_size)
+  bool LinkStaticFields(Thread* self, ConstHandle<mirror::Class> klass, size_t* class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool LinkInstanceFields(ConstHandle<mirror::Class> klass)
+  bool LinkInstanceFields(Thread* self, ConstHandle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool LinkFields(ConstHandle<mirror::Class> klass, bool is_static, size_t* class_size)
+  bool LinkFields(Thread* self, ConstHandle<mirror::Class> klass, bool is_static,
+                  size_t* class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void LinkCode(ConstHandle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
                 const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
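
The self->AllowThreadSuspension() calls added through class loading, initialization and field linking above act as cheap suspension points, so GC and debugger suspension requests are not held off for the duration of loading a large class. Judging by the interpreter diffs further down, where the same call replaces an open-coded TestAllFlags()/CheckSuspend() pair, its shape is presumably along these lines (a sketch, not lifted from this patch):

    inline void Thread::AllowThreadSuspension() {
      if (UNLIKELY(TestAllFlags())) {
        CheckSuspend();  // run pending checkpoints and/or a full suspend check
      }
    }
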
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index ee5fbb7..273d4c0 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -855,7 +855,7 @@
       hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Statics"))));
   Handle<mirror::Class> statics(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStatics;", class_loader)));
-  class_linker_->EnsureInitialized(statics, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), statics, true, true);
 
   // Static final primitives that are initialized by a compile-time constant
   // expression resolve to a copy of a constant value from the constant pool.
@@ -1133,7 +1133,7 @@
 
   CheckPreverified(security_manager.Get(), false);
 
-  class_linker_->EnsureInitialized(security_manager, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), security_manager, true, true);
   CheckPreverified(security_manager.Get(), true);
 }
 
@@ -1148,7 +1148,7 @@
 
   CheckPreverified(statics.Get(), false);
 
-  class_linker_->EnsureInitialized(statics, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), statics, true, true);
   CheckPreverified(statics.Get(), true);
 }
 
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index d1e041c..50bb0c9 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3051,7 +3051,7 @@
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
   Handle<mirror::ArtMethod> method(hs.NewHandle(m));
-  verifier::MethodVerifier verifier(dex_cache->GetDexFile(), dex_cache, class_loader,
+  verifier::MethodVerifier verifier(self, dex_cache->GetDexFile(), dex_cache, class_loader,
                                     &m->GetClassDef(), code_item, m->GetDexMethodIndex(), method,
                                     m->GetAccessFlags(), false, true, false);
   // Note: we don't need to verify the method.
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index b6810b0..b913220 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -190,7 +190,7 @@
   }
 
   // Reads an instruction out of the stream from the current address plus an offset.
-  const Instruction* RelativeAt(int32_t offset) const {
+  const Instruction* RelativeAt(int32_t offset) const WARN_UNUSED {
     return At(reinterpret_cast<const uint16_t*>(this) + offset);
   }
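
RelativeAt computes a new instruction pointer rather than advancing this one in place, so tagging it WARN_UNUSED turns a silently discarded result into a compile-time warning. For instance (illustrative caller; insns and offset are assumed to be in scope):

    const Instruction* inst = Instruction::At(insns);
    inst->RelativeAt(offset);         // now warns: result ignored, inst did not move
    inst = inst->RelativeAt(offset);  // intended usage
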
 
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 38842cb..4ef7d74 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -79,7 +79,7 @@
     // has changed and to null-check the return value in case the
     // initialization fails.
     *slow_path = true;
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true)) {
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
       DCHECK(self->IsExceptionPending());
       return nullptr;  // Failure
     } else {
@@ -107,7 +107,7 @@
     // has changed and to null-check the return value in case the
     // initialization fails.
     *slow_path = true;
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
       DCHECK(self->IsExceptionPending());
       return nullptr;  // Failure
     }
@@ -324,7 +324,7 @@
     } else {
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(fields_class));
-      if (LIKELY(class_linker->EnsureInitialized(h_class, true, true))) {
+      if (LIKELY(class_linker->EnsureInitialized(self, h_class, true, true))) {
         // Otherwise let's ensure the class is initialized before resolving the field.
         return resolved_field;
       }
@@ -603,7 +603,7 @@
   }
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> h_class(hs.NewHandle(klass));
-  if (!class_linker->EnsureInitialized(h_class, true, true)) {
+  if (!class_linker->EnsureInitialized(self, h_class, true, true)) {
     CHECK(self->IsExceptionPending());
     return nullptr;  // Failure - Indicate to caller to deliver exception
   }
@@ -640,18 +640,6 @@
   }
 }
 
-static inline void CheckSuspend(Thread* thread) {
-  for (;;) {
-    if (thread->ReadFlag(kCheckpointRequest)) {
-      thread->RunCheckpointFunction();
-    } else if (thread->ReadFlag(kSuspendRequest)) {
-      thread->FullSuspendCheck();
-    } else {
-      break;
-    }
-  }
-}
-
 template <typename INT_TYPE, typename FLOAT_TYPE>
 static inline INT_TYPE art_float_to_integral(FLOAT_TYPE f) {
   const INT_TYPE kMaxInt = static_cast<INT_TYPE>(std::numeric_limits<INT_TYPE>::max());
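
The freestanding CheckSuspend(Thread*) helper deleted above moves onto Thread itself; callers throughout the entrypoints below now write self->CheckSuspend(). Presumably the member keeps the same loop as the removed helper (reconstructed from the deleted lines, adjusted for member access):

    inline void Thread::CheckSuspend() {
      for (;;) {
        if (ReadFlag(kCheckpointRequest)) {
          RunCheckpointFunction();
        } else if (ReadFlag(kSuspendRequest)) {
          FullSuspendCheck();
        } else {
          break;
        }
      }
    }
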
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 44c89ad..08edecf 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -174,8 +174,6 @@
 void CheckReferenceResult(mirror::Object* o, Thread* self)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
 JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
                                     jobject rcvr_jobj, jobject interface_art_method_jobj,
                                     std::vector<jvalue>& args)
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 64faf76..b617636 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -36,7 +36,8 @@
       self->PushShadowFrame(shadow_frame);
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(declaringClass));
-      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true))) {
+      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true,
+                                                                            true))) {
         self->PopShadowFrame();
         DCHECK(self->IsExceptionPending());
         return;
diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
index 23e1c36..7d5ccc2 100644
--- a/runtime/entrypoints/portable/portable_thread_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
@@ -14,11 +14,9 @@
  * limitations under the License.
  */
 
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/object-inl.h"
 #include "verifier/dex_gc_map.h"
 #include "stack.h"
+#include "thread-inl.h"
 
 namespace art {
 
@@ -71,7 +69,7 @@
 
 extern "C" void art_portable_test_suspend_from_code(Thread* self)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  CheckSuspend(self);
+  self->CheckSuspend();
   if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) {
     // Save out the shadow frame to the heap
     ShadowFrameCopyVisitor visitor(self);
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index 9f75b0f..7f6144b 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -215,7 +215,7 @@
     if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
       // Ensure static method's class is initialized.
       Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
-      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
+      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
         DCHECK(Thread::Current()->IsExceptionPending());
         self->PopManagedStackFragment(fragment);
         return 0;
@@ -399,7 +399,7 @@
     // Ensure that the called method's class is initialized.
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
-    linker->EnsureInitialized(called_class, true, true);
+    linker->EnsureInitialized(self, called_class, true, true);
     if (LIKELY(called_class->IsInitialized())) {
       code = called->GetEntryPointFromPortableCompiledCode();
       // TODO: remove this after we solve the link issue.
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 6537249..87f04bb 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -14,15 +14,8 @@
  * limitations under the License.
  */
 
-#include "dex_file-inl.h"
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object.h"
 #include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
+#include "thread-inl.h"
 #include "verify_object-inl.h"
 
 namespace art {
@@ -56,7 +49,7 @@
     // In fast JNI mode we never transitioned out of runnable. Perform a suspend check if there
     // is a flag raised.
     DCHECK(Locks::mutator_lock_->IsSharedHeld(self));
-    CheckSuspend(self);
+    self->CheckSuspend();
   }
 }
 
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 118cd7f..ea75fb6 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -15,17 +15,15 @@
  */
 
 #include "callee_save_frame.h"
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "thread.h"
-#include "thread_list.h"
+#include "thread-inl.h"
 
 namespace art {
 
-extern "C" void artTestSuspendFromCode(Thread* thread, StackReference<mirror::ArtMethod>* sp)
+extern "C" void artTestSuspendFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Called when suspend count check value is 0 and thread->suspend_count_ != 0
-  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
-  CheckSuspend(thread);
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  self->CheckSuspend();
 }
 
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index dfd2e11..95dd8be 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -496,7 +496,7 @@
       // Ensure static method's class is initialized.
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
-      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
+      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
         DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
         self->PopManagedStackFragment(fragment);
         return 0;
@@ -808,7 +808,7 @@
     // Ensure that the called method's class is initialized.
     StackHandleScope<1> hs(soa.Self());
     Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
-    linker->EnsureInitialized(called_class, true, true);
+    linker->EnsureInitialized(soa.Self(), called_class, true, true);
     if (LIKELY(called_class->IsInitialized())) {
       code = called->GetEntryPointFromQuickCompiledCode();
     } else if (called_class->IsInitializing()) {
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 99633a3..6033a5f 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -45,7 +45,7 @@
     my_klass_ = class_linker_->FindClass(soa.Self(), "LExceptionHandle;", class_loader);
     ASSERT_TRUE(my_klass_ != NULL);
     Handle<mirror::Class> klass(hs.NewHandle(my_klass_));
-    class_linker_->EnsureInitialized(klass, true, true);
+    class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
     my_klass_ = klass.Get();
 
     dex_ = my_klass_->GetDexCache()->GetDexFile();
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e775965..351e1c6 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -459,7 +459,7 @@
                                                               bool fail_ok) const;
   space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
 
-  void DumpForSigQuit(std::ostream& os) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DumpForSigQuit(std::ostream& os);
 
   // Do a pending heap transition or trim.
   void DoPendingTransitionOrTrim() LOCKS_EXCLUDED(heap_trim_request_lock_);
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 47a7f0d..7e685e8 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -462,7 +462,7 @@
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
-    if (UNLIKELY(!class_linker->EnsureInitialized(h_class, true, true))) {
+    if (UNLIKELY(!class_linker->EnsureInitialized(self, h_class, true, true))) {
       CHECK(self->IsExceptionPending());
       self->PopShadowFrame();
       return;
@@ -537,7 +537,7 @@
       StackHandleScope<1> hs(self);
       HandleWrapper<Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
       if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
-          h_declaring_class, true, true))) {
+          self, h_declaring_class, true, true))) {
         DCHECK(self->IsExceptionPending());
         self->PopShadowFrame();
         return;
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 5724e35..2129c1b 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -800,7 +800,7 @@
   if (found != nullptr && initialize_class) {
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> h_class(hs.NewHandle(found));
-    if (!class_linker->EnsureInitialized(h_class, true, true)) {
+    if (!class_linker->EnsureInitialized(self, h_class, true, true)) {
       CHECK(self->IsExceptionPending());
       return;
     }
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 5a1d01e..9358632 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -192,7 +192,7 @@
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> h_class(hs.NewHandle(java_lang_string_class));
-    if (UNLIKELY(!class_linker->EnsureInitialized(h_class, true, true))) {
+    if (UNLIKELY(!class_linker->EnsureInitialized(self, h_class, true, true))) {
       DCHECK(self->IsExceptionPending());
       return nullptr;
     }
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 755e1ed..5c8a6c6 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -249,9 +249,7 @@
       // perform the memory barrier now.
       QuasiAtomic::ThreadFenceForConstructor();
     }
-    if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
-    }
+    self->AllowThreadSuspension();
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
     if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
       instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -268,9 +266,7 @@
   HANDLE_INSTRUCTION_START(RETURN_VOID_BARRIER) {
     QuasiAtomic::ThreadFenceForConstructor();
     JValue result;
-    if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
-    }
+    self->AllowThreadSuspension();
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
     if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
       instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -288,9 +284,7 @@
     JValue result;
     result.SetJ(0);
     result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
-    if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
-    }
+    self->AllowThreadSuspension();
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
     if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
       instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -307,9 +301,7 @@
   HANDLE_INSTRUCTION_START(RETURN_WIDE) {
     JValue result;
     result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
-    if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
-    }
+    self->AllowThreadSuspension();
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
     if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
       instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -325,9 +317,7 @@
 
   HANDLE_INSTRUCTION_START(RETURN_OBJECT) {
     JValue result;
-    if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
-    }
+    self->AllowThreadSuspension();
     const uint8_t vreg_index = inst->VRegA_11x(inst_data);
     Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
     if (do_assignability_check && obj_result != NULL) {
@@ -632,7 +622,7 @@
     int8_t offset = inst->VRegA_10t(inst_data);
     if (IsBackwardBranch(offset)) {
       if (UNLIKELY(self->TestAllFlags())) {
-        CheckSuspend(self);
+        self->CheckSuspend();
         UPDATE_HANDLER_TABLE();
       }
     }
@@ -644,7 +634,7 @@
     int16_t offset = inst->VRegA_20t();
     if (IsBackwardBranch(offset)) {
       if (UNLIKELY(self->TestAllFlags())) {
-        CheckSuspend(self);
+        self->CheckSuspend();
         UPDATE_HANDLER_TABLE();
       }
     }
@@ -656,7 +646,7 @@
     int32_t offset = inst->VRegA_30t();
     if (IsBackwardBranch(offset)) {
       if (UNLIKELY(self->TestAllFlags())) {
-        CheckSuspend(self);
+        self->CheckSuspend();
         UPDATE_HANDLER_TABLE();
       }
     }
@@ -668,7 +658,7 @@
     int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
     if (IsBackwardBranch(offset)) {
       if (UNLIKELY(self->TestAllFlags())) {
-        CheckSuspend(self);
+        self->CheckSuspend();
         UPDATE_HANDLER_TABLE();
       }
     }
@@ -680,7 +670,7 @@
     int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
     if (IsBackwardBranch(offset)) {
       if (UNLIKELY(self->TestAllFlags())) {
-        CheckSuspend(self);
+        self->CheckSuspend();
         UPDATE_HANDLER_TABLE();
       }
     }
@@ -773,7 +763,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -789,7 +779,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -805,7 +795,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -821,7 +811,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -837,7 +827,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -853,7 +843,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -869,7 +859,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -885,7 +875,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -901,7 +891,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -917,7 +907,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -933,7 +923,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -949,7 +939,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -2399,7 +2389,7 @@
   exception_pending_label: {
     CHECK(self->IsExceptionPending());
     if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
+      self->CheckSuspend();
       UPDATE_HANDLER_TABLE();
     }
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 6054a25..c6cef6a 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -22,9 +22,7 @@
 #define HANDLE_PENDING_EXCEPTION()                                                              \
   do {                                                                                          \
     DCHECK(self->IsExceptionPending());                                                         \
-    if (UNLIKELY(self->TestAllFlags())) {                                                       \
-      CheckSuspend(self);                                                                       \
-    }                                                                                           \
+    self->AllowThreadSuspension();                                                              \
     uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame,           \
                                                                   inst->GetDexPc(insns),        \
                                                                   instrumentation);             \
@@ -175,9 +173,7 @@
           // perform the memory barrier now.
           QuasiAtomic::ThreadFenceForConstructor();
         }
-        if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
-        }
+        self->AllowThreadSuspension();
         if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
           instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
                                            shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -191,9 +187,7 @@
       case Instruction::RETURN_VOID_BARRIER: {
         QuasiAtomic::ThreadFenceForConstructor();
         JValue result;
-        if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
-        }
+        self->AllowThreadSuspension();
         if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
           instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
                                            shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -208,9 +202,7 @@
         JValue result;
         result.SetJ(0);
         result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
-        if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
-        }
+        self->AllowThreadSuspension();
         if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
           instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
                                            shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -224,9 +216,7 @@
       case Instruction::RETURN_WIDE: {
         JValue result;
         result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
-        if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
-        }
+        self->AllowThreadSuspension();
         if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
           instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
                                            shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -239,9 +229,7 @@
       }
       case Instruction::RETURN_OBJECT: {
         JValue result;
-        if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
-        }
+        self->AllowThreadSuspension();
         const size_t ref_idx = inst->VRegA_11x(inst_data);
         Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
         if (do_assignability_check && obj_result != NULL) {
@@ -545,9 +533,7 @@
         PREAMBLE();
         int8_t offset = inst->VRegA_10t(inst_data);
         if (IsBackwardBranch(offset)) {
-          if (UNLIKELY(self->TestAllFlags())) {
-            CheckSuspend(self);
-          }
+          self->AllowThreadSuspension();
         }
         inst = inst->RelativeAt(offset);
         break;
@@ -556,9 +542,7 @@
         PREAMBLE();
         int16_t offset = inst->VRegA_20t();
         if (IsBackwardBranch(offset)) {
-          if (UNLIKELY(self->TestAllFlags())) {
-            CheckSuspend(self);
-          }
+          self->AllowThreadSuspension();
         }
         inst = inst->RelativeAt(offset);
         break;
@@ -567,9 +551,7 @@
         PREAMBLE();
         int32_t offset = inst->VRegA_30t();
         if (IsBackwardBranch(offset)) {
-          if (UNLIKELY(self->TestAllFlags())) {
-            CheckSuspend(self);
-          }
+          self->AllowThreadSuspension();
         }
         inst = inst->RelativeAt(offset);
         break;
@@ -578,9 +560,7 @@
         PREAMBLE();
         int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
         if (IsBackwardBranch(offset)) {
-          if (UNLIKELY(self->TestAllFlags())) {
-            CheckSuspend(self);
-          }
+          self->AllowThreadSuspension();
         }
         inst = inst->RelativeAt(offset);
         break;
@@ -589,9 +569,7 @@
         PREAMBLE();
         int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
         if (IsBackwardBranch(offset)) {
-          if (UNLIKELY(self->TestAllFlags())) {
-            CheckSuspend(self);
-          }
+          self->AllowThreadSuspension();
         }
         inst = inst->RelativeAt(offset);
         break;
@@ -682,9 +660,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -697,9 +673,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -712,9 +686,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -727,9 +699,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -742,9 +712,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -757,9 +725,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -772,9 +738,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -787,9 +751,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -802,9 +764,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -817,9 +777,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -832,9 +790,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -847,9 +803,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 36f01db..bf979c1 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -108,7 +108,7 @@
   }
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> h_klass(hs.NewHandle(klass));
-  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true)) {
+  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
     return nullptr;
   }
   return h_klass.Get();
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index a1097b4..aad678f 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -266,6 +266,16 @@
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
+  // Returns true if the class can avoid access checks.
+  bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return (GetAccessFlags() & kAccPreverified) != 0;
+  }
+
+  void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+    SetAccessFlags(flags | kAccPreverified);
+  }
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsTypeOfReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0;
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 14d6cd9..ff9dc38 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -189,8 +189,8 @@
       StackHandleScope<1> hs(soa.Self());
       Handle<mirror::ClassLoader> class_loader(
           hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
-      mirror::Class* result = class_linker->DefineClass(descriptor.c_str(), class_loader, *dex_file,
-                                                        *dex_class_def);
+      mirror::Class* result = class_linker->DefineClass(soa.Self(), descriptor.c_str(),
+                                                        class_loader, *dex_file, *dex_class_def);
       if (result != nullptr) {
         VLOG(class_linker) << "DexFile_defineClassNative returning " << result;
         return soa.AddLocalReference<jclass>(result);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index db0a5c5..e1d9fc7 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -238,7 +238,7 @@
 }
 
 // Based on ClassLinker::ResolveType.
-static void PreloadDexCachesResolveType(mirror::DexCache* dex_cache, uint32_t type_idx)
+static void PreloadDexCachesResolveType(Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
   if (klass != NULL) {
@@ -250,7 +250,7 @@
   if (class_name[1] == '\0') {
     klass = linker->FindPrimitiveClass(class_name[0]);
   } else {
-    klass = linker->LookupClass(class_name, NULL);
+    klass = linker->LookupClass(self, class_name, NULL);
   }
   if (klass == NULL) {
     return;
@@ -427,7 +427,6 @@
 
   Runtime* runtime = Runtime::Current();
   ClassLinker* linker = runtime->GetClassLinker();
-  Thread* self = ThreadForEnv(env);
 
   // We use a std::map to avoid heap allocating StringObjects to lookup in gDvm.literalStrings
   StringTable strings;
@@ -440,7 +439,7 @@
   for (size_t i = 0; i< boot_class_path.size(); i++) {
     const DexFile* dex_file = boot_class_path[i];
     CHECK(dex_file != NULL);
-    StackHandleScope<1> hs(self);
+    StackHandleScope<1> hs(soa.Self());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
 
     if (kPreloadDexCachesStrings) {
@@ -451,7 +450,7 @@
 
     if (kPreloadDexCachesTypes) {
       for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
-        PreloadDexCachesResolveType(dex_cache.Get(), i);
+        PreloadDexCachesResolveType(soa.Self(), dex_cache.Get(), i);
       }
     }
 
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 124bdf5..b11cbdf 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -78,7 +78,7 @@
     return nullptr;
   }
   if (initialize) {
-    class_linker->EnsureInitialized(c, true, true);
+    class_linker->EnsureInitialized(soa.Self(), c, true, true);
   }
   return soa.AddLocalReference<jclass>(c.Get());
 }
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 761e800..f6a46bd 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -24,17 +24,18 @@
 
 namespace art {
 
-static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoader, jstring javaName) {
+static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoader,
+                                            jstring javaName) {
   ScopedFastNativeObjectAccess soa(env);
   mirror::ClassLoader* loader = soa.Decode<mirror::ClassLoader*>(javaLoader);
   ScopedUtfChars name(env, javaName);
-  if (name.c_str() == NULL) {
-    return NULL;
+  if (name.c_str() == nullptr) {
+    return nullptr;
   }
   ClassLinker* cl = Runtime::Current()->GetClassLinker();
   std::string descriptor(DotToDescriptor(name.c_str()));
-  mirror::Class* c = cl->LookupClass(descriptor.c_str(), loader);
-  if (c != NULL && c->IsResolved()) {
+  mirror::Class* c = cl->LookupClass(soa.Self(), descriptor.c_str(), loader);
+  if (c != nullptr && c->IsResolved()) {
     return soa.AddLocalReference<jclass>(c);
   }
   if (loader != nullptr) {
@@ -47,7 +48,7 @@
   }
   // Class wasn't resolved so it may be erroneous or not yet ready, force the caller to go into
   // the regular loadClass code.
-  return NULL;
+  return nullptr;
 }
 
 static jint VMClassLoader_getBootClassPathSize(JNIEnv*, jclass) {
@@ -67,13 +68,15 @@
  * with '/'); if it's not we'd need to make it absolute as part of forming
  * the URL string.
  */
-static jstring VMClassLoader_getBootClassPathResource(JNIEnv* env, jclass, jstring javaName, jint index) {
+static jstring VMClassLoader_getBootClassPathResource(JNIEnv* env, jclass, jstring javaName,
+                                                      jint index) {
   ScopedUtfChars name(env, javaName);
   if (name.c_str() == nullptr) {
     return nullptr;
   }
 
-  const std::vector<const DexFile*>& path = Runtime::Current()->GetClassLinker()->GetBootClassPath();
+  const std::vector<const DexFile*>& path =
+      Runtime::Current()->GetClassLinker()->GetBootClassPath();
   if (index < 0 || size_t(index) >= path.size()) {
     return nullptr;
   }
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 34cb93a..0542aeb 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -48,7 +48,7 @@
     return nullptr;
   }
 
-  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) {
+  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(soa.Self(), c, true, true)) {
     DCHECK(soa.Self()->IsExceptionPending());
     return nullptr;
   }
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 3903ffc..ad88109 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -97,7 +97,8 @@
     StackHandleScope<2> hs(soa.Self());
     HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(f));
     Handle<mirror::Class> h_klass(hs.NewHandle((*f)->GetDeclaringClass()));
-    if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true))) {
+    if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(soa.Self(), h_klass, true,
+                                                                          true))) {
       DCHECK(soa.Self()->IsExceptionPending());
       *class_or_rcvr = nullptr;
       return false;
diff --git a/runtime/oat.h b/runtime/oat.h
index 6d5fefe..6a32e3e 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -152,7 +152,7 @@
 enum OatClassType {
   kOatClassAllCompiled = 0,   // OatClass is followed by an OatMethodOffsets for each method.
   kOatClassSomeCompiled = 1,  // A bitmap of which OatMethodOffsets are present follows the OatClass.
-  kOatClassNoneCompiled = 2,  // All methods are interpretted so no OatMethodOffsets are necessary.
+  kOatClassNoneCompiled = 2,  // All methods are interpreted so no OatMethodOffsets are necessary.
   kOatClassMax = 3,
 };
 
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index a6a2475..cde4177 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -119,12 +119,12 @@
 }
 
 // A closure that is called by the thread checkpoint code.
-class SampleCheckpoint : public Closure {
+class SampleCheckpoint FINAL : public Closure {
  public:
   explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
     profiler_(profiler) {}
 
-  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* thread) OVERRIDE {
     Thread* self = Thread::Current();
     if (thread == nullptr) {
       LOG(ERROR) << "Checkpoint with nullptr thread";
@@ -192,6 +192,7 @@
       VLOG(profiler) << "Delaying profile start for " << delay_secs << " secs";
       MutexLock mu(self, profiler->wait_lock_);
       profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);
+      // We were either signaled by Stop or we timed out; in either case, ignore the timed-out result.
 
       // Expand the backoff by its coefficient, but don't go beyond the max.
       backoff = std::min(backoff * profiler->options_.GetBackoffCoefficient(), kMaxBackoffSecs);
@@ -238,17 +239,13 @@
       // is done with a timeout so that we can detect problems with the checkpoint
       // running code.  We should never see this.
       const uint32_t kWaitTimeoutMs = 10000;
-      const uint32_t kWaitTimeoutUs = kWaitTimeoutMs * 1000;
 
-      uint64_t waitstart_us = MicroTime();
       // Wait for all threads to pass the barrier.
-      profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);
-      uint64_t waitend_us = MicroTime();
-      uint64_t waitdiff_us = waitend_us - waitstart_us;
+      bool timed_out = profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);
 
       // We should never get a timeout.  If we do, it suggests a problem with the checkpoint
       // code.  Crash the process in this case.
-      CHECK_LT(waitdiff_us, kWaitTimeoutUs);
+      CHECK(!timed_out);
 
       // Update the current time.
       now_us = MicroTime();
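
The profiler.cc hunk above drops the manual MicroTime() bracketing and instead trusts the timed barrier wait to report whether it timed out. A generic sketch of that pattern using only the standard library follows (hypothetical names; the real Barrier::Increment API is ART-specific):

#include <chrono>
#include <condition_variable>
#include <mutex>

// Returns true if the wait timed out before the predicate became true; callers
// check this flag instead of re-measuring elapsed wall-clock time themselves.
bool WaitWithTimeout(std::mutex& mu, std::condition_variable& cv, bool& done,
                     std::chrono::milliseconds timeout) {
  std::unique_lock<std::mutex> lock(mu);
  // wait_for returns false when the timeout expires with the predicate still false.
  return !cv.wait_for(lock, timeout, [&] { return done; });
}

// Usage sketch:
//   bool timed_out = WaitWithTimeout(mu, cv, done, std::chrono::milliseconds(10000));
//   CHECK(!timed_out);  // crash if the checkpoint machinery wedged
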
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 1ec488e..43d21de 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -211,7 +211,7 @@
     Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
     Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
     Handle<mirror::ArtMethod> h_method(hs.NewHandle(m));
-    verifier::MethodVerifier verifier(h_dex_cache->GetDexFile(), h_dex_cache, h_class_loader,
+    verifier::MethodVerifier verifier(self_, h_dex_cache->GetDexFile(), h_dex_cache, h_class_loader,
                                       &m->GetClassDef(), code_item, m->GetDexMethodIndex(),
                                       h_method, m->GetAccessFlags(), false, true, true);
     verifier.Verify();
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 7da450c..9fe296a 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -560,7 +560,7 @@
   if (UNLIKELY(!declaring_class->IsInitialized())) {
     StackHandleScope<1> hs(soa.Self());
     Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(soa.Self(), h_class, true, true)) {
       return nullptr;
     }
     declaring_class = h_class.Get();
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 9d10daa..75211e0 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -117,7 +117,7 @@
       // Ensure class is initialized before allocating object
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(c));
-      bool initialized = class_linker_->EnsureInitialized(h_class, true, true);
+      bool initialized = class_linker_->EnsureInitialized(self, h_class, true, true);
       CHECK(initialized);
       *receiver = c->AllocObject(self);
     }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 474b72d..bf1e016 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -199,7 +199,7 @@
 }
 
 struct AbortState {
-  void Dump(std::ostream& os) NO_THREAD_SAFETY_ANALYSIS {
+  void Dump(std::ostream& os) {
     if (gAborting > 1) {
       os << "Runtime aborting --- recursively, so no thread-specific detail!\n";
       return;
@@ -229,7 +229,9 @@
     DumpAllThreads(os, self);
   }
 
-  void DumpThread(std::ostream& os, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // No thread-safety analysis, as we explicitly test for holding the mutator lock.
+  void DumpThread(std::ostream& os, Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+    DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self));
     self->Dump(os);
     if (self->IsExceptionPending()) {
       ThrowLocation throw_location;
@@ -240,7 +242,7 @@
     }
   }
 
-  void DumpAllThreads(std::ostream& os, Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+  void DumpAllThreads(std::ostream& os, Thread* self) {
     Runtime* runtime = Runtime::Current();
     if (runtime != nullptr) {
       ThreadList* thread_list = runtime->GetThreadList();
@@ -254,7 +256,7 @@
               << "\n";
         }
         os << "All threads:\n";
-        thread_list->DumpLocked(os);
+        thread_list->Dump(os);
       }
     }
   }
@@ -343,7 +345,7 @@
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> class_loader_class(
       hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
-  CHECK(cl->EnsureInitialized(class_loader_class, true, true));
+  CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true));
 
   mirror::ArtMethod* getSystemClassLoader =
       class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;");
@@ -359,7 +361,7 @@
 
   Handle<mirror::Class> thread_class(
       hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
-  CHECK(cl->EnsureInitialized(thread_class, true, true));
+  CHECK(cl->EnsureInitialized(soa.Self(), thread_class, true, true));
 
   mirror::ArtField* contextClassLoader =
       thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
@@ -404,7 +406,7 @@
     ScopedObjectAccess soa(Thread::Current());
     StackHandleScope<1> hs(soa.Self());
     auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
-    class_linker_->EnsureInitialized(klass, true, true);
+    class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
   }
 
   // InitNativeMethods needs to be after started_ so that the classes
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 9df1453..cfb1abc 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -197,8 +197,7 @@
   // Detaches the current native thread from the runtime.
   void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);
 
-  void DumpForSigQuit(std::ostream& os)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DumpForSigQuit(std::ostream& os);
   void DumpLockHolders(std::ostream& os);
 
   ~Runtime();
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 11e06fe..336340e 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -118,17 +118,6 @@
 
 void SignalCatcher::HandleSigQuit() {
   Runtime* runtime = Runtime::Current();
-  ThreadList* thread_list = runtime->GetThreadList();
-
-  // Grab exclusively the mutator lock, set state to Runnable without checking for a pending
-  // suspend request as we're going to suspend soon anyway. We set the state to Runnable to avoid
-  // giving away the mutator lock.
-  thread_list->SuspendAll();
-  Thread* self = Thread::Current();
-  Locks::mutator_lock_->AssertExclusiveHeld(self);
-  const char* old_cause = self->StartAssertNoThreadSuspension("Handling SIGQUIT");
-  ThreadState old_state = self->SetStateUnsafe(kRunnable);
-
   std::ostringstream os;
   os << "\n"
       << "----- pid " << getpid() << " at " << GetIsoDate() << " -----\n";
@@ -149,14 +138,6 @@
     }
   }
   os << "----- end " << getpid() << " -----\n";
-  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
-  self->EndAssertNoThreadSuspension(old_cause);
-  thread_list->ResumeAll();
-  // Run the checkpoints after resuming the threads to prevent deadlocks if the checkpoint function
-  // acquires the mutator lock.
-  if (self->ReadFlag(kCheckpointRequest)) {
-    self->RunCheckpointFunction();
-  }
   Output(os.str());
 }
 
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index bd399e7..6698634 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -23,6 +23,7 @@
 
 #include "base/casts.h"
 #include "base/mutex-inl.h"
+#include "entrypoints/entrypoint_utils-inl.h"
 #include "gc/heap.h"
 #include "jni_env_ext.h"
 
@@ -45,6 +46,26 @@
   }
 }
 
+inline void Thread::AllowThreadSuspension() {
+  DCHECK_EQ(Thread::Current(), this);
+  if (UNLIKELY(TestAllFlags())) {
+    CheckSuspend();
+  }
+}
+
+inline void Thread::CheckSuspend() {
+  DCHECK_EQ(Thread::Current(), this);
+  for (;;) {
+    if (ReadFlag(kCheckpointRequest)) {
+      RunCheckpointFunction();
+    } else if (ReadFlag(kSuspendRequest)) {
+      FullSuspendCheck();
+    } else {
+      break;
+    }
+  }
+}
+
 inline ThreadState Thread::SetState(ThreadState new_state) {
   // Cannot use this code to change into Runnable as changing to Runnable should fail if
   // old_state_and_flags.suspend_request is true.
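
The two new inline helpers above are what the interpreter hunks earlier in this patch call: the many open-coded "if (UNLIKELY(self->TestAllFlags())) { CheckSuspend(self); }" blocks collapse into "self->AllowThreadSuspension()", keeping the hot path to a single flag test. A self-contained sketch of that split, with hypothetical names and trivial stand-in bodies:

#include <atomic>
#include <cstdint>

class MiniThread {
 public:
  // Fast path: one flag test; the slow path runs only when something is pending.
  void AllowThreadSuspension() {
    if (flags_.load(std::memory_order_relaxed) != 0) {
      CheckSuspend();
    }
  }

  // Slow path: keep servicing requests until neither a checkpoint nor a
  // suspension is pending.
  void CheckSuspend() {
    for (;;) {
      uint32_t f = flags_.load(std::memory_order_acquire);
      if ((f & kCheckpointRequest) != 0) {
        RunCheckpointFunction();
      } else if ((f & kSuspendRequest) != 0) {
        FullSuspendCheck();
      } else {
        break;
      }
    }
  }

 private:
  enum : uint32_t { kSuspendRequest = 1u << 0, kCheckpointRequest = 1u << 1 };

  // Stand-ins: clear the serviced flag so the loop can terminate.
  void RunCheckpointFunction() { flags_.fetch_and(~static_cast<uint32_t>(kCheckpointRequest)); }
  void FullSuspendCheck() { flags_.fetch_and(~static_cast<uint32_t>(kSuspendRequest)); }

  std::atomic<uint32_t> flags_{0};
};
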
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6e3e9c1..d4ac02b 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -592,7 +592,7 @@
     }
   }
   std::ostringstream ss;
-  Runtime::Current()->GetThreadList()->DumpLocked(ss);
+  Runtime::Current()->GetThreadList()->Dump(ss);
   LOG(FATAL) << ss.str();
 }
 
@@ -1602,7 +1602,8 @@
   ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
 }
 
-void Thread::ThrowNewException(const ThrowLocation& throw_location, const char* exception_class_descriptor,
+void Thread::ThrowNewException(const ThrowLocation& throw_location,
+                               const char* exception_class_descriptor,
                                const char* msg) {
   // Callers should either clear or call ThrowNewWrappedException.
   AssertNoPendingExceptionForNewException(msg);
@@ -1638,7 +1639,8 @@
     return;
   }
 
-  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class, true, true))) {
+  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
+                                                             true))) {
     DCHECK(IsExceptionPending());
     return;
   }
diff --git a/runtime/thread.h b/runtime/thread.h
index aca4069..d96b50b 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -146,6 +146,12 @@
 
   static Thread* Current();
 
+  // On a runnable thread, check for a pending thread suspension request and handle it if present.
+  void AllowThreadSuspension() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Process a pending thread suspension request, if one is pending.
+  void CheckSuspend() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                    mirror::Object* thread_peer)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
@@ -1029,7 +1035,11 @@
       deoptimization_shadow_frame(nullptr), shadow_frame_under_construction(nullptr), name(nullptr),
       pthread_self(0), last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
       thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
-      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr) {
+      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
+      nested_signal_state(nullptr) {
+        for (size_t i = 0; i < kLockLevelCount; ++i) {
+          held_mutexes[i] = nullptr;
+        }
     }
 
     // The biased card table, see CardTable for details.
@@ -1162,7 +1172,6 @@
   friend class Runtime;  // For CreatePeer.
   friend class QuickExceptionHandler;  // For dumping the stack.
   friend class ScopedThreadStateChange;
-  friend class SignalCatcher;  // For SetStateUnsafe.
   friend class StubTest;  // For accessing entrypoints.
   friend class ThreadList;  // For ~Thread and Destroy.
 
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index afb98ca..3cc2a28 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -88,10 +88,7 @@
 }
 
 void ThreadList::DumpForSigQuit(std::ostream& os) {
-  {
-    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
-    DumpLocked(os);
-  }
+  Dump(os);
   DumpUnattachedThreads(os);
 }
 
@@ -133,12 +130,50 @@
   closedir(d);
 }
 
-void ThreadList::DumpLocked(std::ostream& os) {
-  os << "DALVIK THREADS (" << list_.size() << "):\n";
-  for (const auto& thread : list_) {
-    thread->Dump(os);
-    os << "\n";
+// A closure used by Thread::Dump.
+class DumpCheckpoint FINAL : public Closure {
+ public:
+  explicit DumpCheckpoint(std::ostream* os) : os_(os), barrier_(0) {}
+
+  void Run(Thread* thread) OVERRIDE {
+    // Note thread and self may not be equal if thread was already suspended at the point of the
+    // request.
+    Thread* self = Thread::Current();
+    std::ostringstream local_os;
+    {
+      ScopedObjectAccess soa(self);
+      thread->Dump(local_os);
+    }
+    local_os << "\n";
+    {
+      // Use the logging lock to ensure serialization when writing to the common ostream.
+      MutexLock mu(self, *Locks::logging_lock_);
+      *os_ << local_os.str();
+    }
+    barrier_.Pass(self);
   }
+
+  void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
+    Thread* self = Thread::Current();
+    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+    barrier_.Increment(self, threads_running_checkpoint);
+  }
+
+ private:
+  // The common stream that will accumulate all the dumps.
+  std::ostream* const os_;
+  // The barrier that each dumping thread passes and that the requestor waits upon.
+  Barrier barrier_;
+};
+
+void ThreadList::Dump(std::ostream& os) {
+  {
+    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+    os << "DALVIK THREADS (" << list_.size() << "):\n";
+  }
+  DumpCheckpoint checkpoint(&os);
+  size_t threads_running_checkpoint = RunCheckpoint(&checkpoint);
+  checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
 }
 
 void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
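
ThreadList::Dump above is what lets SIGQUIT handling (see the signal_catcher.cc hunk earlier) avoid suspending every thread: each thread runs the checkpoint itself, writes its dump under the logging lock, and passes a barrier that the requesting thread waits on. A rough standalone model of that flow in portable C++ (all names hypothetical; the real code uses ART's Barrier and Closure types):

#include <condition_variable>
#include <cstddef>
#include <iostream>
#include <mutex>
#include <sstream>
#include <thread>
#include <vector>

// Tiny counting barrier: workers call Pass(); the requester waits for a count.
class CountingBarrier {
 public:
  void Pass() {
    std::lock_guard<std::mutex> lock(mu_);
    ++count_;
    cv_.notify_all();
  }
  void WaitFor(std::size_t expected) {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [&] { return count_ >= expected; });
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::size_t count_ = 0;
};

// Each thread builds its dump locally, appends it to the shared stream under a
// lock (so dumps do not interleave), then passes the barrier.
void DumpSelf(int thread_id, std::ostream* os, std::mutex* logging_lock,
              CountingBarrier* barrier) {
  std::ostringstream local_os;
  local_os << "thread " << thread_id << " stack...\n";
  {
    std::lock_guard<std::mutex> lock(*logging_lock);
    *os << local_os.str();
  }
  barrier->Pass();
}

int main() {
  std::mutex logging_lock;
  CountingBarrier barrier;
  std::vector<std::thread> threads;
  const std::size_t kThreads = 4;
  for (std::size_t i = 0; i < kThreads; ++i) {
    threads.emplace_back(DumpSelf, static_cast<int>(i), &std::cout, &logging_lock, &barrier);
  }
  barrier.WaitFor(kThreads);  // requester blocks until every thread has dumped
  for (auto& t : threads) {
    t.join();
  }
  return 0;
}
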
@@ -155,12 +190,12 @@
 
 #if HAVE_TIMED_RWLOCK
 // Attempt to rectify locks so that we dump thread list with required locks before exiting.
-static void UnsafeLogFatalForThreadSuspendAllTimeout() NO_THREAD_SAFETY_ANALYSIS __attribute__((noreturn));
+static void UnsafeLogFatalForThreadSuspendAllTimeout() __attribute__((noreturn));
 static void UnsafeLogFatalForThreadSuspendAllTimeout() {
   Runtime* runtime = Runtime::Current();
   std::ostringstream ss;
   ss << "Thread suspend timeout\n";
-  runtime->GetThreadList()->DumpLocked(ss);
+  runtime->GetThreadList()->Dump(ss);
   LOG(FATAL) << ss.str();
   exit(0);
 }
@@ -266,12 +301,10 @@
 // threads.  Returns the number of successful requests.
 size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
   Thread* self = Thread::Current();
-  if (kIsDebugBuild) {
-    Locks::mutator_lock_->AssertNotExclusiveHeld(self);
-    Locks::thread_list_lock_->AssertNotHeld(self);
-    Locks::thread_suspend_count_lock_->AssertNotHeld(self);
-    CHECK_NE(self->GetState(), kRunnable);
-  }
+  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
+  Locks::thread_list_lock_->AssertNotHeld(self);
+  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
+  CHECK_NE(self->GetState(), kRunnable);
 
   size_t count = 0;
   {
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index bb4f775..9f47f9f 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -39,11 +39,11 @@
   ~ThreadList();
 
   void DumpForSigQuit(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DumpLocked(std::ostream& os)  // For thread suspend timeout dumps.
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_);
+  // Dumps all threads; used for SIGQUIT and thread suspend timeout dumps.
+  void Dump(std::ostream& os)
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);
   pid_t GetLockOwner();  // For SignalCatcher.
 
   // Thread suspension support.
@@ -93,7 +93,8 @@
                      Locks::thread_suspend_count_lock_);
 
   size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);
 
   // Suspends all threads
   void SuspendAllForDebugger()
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 691aec4..8c37489 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -110,7 +110,7 @@
   Handle<mirror::Class> h_klass(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStaticFieldsTest;", class_loader)));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   ASSERT_TRUE(h_klass->IsInitialized());
 
   // Lookup fields.
@@ -205,7 +205,7 @@
   Handle<mirror::Class> h_klass(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LInstanceFieldsTest;", class_loader)));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   ASSERT_TRUE(h_klass->IsInitialized());
 
   // Allocate an InstanceFieldTest object.
@@ -305,7 +305,7 @@
   Handle<mirror::Class> h_klass(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStaticArrayFieldsTest;", class_loader)));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   ASSERT_TRUE(h_klass->IsInitialized());
 
   // Lookup fields.
@@ -419,12 +419,12 @@
   Handle<mirror::Class> h_klass(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LTransaction$EmptyStatic;", class_loader)));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
   Transaction transaction;
   Runtime::Current()->EnterTransactionMode(&transaction);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   Runtime::Current()->ExitTransactionMode();
   ASSERT_FALSE(soa.Self()->IsExceptionPending());
 }
@@ -440,12 +440,12 @@
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LTransaction$StaticFieldClass;",
                                             class_loader)));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
   Transaction transaction;
   Runtime::Current()->EnterTransactionMode(&transaction);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   Runtime::Current()->ExitTransactionMode();
   ASSERT_FALSE(soa.Self()->IsExceptionPending());
 }
@@ -464,29 +464,29 @@
       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(),
                                                   "Ljava/lang/ExceptionInInitializerError;")));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
   h_klass.Assign(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/InternalError;"));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
   // Load and verify Transaction$NativeSupport used in class initialization.
   h_klass.Assign(class_linker_->FindClass(soa.Self(), "LTransaction$NativeSupport;",
                                              class_loader));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
   h_klass.Assign(class_linker_->FindClass(soa.Self(), "LTransaction$BlacklistedClass;",
                                              class_loader));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
   Transaction transaction;
   Runtime::Current()->EnterTransactionMode(&transaction);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   Runtime::Current()->ExitTransactionMode();
   ASSERT_TRUE(soa.Self()->IsExceptionPending());
 }
diff --git a/runtime/utils.cc b/runtime/utils.cc
index d15a09a..6135e5d 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -823,7 +823,8 @@
 }
 
 enum ClassNameType { kName, kDescriptor };
-static bool IsValidClassName(const char* s, ClassNameType type, char separator) {
+template<ClassNameType kType, char kSeparator>
+static bool IsValidClassName(const char* s) {
   int arrayCount = 0;
   while (*s == '[') {
     arrayCount++;
@@ -835,7 +836,8 @@
     return false;
   }
 
-  if (arrayCount != 0) {
+  ClassNameType type = kType;
+  if (type != kDescriptor && arrayCount != 0) {
     /*
      * If we're looking at an array of some sort, then it doesn't
      * matter if what is being asked for is a class name; the
@@ -903,7 +905,7 @@
       return (type == kDescriptor) && !sepOrFirst && (s[1] == '\0');
     case '/':
     case '.':
-      if (c != separator) {
+      if (c != kSeparator) {
         // The wrong separator character.
         return false;
       }
@@ -925,15 +927,15 @@
 }
 
 bool IsValidBinaryClassName(const char* s) {
-  return IsValidClassName(s, kName, '.');
+  return IsValidClassName<kName, '.'>(s);
 }
 
 bool IsValidJniClassName(const char* s) {
-  return IsValidClassName(s, kName, '/');
+  return IsValidClassName<kName, '/'>(s);
 }
 
 bool IsValidDescriptor(const char* s) {
-  return IsValidClassName(s, kDescriptor, '/');
+  return IsValidClassName<kDescriptor, '/'>(s);
 }
 
 void Split(const std::string& s, char separator, std::vector<std::string>& result) {
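
The utils.cc change just above turns IsValidClassName's runtime arguments (type and separator) into template parameters, so each of the three wrappers gets its own specialized copy with the separator comparison folded to a constant. A small hypothetical illustration of the same trick (not the ART function itself):

// Hypothetical: the separator becomes a compile-time constant per instantiation,
// so the inner comparison and any dependent branches can be folded away.
template <char kSeparator>
static bool UsesOnlySeparator(const char* s) {
  for (; *s != '\0'; ++s) {
    if (*s == '.' || *s == '/') {
      if (*s != kSeparator) {  // constant comparison after instantiation
        return false;
      }
    }
  }
  return true;
}

inline bool UsesOnlyDots(const char* s) { return UsesOnlySeparator<'.'>(s); }
inline bool UsesOnlySlashes(const char* s) { return UsesOnlySeparator<'/'>(s); }
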
diff --git a/runtime/verifier/instruction_flags.cc b/runtime/verifier/instruction_flags.cc
index f76c226..ca3c687 100644
--- a/runtime/verifier/instruction_flags.cc
+++ b/runtime/verifier/instruction_flags.cc
@@ -22,13 +22,14 @@
 namespace verifier {
 
 std::string InstructionFlags::ToString() const {
-  char encoding[7];
+  char encoding[8];
   if (!IsOpcode()) {
-    strncpy(encoding, "XXXXXX", sizeof(encoding));
+    strncpy(encoding, "XXXXXXX", sizeof(encoding));
   } else {
-    strncpy(encoding, "------", sizeof(encoding));
+    strncpy(encoding, "-------", sizeof(encoding));
     if (IsVisited())               encoding[kVisited] = 'V';
     if (IsChanged())               encoding[kChanged] = 'C';
+    if (IsOpcode())                encoding[kOpcode] = 'O';
     if (IsInTry())                 encoding[kInTry] = 'T';
     if (IsBranchTarget())          encoding[kBranchTarget] = 'B';
     if (IsCompileTimeInfoPoint())  encoding[kCompileTimeInfoPoint] = 'G';
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index f8abca0..36a6e55 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -20,24 +20,23 @@
 #include <stdint.h>
 #include <string>
 
-#include "base/logging.h"
+#include "base/macros.h"
 
 namespace art {
 namespace verifier {
 
-class InstructionFlags {
+class InstructionFlags FINAL {
  public:
-  InstructionFlags() : length_(0), flags_(0) {}
+  InstructionFlags() : flags_(0) {}
 
-  void SetLengthInCodeUnits(size_t length) {
-    DCHECK_LT(length, 65536u);
-    length_ = length;
+  void SetIsOpcode() {
+    flags_ |= 1 << kOpcode;
   }
-  size_t GetLengthInCodeUnits() {
-    return length_;
+  void ClearIsOpcode() {
+    flags_ &= ~(1 << kOpcode);
   }
   bool IsOpcode() const {
-    return length_ != 0;
+    return (flags_ & (1 << kOpcode)) != 0;
   }
 
   void SetInTry() {
@@ -117,21 +116,22 @@
     // Register type information flowing into the instruction changed and so the instruction must be
     // reprocessed.
     kChanged = 1,
+    // The item at this location is an opcode.
+    kOpcode = 2,
     // Instruction is contained within a try region.
-    kInTry = 2,
+    kInTry = 3,
     // Instruction is the target of a branch (ie the start of a basic block).
-    kBranchTarget = 3,
+    kBranchTarget = 4,
     // Location of interest to the compiler for GC maps and verifier based method sharpening.
-    kCompileTimeInfoPoint = 4,
+    kCompileTimeInfoPoint = 5,
     // A return instruction.
-    kReturn = 5,
+    kReturn = 6,
   };
-
-  // Size of instruction in code units.
-  uint16_t length_;
   uint8_t flags_;
 };
 
+COMPILE_ASSERT(sizeof(InstructionFlags) == sizeof(uint8_t), err);
+
 }  // namespace verifier
 }  // namespace art
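
The instruction_flags.h hunk above removes the 16-bit length_ field and makes "this location holds an opcode" an explicit bit in the existing flags byte (previously it was inferred from length_ != 0), shrinking InstructionFlags to a single byte; the trailing COMPILE_ASSERT pins that down. A hypothetical sketch of the same packing in standard C++ (names are stand-ins):

#include <cstdint>

class PackedFlags {
 public:
  void SetIsOpcode() { flags_ |= 1u << kOpcode; }
  void ClearIsOpcode() { flags_ &= static_cast<uint8_t>(~(1u << kOpcode)); }
  bool IsOpcode() const { return (flags_ & (1u << kOpcode)) != 0; }

 private:
  enum : uint8_t { kOpcode = 2 };  // bit position, mirroring the new layout
  uint8_t flags_ = 0;
};

static_assert(sizeof(PackedFlags) == sizeof(uint8_t), "flags must stay one byte");
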
 
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 9cde8da..f90da15 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -38,6 +38,7 @@
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "reg_type-inl.h"
 #include "register_line-inl.h"
 #include "runtime.h"
 #include "scoped_thread_state_change.h"
@@ -87,7 +88,8 @@
   }
 }
 
-MethodVerifier::FailureKind MethodVerifier::VerifyClass(mirror::Class* klass,
+MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
+                                                        mirror::Class* klass,
                                                         bool allow_soft_failures,
                                                         std::string* error) {
   if (klass->IsVerified()) {
@@ -99,13 +101,13 @@
   const DexFile::ClassDef* class_def = klass->GetClassDef();
   mirror::Class* super = klass->GetSuperClass();
   std::string temp;
-  if (super == NULL && strcmp("Ljava/lang/Object;", klass->GetDescriptor(&temp)) != 0) {
+  if (super == nullptr && strcmp("Ljava/lang/Object;", klass->GetDescriptor(&temp)) != 0) {
     early_failure = true;
     failure_message = " that has no super class";
-  } else if (super != NULL && super->IsFinal()) {
+  } else if (super != nullptr && super->IsFinal()) {
     early_failure = true;
     failure_message = " that attempts to sub-class final class " + PrettyDescriptor(super);
-  } else if (class_def == NULL) {
+  } else if (class_def == nullptr) {
     early_failure = true;
     failure_message = " that isn't present in dex file " + dex_file.GetLocation();
   }
@@ -117,13 +119,14 @@
     }
     return kHardFailure;
   }
-  StackHandleScope<2> hs(Thread::Current());
+  StackHandleScope<2> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
-  return VerifyClass(&dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error);
+  return VerifyClass(self, &dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error);
 }
 
-MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
+MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
+                                                        const DexFile* dex_file,
                                                         ConstHandle<mirror::DexCache> dex_cache,
                                                         ConstHandle<mirror::ClassLoader> class_loader,
                                                         const DexFile::ClassDef* class_def,
@@ -131,7 +134,7 @@
                                                         std::string* error) {
   DCHECK(class_def != nullptr);
   const byte* class_data = dex_file->GetClassData(*class_def);
-  if (class_data == NULL) {
+  if (class_data == nullptr) {
     // empty class, probably a marker interface
     return kNoFailure;
   }
@@ -139,12 +142,12 @@
   while (it.HasNextStaticField() || it.HasNextInstanceField()) {
     it.Next();
   }
-  Thread* self = Thread::Current();
   size_t error_count = 0;
   bool hard_fail = false;
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
   int64_t previous_direct_method_idx = -1;
   while (it.HasNextDirectMethod()) {
+    self->AllowThreadSuspension();
     uint32_t method_idx = it.GetMemberIndex();
     if (method_idx == previous_direct_method_idx) {
       // smali can create dex files with two encoded_methods sharing the same method_idx
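
The self->AllowThreadSuspension() call at the top of each loop iteration (and the matching one in the virtual-method loop below) presumably adds a suspension point so a thread verifying a large class can be stopped between methods, for example when the GC requests a suspend-all, rather than only once the whole class is done. A toy illustration of such a cooperative check, with hypothetical names and a spin in place of real thread parking:

    #include <atomic>

    std::atomic<bool> suspend_requested{false};

    void CheckSuspend() {
      while (suspend_requested.load(std::memory_order_acquire)) {
        // spin here (a real runtime would block) until the flag is cleared
      }
    }

    void VerifyManyMethods(int count) {
      for (int i = 0; i < count; ++i) {
        CheckSuspend();        // bounded latency: at most one method per check
        // ... verify method i ...
      }
    }
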
@@ -157,14 +160,15 @@
     mirror::ArtMethod* method =
         linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader,
                               NullHandle<mirror::ArtMethod>(), type);
-    if (method == NULL) {
+    if (method == nullptr) {
       DCHECK(self->IsExceptionPending());
       // We couldn't resolve the method, but continue regardless.
       self->ClearException();
     }
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
-    MethodVerifier::FailureKind result = VerifyMethod(method_idx,
+    MethodVerifier::FailureKind result = VerifyMethod(self,
+                                                      method_idx,
                                                       dex_file,
                                                       dex_cache,
                                                       class_loader,
@@ -191,6 +195,7 @@
   }
   int64_t previous_virtual_method_idx = -1;
   while (it.HasNextVirtualMethod()) {
+    self->AllowThreadSuspension();
     uint32_t method_idx = it.GetMemberIndex();
     if (method_idx == previous_virtual_method_idx) {
       // smali can create dex files with two encoded_methods sharing the same method_idx
@@ -203,14 +208,15 @@
     mirror::ArtMethod* method =
         linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader,
                               NullHandle<mirror::ArtMethod>(), type);
-    if (method == NULL) {
+    if (method == nullptr) {
       DCHECK(self->IsExceptionPending());
       // We couldn't resolve the method, but continue regardless.
       self->ClearException();
     }
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
-    MethodVerifier::FailureKind result = VerifyMethod(method_idx,
+    MethodVerifier::FailureKind result = VerifyMethod(self,
+                                                      method_idx,
                                                       dex_file,
                                                       dex_cache,
                                                       class_loader,
@@ -242,7 +248,7 @@
   }
 }
 
-MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
+MethodVerifier::FailureKind MethodVerifier::VerifyMethod(Thread* self, uint32_t method_idx,
                                                          const DexFile* dex_file,
                                                          ConstHandle<mirror::DexCache> dex_cache,
                                                          ConstHandle<mirror::ClassLoader> class_loader,
@@ -255,7 +261,7 @@
   MethodVerifier::FailureKind result = kNoFailure;
   uint64_t start_ns = kTimeVerifyMethod ? NanoTime() : 0;
 
-  MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def, code_item,
+  MethodVerifier verifier(self, dex_file, dex_cache, class_loader, class_def, code_item,
                           method_idx, method, method_access_flags, true, allow_soft_failures,
                           need_precise_constants);
   if (verifier.Verify()) {
@@ -291,7 +297,7 @@
   return result;
 }
 
-void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_idx,
+void MethodVerifier::VerifyMethodAndDump(Thread* self, std::ostream& os, uint32_t dex_method_idx,
                                          const DexFile* dex_file,
                                          ConstHandle<mirror::DexCache> dex_cache,
                                          ConstHandle<mirror::ClassLoader> class_loader,
@@ -299,7 +305,7 @@
                                          const DexFile::CodeItem* code_item,
                                          ConstHandle<mirror::ArtMethod> method,
                                          uint32_t method_access_flags) {
-  MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def, code_item,
+  MethodVerifier verifier(self, dex_file, dex_cache, class_loader, class_def, code_item,
                           dex_method_idx, method, method_access_flags, true, true, true);
   verifier.Verify();
   verifier.DumpFailures(os);
@@ -307,14 +313,16 @@
   verifier.Dump(os);
 }
 
-MethodVerifier::MethodVerifier(const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
+MethodVerifier::MethodVerifier(Thread* self,
+                               const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
                                ConstHandle<mirror::ClassLoader> class_loader,
                                const DexFile::ClassDef* class_def,
                                const DexFile::CodeItem* code_item, uint32_t dex_method_idx,
                                ConstHandle<mirror::ArtMethod> method, uint32_t method_access_flags,
                                bool can_load_classes, bool allow_soft_failures,
                                bool need_precise_constants)
-    : reg_types_(can_load_classes),
+    : self_(self),
+      reg_types_(can_load_classes),
       work_insn_idx_(-1),
       dex_method_idx_(dex_method_idx),
       mirror_method_(method),
@@ -325,7 +333,7 @@
       class_loader_(class_loader),
       class_def_(class_def),
       code_item_(code_item),
-      declaring_class_(NULL),
+      declaring_class_(nullptr),
       interesting_dex_pc_(-1),
       monitor_enter_dex_pcs_(nullptr),
       have_pending_hard_failure_(false),
@@ -348,11 +356,12 @@
 
 void MethodVerifier::FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
                                       std::vector<uint32_t>* monitor_enter_dex_pcs) {
-  StackHandleScope<3> hs(Thread::Current());
+  Thread* self = Thread::Current();
+  StackHandleScope<3> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
   Handle<mirror::ArtMethod> method(hs.NewHandle(m));
-  MethodVerifier verifier(m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+  MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
                           m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
                           false, true, false);
   verifier.interesting_dex_pc_ = dex_pc;
@@ -361,8 +370,8 @@
 }
 
 void MethodVerifier::FindLocksAtDexPc() {
-  CHECK(monitor_enter_dex_pcs_ != NULL);
-  CHECK(code_item_ != NULL);  // This only makes sense for methods with code.
+  CHECK(monitor_enter_dex_pcs_ != nullptr);
+  CHECK(code_item_ != nullptr);  // This only makes sense for methods with code.
 
   // Strictly speaking, we ought to be able to get away with doing a subset of the full method
   // verification. In practice, the phase we want relies on data structures set up by all the
@@ -373,18 +382,19 @@
 
 mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
                                                            uint32_t dex_pc) {
-  StackHandleScope<3> hs(Thread::Current());
+  Thread* self = Thread::Current();
+  StackHandleScope<3> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
   Handle<mirror::ArtMethod> method(hs.NewHandle(m));
-  MethodVerifier verifier(m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+  MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
                           m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
                           true, true, false);
   return verifier.FindAccessedFieldAtDexPc(dex_pc);
 }
 
 mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) {
-  CHECK(code_item_ != NULL);  // This only makes sense for methods with code.
+  CHECK(code_item_ != nullptr);  // This only makes sense for methods with code.
 
   // Strictly speaking, we ought to be able to get away with doing a subset of the full method
   // verification. In practice, the phase we want relies on data structures set up by all the
@@ -395,7 +405,7 @@
     return nullptr;
   }
   RegisterLine* register_line = reg_table_.GetLine(dex_pc);
-  if (register_line == NULL) {
+  if (register_line == nullptr) {
     return nullptr;
   }
   const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
@@ -404,18 +414,19 @@
 
 mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(mirror::ArtMethod* m,
                                                             uint32_t dex_pc) {
-  StackHandleScope<3> hs(Thread::Current());
+  Thread* self = Thread::Current();
+  StackHandleScope<3> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
   Handle<mirror::ArtMethod> method(hs.NewHandle(m));
-  MethodVerifier verifier(m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+  MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
                           m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
                           true, true, false);
   return verifier.FindInvokedMethodAtDexPc(dex_pc);
 }
 
 mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) {
-  CHECK(code_item_ != NULL);  // This only makes sense for methods with code.
+  CHECK(code_item_ != nullptr);  // This only makes sense for methods with code.
 
   // Strictly speaking, we ought to be able to get away with doing a subset of the full method
   // verification. In practice, the phase we want relies on data structures set up by all the
@@ -423,11 +434,11 @@
   // got what we wanted.
   bool success = Verify();
   if (!success) {
-    return NULL;
+    return nullptr;
   }
   RegisterLine* register_line = reg_table_.GetLine(dex_pc);
-  if (register_line == NULL) {
-    return NULL;
+  if (register_line == nullptr) {
+    return nullptr;
   }
   const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
   const bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
@@ -436,7 +447,7 @@
 
 bool MethodVerifier::Verify() {
   // If there aren't any instructions, make sure that's expected, then exit successfully.
-  if (code_item_ == NULL) {
+  if (code_item_ == nullptr) {
     if ((method_access_flags_ & (kAccNative | kAccAbstract)) == 0) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "zero-length code in concrete non-native method";
       return false;
@@ -569,9 +580,9 @@
         break;
     }
     size_t inst_size = inst->SizeInCodeUnits();
-    insn_flags_[dex_pc].SetLengthInCodeUnits(inst_size);
+    insn_flags_[dex_pc].SetIsOpcode();
     dex_pc += inst_size;
-    inst = inst->Next();
+    inst = inst->RelativeAt(inst_size);
   }
 
   if (dex_pc != insns_size) {
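
With SetIsOpcode replacing SetLengthInCodeUnits, the per-pc flags presumably no longer cache each instruction's length; loops that need it (the try-range marking just below, the dead-code scan, Dump) now re-derive it from the instruction stream via SizeInCodeUnits() and step with RelativeAt(). A small sketch of walking code by decoded size, using a made-up one-or-two-unit encoding rather than real dex instruction formats:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::size_t SizeInCodeUnits(uint16_t insn) {
      return (insn & 0x8000) ? 2 : 1;     // toy encoding: high bit means "wide"
    }

    void ForEachInstruction(const std::vector<uint16_t>& code) {
      std::size_t pc = 0;
      while (pc < code.size()) {
        std::size_t size = SizeInCodeUnits(code[pc]);
        // ... visit the instruction at pc ...
        pc += size;                        // step by decoded size; no length table
      }
    }
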
@@ -607,9 +618,13 @@
           << "'try' block starts inside an instruction (" << start << ")";
       return false;
     }
-    for (uint32_t dex_pc = start; dex_pc < end;
-        dex_pc += insn_flags_[dex_pc].GetLengthInCodeUnits()) {
+    uint32_t dex_pc = start;
+    const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
+    while (dex_pc < end) {
       insn_flags_[dex_pc].SetInTry();
+      size_t insn_size = inst->SizeInCodeUnits();
+      dex_pc += insn_size;
+      inst = inst->RelativeAt(insn_size);
     }
   }
   // Iterate over each of the handlers to verify target addresses.
@@ -632,9 +647,9 @@
         mirror::Class* exception_type = linker->ResolveType(*dex_file_,
                                                             iterator.GetHandlerTypeIndex(),
                                                             dex_cache_, class_loader_);
-        if (exception_type == NULL) {
-          DCHECK(Thread::Current()->IsExceptionPending());
-          Thread::Current()->ClearException();
+        if (exception_type == nullptr) {
+          DCHECK(self_->IsExceptionPending());
+          self_->ClearException();
         }
       }
     }
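
Resolving each handler's exception type here is best effort: on failure the pending exception is cleared, now via the cached self_ instead of two Thread::Current() calls, so a missing catch type does not abort verification of the whole method. A compact sketch of that resolve-then-clear pattern, with hypothetical Ctx / ResolveType names:

    struct Klass {};
    struct Ctx {
      bool exception_pending = false;
      void ClearException() { exception_pending = false; }
    };

    // Stand-in for a class-linker lookup that may fail and leave an
    // exception pending on the calling thread.
    Klass* ResolveType(Ctx* self, int type_idx) {
      if (type_idx < 0) {
        self->exception_pending = true;    // simulate a resolution failure
        return nullptr;
      }
      static Klass k;
      return &k;
    }

    void PreResolveHandlerType(Ctx* self, int type_idx) {
      if (ResolveType(self, type_idx) == nullptr) {
        // best-effort lookup: swallow the failure so verification continues
        self->ClearException();
      }
    }
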
@@ -766,7 +781,7 @@
   return result;
 }
 
-bool MethodVerifier::CheckRegisterIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckRegisterIndex(uint32_t idx) {
   if (idx >= code_item_->registers_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register index out of range (" << idx << " >= "
                                       << code_item_->registers_size_ << ")";
@@ -775,7 +790,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckWideRegisterIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckWideRegisterIndex(uint32_t idx) {
   if (idx + 1 >= code_item_->registers_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register index out of range (" << idx
                                       << "+1 >= " << code_item_->registers_size_ << ")";
@@ -784,7 +799,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckFieldIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckFieldIndex(uint32_t idx) {
   if (idx >= dex_file_->GetHeader().field_ids_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad field index " << idx << " (max "
                                       << dex_file_->GetHeader().field_ids_size_ << ")";
@@ -793,7 +808,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckMethodIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckMethodIndex(uint32_t idx) {
   if (idx >= dex_file_->GetHeader().method_ids_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method index " << idx << " (max "
                                       << dex_file_->GetHeader().method_ids_size_ << ")";
@@ -802,7 +817,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckNewInstance(uint32_t idx) {
+inline bool MethodVerifier::CheckNewInstance(uint32_t idx) {
   if (idx >= dex_file_->GetHeader().type_ids_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx << " (max "
                                       << dex_file_->GetHeader().type_ids_size_ << ")";
@@ -817,7 +832,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckStringIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckStringIndex(uint32_t idx) {
   if (idx >= dex_file_->GetHeader().string_ids_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad string index " << idx << " (max "
                                       << dex_file_->GetHeader().string_ids_size_ << ")";
@@ -826,7 +841,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckTypeIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckTypeIndex(uint32_t idx) {
   if (idx >= dex_file_->GetHeader().type_ids_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx << " (max "
                                       << dex_file_->GetHeader().type_ids_size_ << ")";
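
Marking the Check*Index helpers inline is presumably aimed at the structural pass, where one of these bounds checks runs for nearly every instruction; they are small and, assuming they are defined ahead of their callers in this translation unit, the keyword encourages the compiler to fold them into the checking loop. A sketch of the shape of such a check, with hypothetical Header / CheckIndex names:

    #include <cstdint>
    #include <cstdio>

    struct Header { uint32_t type_ids_size; };

    inline bool CheckIndex(uint32_t idx, uint32_t limit, const char* what) {
      if (idx >= limit) {
        std::fprintf(stderr, "bad %s index %u (max %u)\n", what,
                     static_cast<unsigned>(idx), static_cast<unsigned>(limit));
        return false;
      }
      return true;
    }

    bool CheckTypeIdx(const Header& h, uint32_t idx) {
      return CheckIndex(idx, h.type_ids_size, "type");  // folds to a compare and branch
    }
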
@@ -1129,7 +1144,7 @@
 }
 
 void MethodVerifier::Dump(std::ostream& os) {
-  if (code_item_ == NULL) {
+  if (code_item_ == nullptr) {
     os << "Native method\n";
     return;
   }
@@ -1144,10 +1159,10 @@
   std::ostream indent_os(&indent_filter);
   const Instruction* inst = Instruction::At(code_item_->insns_);
   for (size_t dex_pc = 0; dex_pc < code_item_->insns_size_in_code_units_;
-      dex_pc += insn_flags_[dex_pc].GetLengthInCodeUnits()) {
+      dex_pc += inst->SizeInCodeUnits()) {
     RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
-    if (reg_line != NULL) {
-      indent_os << reg_line->Dump() << "\n";
+    if (reg_line != nullptr) {
+      indent_os << reg_line->Dump(this) << "\n";
     }
     indent_os << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
     const bool kDumpHexOfInstruction = false;
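
Throughout the rest of this file, RegisterLine calls such as Dump(this), GetRegisterType(this, ...) and SetRegisterType(this, ...) now take the verifier explicitly; presumably the line no longer keeps a back-pointer to its MethodVerifier, which saves a word on every line in the register table (potentially one line per dex pc), while the caller, which already holds the pointer, simply passes it in. A simplified sketch of that layout choice with hypothetical Verifier / Line types:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Verifier {
      void Fail(const char* msg) { std::fprintf(stderr, "%s\n", msg); }
    };

    struct Line {
      std::vector<uint16_t> regs;          // note: no Verifier* member anymore
      uint16_t Get(Verifier* v, std::size_t i) const {
        if (i >= regs.size()) {
          v->Fail("register index out of range");
          return 0;
        }
        return regs[i];
      }
      void Set(Verifier* v, std::size_t i, uint16_t value) {
        if (i >= regs.size()) {
          v->Fail("register index out of range");
          return;
        }
        regs[i] = value;
      }
    };
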
@@ -1189,10 +1204,10 @@
     // called.
     const RegType& declaring_class = GetDeclaringClass();
     if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
-      reg_line->SetRegisterType(arg_start + cur_arg,
+      reg_line->SetRegisterType(this, arg_start + cur_arg,
                                 reg_types_.UninitializedThisArgument(declaring_class));
     } else {
-      reg_line->SetRegisterType(arg_start + cur_arg, declaring_class);
+      reg_line->SetRegisterType(this, arg_start + cur_arg, declaring_class);
     }
     cur_arg++;
   }
@@ -1203,7 +1218,7 @@
 
   for (; iterator.HasNext(); iterator.Next()) {
     const char* descriptor = iterator.GetDescriptor();
-    if (descriptor == NULL) {
+    if (descriptor == nullptr) {
       LOG(FATAL) << "Null descriptor";
     }
     if (cur_arg >= expected_args) {
@@ -1224,26 +1239,26 @@
             DCHECK(HasFailures());
             return false;
           }
-          reg_line->SetRegisterType(arg_start + cur_arg, reg_type);
+          reg_line->SetRegisterType(this, arg_start + cur_arg, reg_type);
         }
         break;
       case 'Z':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Boolean());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Boolean());
         break;
       case 'C':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Char());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Char());
         break;
       case 'B':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Byte());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Byte());
         break;
       case 'I':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Integer());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Integer());
         break;
       case 'S':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Short());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Short());
         break;
       case 'F':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Float());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Float());
         break;
       case 'J':
       case 'D': {
@@ -1253,9 +1268,16 @@
           return false;
         }
 
-        const RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
-        const RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
-        reg_line->SetRegisterTypeWide(arg_start + cur_arg, lo_half, hi_half);
+        const RegType* lo_half;
+        const RegType* hi_half;
+        if (descriptor[0] == 'J') {
+          lo_half = &reg_types_.LongLo();
+          hi_half = &reg_types_.LongHi();
+        } else {
+          lo_half = &reg_types_.DoubleLo();
+          hi_half = &reg_types_.DoubleHi();
+        }
+        reg_line->SetRegisterTypeWide(this, arg_start + cur_arg, *lo_half, *hi_half);
         cur_arg++;
         break;
       }
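
The long/double argument handling now tests the shorty character once and selects a pointer pair, instead of two conditional expressions over references that each restated the same test; the four cache accessors also end up spelled out in one obvious place. A stripped-down version of the selection, with hypothetical Type / Cache names:

    struct Type { const char* name; };
    struct Cache {
      Type long_lo{"long(lo)"}, long_hi{"long(hi)"};
      Type double_lo{"double(lo)"}, double_hi{"double(hi)"};
    };

    void SetWidePair(const Type& lo, const Type& hi) {
      // stand-in for writing a wide value into a register pair
      (void)lo; (void)hi;
    }

    void SetWideArg(const Cache& cache, char shorty_char) {
      const Type* lo;
      const Type* hi;
      if (shorty_char == 'J') {      // test the descriptor once for both halves
        lo = &cache.long_lo;
        hi = &cache.long_hi;
      } else {                       // 'D'
        lo = &cache.double_lo;
        hi = &cache.double_hi;
      }
      SetWidePair(*lo, *hi);
    }
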
@@ -1317,6 +1339,7 @@
 
   /* Continue until no instructions are marked "changed". */
   while (true) {
+    self_->AllowThreadSuspension();
     // Find the first marked one. Use "start_guess" as a way to find one quickly.
     uint32_t insn_idx = start_guess;
     for (; insn_idx < insns_size; insn_idx++) {
@@ -1348,14 +1371,14 @@
        * a full table) and make sure it actually matches.
        */
       RegisterLine* register_line = reg_table_.GetLine(insn_idx);
-      if (register_line != NULL) {
+      if (register_line != nullptr) {
         if (work_line_->CompareLine(register_line) != 0) {
           Dump(std::cout);
           std::cout << info_messages_.str();
           LOG(FATAL) << "work_line diverged in " << PrettyMethod(dex_method_idx_, *dex_file_)
                      << "@" << reinterpret_cast<void*>(work_insn_idx_) << "\n"
-                     << " work_line=" << *work_line_ << "\n"
-                     << "  expected=" << *register_line;
+                     << " work_line=" << work_line_->Dump(this) << "\n"
+                     << "  expected=" << register_line->Dump(this);
         }
       }
     }
@@ -1381,7 +1404,8 @@
      */
     int dead_start = -1;
     uint32_t insn_idx = 0;
-    for (; insn_idx < insns_size; insn_idx += insn_flags_[insn_idx].GetLengthInCodeUnits()) {
+    for (; insn_idx < insns_size;
+         insn_idx += Instruction::At(code_item_->insns_ + insn_idx)->SizeInCodeUnits()) {
       /*
        * Switch-statement data doesn't get "visited" by scanner. It
        * may or may not be preceded by a padding NOP (for alignment).
@@ -1423,7 +1447,7 @@
   // We want the state _before_ the instruction, for the case where the dex pc we're
   // interested in is itself a monitor-enter instruction (which is a likely place
   // for a thread to be suspended).
-  if (monitor_enter_dex_pcs_ != NULL && work_insn_idx_ == interesting_dex_pc_) {
+  if (monitor_enter_dex_pcs_ != nullptr && work_insn_idx_ == interesting_dex_pc_) {
     monitor_enter_dex_pcs_->clear();  // The new work line is more accurate than the previous one.
     for (size_t i = 0; i < work_line_->GetMonitorEnterCount(); ++i) {
       monitor_enter_dex_pcs_->push_back(work_line_->GetMonitorEnterDexPc(i));
@@ -1457,7 +1481,7 @@
   if (gDebugVerify) {
     // Generate processing back trace to debug verifier
     LogVerifyInfo() << "Processing " << inst->DumpString(dex_file_) << "\n"
-                    << *work_line_.get() << "\n";
+                    << work_line_->Dump(this) << "\n";
   }
 
   /*
@@ -1493,31 +1517,31 @@
       break;
 
     case Instruction::MOVE:
-      work_line_->CopyRegister1(inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategory1nr);
+      work_line_->CopyRegister1(this, inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategory1nr);
       break;
     case Instruction::MOVE_FROM16:
-      work_line_->CopyRegister1(inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategory1nr);
+      work_line_->CopyRegister1(this, inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategory1nr);
       break;
     case Instruction::MOVE_16:
-      work_line_->CopyRegister1(inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategory1nr);
+      work_line_->CopyRegister1(this, inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategory1nr);
       break;
     case Instruction::MOVE_WIDE:
-      work_line_->CopyRegister2(inst->VRegA_12x(), inst->VRegB_12x());
+      work_line_->CopyRegister2(this, inst->VRegA_12x(), inst->VRegB_12x());
       break;
     case Instruction::MOVE_WIDE_FROM16:
-      work_line_->CopyRegister2(inst->VRegA_22x(), inst->VRegB_22x());
+      work_line_->CopyRegister2(this, inst->VRegA_22x(), inst->VRegB_22x());
       break;
     case Instruction::MOVE_WIDE_16:
-      work_line_->CopyRegister2(inst->VRegA_32x(), inst->VRegB_32x());
+      work_line_->CopyRegister2(this, inst->VRegA_32x(), inst->VRegB_32x());
       break;
     case Instruction::MOVE_OBJECT:
-      work_line_->CopyRegister1(inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategoryRef);
+      work_line_->CopyRegister1(this, inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategoryRef);
       break;
     case Instruction::MOVE_OBJECT_FROM16:
-      work_line_->CopyRegister1(inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategoryRef);
+      work_line_->CopyRegister1(this, inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategoryRef);
       break;
     case Instruction::MOVE_OBJECT_16:
-      work_line_->CopyRegister1(inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategoryRef);
+      work_line_->CopyRegister1(this, inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategoryRef);
       break;
 
     /*
@@ -1532,13 +1556,13 @@
      * easier to read in some cases.)
      */
     case Instruction::MOVE_RESULT:
-      work_line_->CopyResultRegister1(inst->VRegA_11x(), false);
+      work_line_->CopyResultRegister1(this, inst->VRegA_11x(), false);
       break;
     case Instruction::MOVE_RESULT_WIDE:
-      work_line_->CopyResultRegister2(inst->VRegA_11x());
+      work_line_->CopyResultRegister2(this, inst->VRegA_11x());
       break;
     case Instruction::MOVE_RESULT_OBJECT:
-      work_line_->CopyResultRegister1(inst->VRegA_11x(), true);
+      work_line_->CopyResultRegister1(this, inst->VRegA_11x(), true);
       break;
 
     case Instruction::MOVE_EXCEPTION: {
@@ -1547,18 +1571,18 @@
        * that as part of extracting the exception type from the catch block list.
        */
       const RegType& res_type = GetCaughtExceptionType();
-      work_line_->SetRegisterType(inst->VRegA_11x(), res_type);
+      work_line_->SetRegisterType(this, inst->VRegA_11x(), res_type);
       break;
     }
     case Instruction::RETURN_VOID:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
         if (!GetMethodReturnType().IsConflict()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void not expected";
         }
       }
       break;
     case Instruction::RETURN:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
         /* check the method signature */
         const RegType& return_type = GetMethodReturnType();
         if (!return_type.IsCategory1Types()) {
@@ -1568,14 +1592,14 @@
           // Compilers may generate synthetic functions that write byte values into boolean fields.
           // Also, it may use integer values for boolean, byte, short, and character return types.
           const uint32_t vregA = inst->VRegA_11x();
-          const RegType& src_type = work_line_->GetRegisterType(vregA);
+          const RegType& src_type = work_line_->GetRegisterType(this, vregA);
           bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
                           ((return_type.IsBoolean() || return_type.IsByte() ||
                            return_type.IsShort() || return_type.IsChar()) &&
                            src_type.IsInteger()));
           /* check the register contents */
           bool success =
-              work_line_->VerifyRegisterType(vregA, use_src ? src_type : return_type);
+              work_line_->VerifyRegisterType(this, vregA, use_src ? src_type : return_type);
           if (!success) {
             AppendToLastFailMessage(StringPrintf(" return-1nr on invalid register v%d", vregA));
           }
@@ -1583,7 +1607,7 @@
       }
       break;
     case Instruction::RETURN_WIDE:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
         /* check the method signature */
         const RegType& return_type = GetMethodReturnType();
         if (!return_type.IsCategory2Types()) {
@@ -1591,7 +1615,7 @@
         } else {
           /* check the register contents */
           const uint32_t vregA = inst->VRegA_11x();
-          bool success = work_line_->VerifyRegisterType(vregA, return_type);
+          bool success = work_line_->VerifyRegisterType(this, vregA, return_type);
           if (!success) {
             AppendToLastFailMessage(StringPrintf(" return-wide on invalid register v%d", vregA));
           }
@@ -1599,7 +1623,7 @@
       }
       break;
     case Instruction::RETURN_OBJECT:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
         const RegType& return_type = GetMethodReturnType();
         if (!return_type.IsReferenceTypes()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
@@ -1608,7 +1632,7 @@
           DCHECK(!return_type.IsZero());
           DCHECK(!return_type.IsUninitializedReference());
           const uint32_t vregA = inst->VRegA_11x();
-          const RegType& reg_type = work_line_->GetRegisterType(vregA);
+          const RegType& reg_type = work_line_->GetRegisterType(this, vregA);
           // Disallow returning uninitialized values and verify that the reference in vAA is an
           // instance of the "return_type"
           if (reg_type.IsUninitializedTypes()) {
@@ -1630,25 +1654,25 @@
       /* could be boolean, int, float, or a null reference */
     case Instruction::CONST_4: {
       int32_t val = static_cast<int32_t>(inst->VRegB_11n() << 28) >> 28;
-      work_line_->SetRegisterType(inst->VRegA_11n(),
+      work_line_->SetRegisterType(this, inst->VRegA_11n(),
                                   DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST_16: {
       int16_t val = static_cast<int16_t>(inst->VRegB_21s());
-      work_line_->SetRegisterType(inst->VRegA_21s(),
+      work_line_->SetRegisterType(this, inst->VRegA_21s(),
                                   DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST: {
       int32_t val = inst->VRegB_31i();
-      work_line_->SetRegisterType(inst->VRegA_31i(),
+      work_line_->SetRegisterType(this, inst->VRegA_31i(),
                                   DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST_HIGH16: {
       int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
-      work_line_->SetRegisterType(inst->VRegA_21h(),
+      work_line_->SetRegisterType(this, inst->VRegA_21h(),
                                   DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
@@ -1657,48 +1681,48 @@
       int64_t val = static_cast<int16_t>(inst->VRegB_21s());
       const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
       const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
-      work_line_->SetRegisterTypeWide(inst->VRegA_21s(), lo, hi);
+      work_line_->SetRegisterTypeWide(this, inst->VRegA_21s(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE_32: {
       int64_t val = static_cast<int32_t>(inst->VRegB_31i());
       const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
       const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
-      work_line_->SetRegisterTypeWide(inst->VRegA_31i(), lo, hi);
+      work_line_->SetRegisterTypeWide(this, inst->VRegA_31i(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE: {
       int64_t val = inst->VRegB_51l();
       const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
       const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
-      work_line_->SetRegisterTypeWide(inst->VRegA_51l(), lo, hi);
+      work_line_->SetRegisterTypeWide(this, inst->VRegA_51l(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE_HIGH16: {
       int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
       const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
       const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
-      work_line_->SetRegisterTypeWide(inst->VRegA_21h(), lo, hi);
+      work_line_->SetRegisterTypeWide(this, inst->VRegA_21h(), lo, hi);
       break;
     }
     case Instruction::CONST_STRING:
-      work_line_->SetRegisterType(inst->VRegA_21c(), reg_types_.JavaLangString());
+      work_line_->SetRegisterType(this, inst->VRegA_21c(), reg_types_.JavaLangString());
       break;
     case Instruction::CONST_STRING_JUMBO:
-      work_line_->SetRegisterType(inst->VRegA_31c(), reg_types_.JavaLangString());
+      work_line_->SetRegisterType(this, inst->VRegA_31c(), reg_types_.JavaLangString());
       break;
     case Instruction::CONST_CLASS: {
       // Get type from instruction if unresolved then we need an access check
       // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
       const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
       // Register holds class, ie its type is class, on error it will hold Conflict.
-      work_line_->SetRegisterType(inst->VRegA_21c(),
+      work_line_->SetRegisterType(this, inst->VRegA_21c(),
                                   res_type.IsConflict() ? res_type
-                                                        : reg_types_.JavaLangClass(true));
+                                                        : reg_types_.JavaLangClass());
       break;
     }
     case Instruction::MONITOR_ENTER:
-      work_line_->PushMonitor(inst->VRegA_11x(), work_insn_idx_);
+      work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
       break;
     case Instruction::MONITOR_EXIT:
       /*
@@ -1722,7 +1746,7 @@
        * "live" so we still need to check it.
        */
       opcode_flags &= ~Instruction::kThrow;
-      work_line_->PopMonitor(inst->VRegA_11x());
+      work_line_->PopMonitor(this, inst->VRegA_11x());
       break;
 
     case Instruction::CHECK_CAST:
@@ -1749,13 +1773,13 @@
 
         DCHECK_NE(failures_.size(), 0U);
         if (!is_checkcast) {
-          work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
+          work_line_->SetRegisterType(this, inst->VRegA_22c(), reg_types_.Boolean());
         }
         break;  // bad class
       }
       // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
       uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
-      const RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
+      const RegType& orig_type = work_line_->GetRegisterType(this, orig_type_reg);
       if (!res_type.IsNonZeroReferenceTypes()) {
         if (is_checkcast) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
@@ -1770,20 +1794,20 @@
         }
       } else {
         if (is_checkcast) {
-          work_line_->SetRegisterType(inst->VRegA_21c(), res_type);
+          work_line_->SetRegisterType(this, inst->VRegA_21c(), res_type);
         } else {
-          work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
+          work_line_->SetRegisterType(this, inst->VRegA_22c(), reg_types_.Boolean());
         }
       }
       break;
     }
     case Instruction::ARRAY_LENGTH: {
-      const RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
+      const RegType& res_type = work_line_->GetRegisterType(this, inst->VRegB_12x());
       if (res_type.IsReferenceTypes()) {
         if (!res_type.IsArrayTypes() && !res_type.IsZero()) {  // ie not an array or null
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
         } else {
-          work_line_->SetRegisterType(inst->VRegA_12x(), reg_types_.Integer());
+          work_line_->SetRegisterType(this, inst->VRegA_12x(), reg_types_.Integer());
         }
       } else {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
@@ -1806,9 +1830,9 @@
       const RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
       // Any registers holding previous allocations from this address that have not yet been
       // initialized must be marked invalid.
-      work_line_->MarkUninitRefsAsInvalid(uninit_type);
+      work_line_->MarkUninitRefsAsInvalid(this, uninit_type);
       // add the new uninitialized reference to the register state
-      work_line_->SetRegisterType(inst->VRegA_21c(), uninit_type);
+      work_line_->SetRegisterType(this, inst->VRegA_21c(), uninit_type);
       break;
     }
     case Instruction::NEW_ARRAY:
@@ -1824,39 +1848,39 @@
       break;
     case Instruction::CMPL_FLOAT:
     case Instruction::CMPG_FLOAT:
-      if (!work_line_->VerifyRegisterType(inst->VRegB_23x(), reg_types_.Float())) {
+      if (!work_line_->VerifyRegisterType(this, inst->VRegB_23x(), reg_types_.Float())) {
         break;
       }
-      if (!work_line_->VerifyRegisterType(inst->VRegC_23x(), reg_types_.Float())) {
+      if (!work_line_->VerifyRegisterType(this, inst->VRegC_23x(), reg_types_.Float())) {
         break;
       }
-      work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
+      work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::CMPL_DOUBLE:
     case Instruction::CMPG_DOUBLE:
-      if (!work_line_->VerifyRegisterTypeWide(inst->VRegB_23x(), reg_types_.DoubleLo(),
+      if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegB_23x(), reg_types_.DoubleLo(),
                                               reg_types_.DoubleHi())) {
         break;
       }
-      if (!work_line_->VerifyRegisterTypeWide(inst->VRegC_23x(), reg_types_.DoubleLo(),
+      if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegC_23x(), reg_types_.DoubleLo(),
                                               reg_types_.DoubleHi())) {
         break;
       }
-      work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
+      work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::CMP_LONG:
-      if (!work_line_->VerifyRegisterTypeWide(inst->VRegB_23x(), reg_types_.LongLo(),
+      if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegB_23x(), reg_types_.LongLo(),
                                               reg_types_.LongHi())) {
         break;
       }
-      if (!work_line_->VerifyRegisterTypeWide(inst->VRegC_23x(), reg_types_.LongLo(),
+      if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegC_23x(), reg_types_.LongLo(),
                                               reg_types_.LongHi())) {
         break;
       }
-      work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
+      work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::THROW: {
-      const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
+      const RegType& res_type = work_line_->GetRegisterType(this, inst->VRegA_11x());
       if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
         Fail(res_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT)
             << "thrown class " << res_type << " not instanceof Throwable";
@@ -1872,12 +1896,12 @@
     case Instruction::PACKED_SWITCH:
     case Instruction::SPARSE_SWITCH:
       /* verify that vAA is an integer, or can be converted to one */
-      work_line_->VerifyRegisterType(inst->VRegA_31t(), reg_types_.Integer());
+      work_line_->VerifyRegisterType(this, inst->VRegA_31t(), reg_types_.Integer());
       break;
 
     case Instruction::FILL_ARRAY_DATA: {
       /* Similar to the verification done for APUT */
-      const RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
+      const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegA_31t());
       /* array_type can be null if the reg type is Zero */
       if (!array_type.IsZero()) {
         if (!array_type.IsArrayTypes()) {
@@ -1911,8 +1935,8 @@
     }
     case Instruction::IF_EQ:
     case Instruction::IF_NE: {
-      const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
-      const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+      const RegType& reg_type1 = work_line_->GetRegisterType(this, inst->VRegA_22t());
+      const RegType& reg_type2 = work_line_->GetRegisterType(this, inst->VRegB_22t());
       bool mismatch = false;
       if (reg_type1.IsZero()) {  // zero then integral or reference expected
         mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
@@ -1931,8 +1955,8 @@
     case Instruction::IF_GE:
     case Instruction::IF_GT:
     case Instruction::IF_LE: {
-      const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
-      const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+      const RegType& reg_type1 = work_line_->GetRegisterType(this, inst->VRegA_22t());
+      const RegType& reg_type2 = work_line_->GetRegisterType(this, inst->VRegB_22t());
       if (!reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to 'if' (" << reg_type1 << ","
                                           << reg_type2 << ") must be integral";
@@ -1941,7 +1965,7 @@
     }
     case Instruction::IF_EQZ:
     case Instruction::IF_NEZ: {
-      const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+      const RegType& reg_type = work_line_->GetRegisterType(this, inst->VRegA_21t());
       if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
                                           << " unexpected as arg to if-eqz/if-nez";
@@ -1987,7 +2011,7 @@
         // type is assignable to the original then allow optimization. This check is performed to
         // ensure that subsequent merges don't lose type information - such as becoming an
         // interface from a class that would lose information relevant to field checks.
-        const RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
+        const RegType& orig_type = work_line_->GetRegisterType(this, instance_of_inst->VRegB_22c());
         const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
 
         if (!orig_type.Equals(cast_type) &&
@@ -2003,7 +2027,7 @@
             branch_line.reset(update_line);
           }
           update_line->CopyFromLine(work_line_.get());
-          update_line->SetRegisterType(instance_of_inst->VRegB_22c(), cast_type);
+          update_line->SetRegisterType(this, instance_of_inst->VRegB_22c(), cast_type);
           if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
             // See if instance-of was preceded by a move-object operation, common due to the small
             // register encoding space of instance-of, and propagate type information to the source
@@ -2017,17 +2041,17 @@
             switch (move_inst->Opcode()) {
               case Instruction::MOVE_OBJECT:
                 if (move_inst->VRegA_12x() == instance_of_inst->VRegB_22c()) {
-                  update_line->SetRegisterType(move_inst->VRegB_12x(), cast_type);
+                  update_line->SetRegisterType(this, move_inst->VRegB_12x(), cast_type);
                 }
                 break;
               case Instruction::MOVE_OBJECT_FROM16:
                 if (move_inst->VRegA_22x() == instance_of_inst->VRegB_22c()) {
-                  update_line->SetRegisterType(move_inst->VRegB_22x(), cast_type);
+                  update_line->SetRegisterType(this, move_inst->VRegB_22x(), cast_type);
                 }
                 break;
               case Instruction::MOVE_OBJECT_16:
                 if (move_inst->VRegA_32x() == instance_of_inst->VRegB_22c()) {
-                  update_line->SetRegisterType(move_inst->VRegB_32x(), cast_type);
+                  update_line->SetRegisterType(this, move_inst->VRegB_32x(), cast_type);
                 }
                 break;
               default:
@@ -2043,7 +2067,7 @@
     case Instruction::IF_GEZ:
     case Instruction::IF_GTZ:
     case Instruction::IF_LEZ: {
-      const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+      const RegType& reg_type = work_line_->GetRegisterType(this, inst->VRegA_21t());
       if (!reg_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
                                           << " unexpected as arg to if-ltz/if-gez/if-gtz/if-lez";
@@ -2194,8 +2218,7 @@
                                                               is_super);
       const RegType* return_type = nullptr;
       if (called_method != nullptr) {
-        Thread* self = Thread::Current();
-        StackHandleScope<1> hs(self);
+        StackHandleScope<1> hs(self_);
         Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method));
         MethodHelper mh(h_called_method);
         mirror::Class* return_type_class = mh.GetReturnType(can_load_classes_);
@@ -2204,8 +2227,8 @@
                                               return_type_class,
                                               return_type_class->CannotBeAssignedFromOtherTypes());
         } else {
-          DCHECK(!can_load_classes_ || self->IsExceptionPending());
-          self->ClearException();
+          DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+          self_->ClearException();
         }
       }
       if (return_type == nullptr) {
@@ -2216,7 +2239,7 @@
         return_type = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
       }
       if (!return_type->IsLowHalf()) {
-        work_line_->SetResultRegisterType(*return_type);
+        work_line_->SetResultRegisterType(this, *return_type);
       } else {
         work_line_->SetResultRegisterTypeWide(*return_type, return_type->HighHalf(&reg_types_));
       }
@@ -2231,7 +2254,7 @@
       const char* return_type_descriptor;
       bool is_constructor;
       const RegType* return_type = nullptr;
-      if (called_method == NULL) {
+      if (called_method == nullptr) {
         uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         is_constructor = strcmp("<init>", dex_file_->StringDataByIdx(method_id.name_idx_)) == 0;
@@ -2240,8 +2263,7 @@
       } else {
         is_constructor = called_method->IsConstructor();
         return_type_descriptor = called_method->GetReturnTypeDescriptor();
-        Thread* self = Thread::Current();
-        StackHandleScope<1> hs(self);
+        StackHandleScope<1> hs(self_);
         Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method));
         MethodHelper mh(h_called_method);
         mirror::Class* return_type_class = mh.GetReturnType(can_load_classes_);
@@ -2250,8 +2272,8 @@
                                               return_type_class,
                                               return_type_class->CannotBeAssignedFromOtherTypes());
         } else {
-          DCHECK(!can_load_classes_ || self->IsExceptionPending());
-          self->ClearException();
+          DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+          self_->ClearException();
         }
       }
       if (is_constructor) {
@@ -2262,7 +2284,7 @@
          * allowing the latter only if the "this" argument is the same as the "this" argument to
          * this method (which implies that we're in a constructor ourselves).
          */
-        const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+        const RegType& this_type = work_line_->GetInvocationThis(this, inst, is_range);
         if (this_type.IsConflict())  // failure.
           break;
 
@@ -2292,14 +2314,14 @@
          * Replace the uninitialized reference with an initialized one. We need to do this for all
          * registers that have the same object instance in them, not just the "this" register.
          */
-        work_line_->MarkRefsAsInitialized(this_type);
+        work_line_->MarkRefsAsInitialized(this, this_type);
       }
       if (return_type == nullptr) {
         return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor,
                                                  false);
       }
       if (!return_type->IsLowHalf()) {
-        work_line_->SetResultRegisterType(*return_type);
+        work_line_->SetResultRegisterType(this, *return_type);
       } else {
         work_line_->SetResultRegisterTypeWide(*return_type, return_type->HighHalf(&reg_types_));
       }
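
MarkRefsAsInitialized (with the verifier now passed in) is the second half of the uninitialized-reference story sketched earlier: once <init> has run on the "this" value, every register aliasing that same allocation, identified by its allocation pc, flips to the initialized type at once. Roughly, with the same hypothetical tagging:

    #include <cstdint>
    #include <vector>

    struct RegTag {
      bool uninitialized = false;
      uint32_t alloc_pc = 0;         // allocation site, valid while uninitialized
      int type_id = 0;
    };

    // After invoke-direct <init> on this_arg, every register aliasing the same
    // allocation (same pc) becomes the fully initialized type.
    void MarkRefsInitialized(std::vector<RegTag>& regs, const RegTag& this_arg) {
      for (RegTag& r : regs) {
        if (r.uninitialized && r.alloc_pc == this_arg.alloc_pc) {
          r.uninitialized = false;
        }
      }
    }
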
@@ -2314,7 +2336,7 @@
                                                                      is_range,
                                                                      false);
         const char* descriptor;
-        if (called_method == NULL) {
+        if (called_method == nullptr) {
           uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
           const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
           uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
@@ -2324,7 +2346,7 @@
         }
         const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
         if (!return_type.IsLowHalf()) {
-          work_line_->SetResultRegisterType(return_type);
+          work_line_->SetResultRegisterType(this, return_type);
         } else {
           work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
         }
@@ -2338,7 +2360,7 @@
                                                                 METHOD_INTERFACE,
                                                                 is_range,
                                                                 false);
-      if (abs_method != NULL) {
+      if (abs_method != nullptr) {
         mirror::Class* called_interface = abs_method->GetDeclaringClass();
         if (!called_interface->IsInterface() && !called_interface->IsObjectClass()) {
           Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected interface class in invoke-interface '"
@@ -2349,7 +2371,7 @@
       /* Get the type of the "this" arg, which should either be a sub-interface of called
        * interface or Object (see comments in RegType::JoinClass).
        */
-      const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+      const RegType& this_type = work_line_->GetInvocationThis(this, inst, is_range);
       if (this_type.IsZero()) {
         /* null pointer always passes (and always fails at runtime) */
       } else {
@@ -2371,7 +2393,7 @@
        * the type information is in the abstract method, so we're good.
        */
       const char* descriptor;
-      if (abs_method == NULL) {
+      if (abs_method == nullptr) {
         uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
@@ -2381,7 +2403,7 @@
       }
       const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
       if (!return_type.IsLowHalf()) {
-        work_line_->SetResultRegisterType(return_type);
+        work_line_->SetResultRegisterType(this, return_type);
       } else {
         work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
       }
@@ -2390,74 +2412,74 @@
     }
     case Instruction::NEG_INT:
     case Instruction::NOT_INT:
-      work_line_->CheckUnaryOp(inst, reg_types_.Integer(), reg_types_.Integer());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Integer(), reg_types_.Integer());
       break;
     case Instruction::NEG_LONG:
     case Instruction::NOT_LONG:
-      work_line_->CheckUnaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckUnaryOpWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                    reg_types_.LongLo(), reg_types_.LongHi());
       break;
     case Instruction::NEG_FLOAT:
-      work_line_->CheckUnaryOp(inst, reg_types_.Float(), reg_types_.Float());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Float(), reg_types_.Float());
       break;
     case Instruction::NEG_DOUBLE:
-      work_line_->CheckUnaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckUnaryOpWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                    reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
     case Instruction::INT_TO_LONG:
-      work_line_->CheckUnaryOpToWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckUnaryOpToWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                      reg_types_.Integer());
       break;
     case Instruction::INT_TO_FLOAT:
-      work_line_->CheckUnaryOp(inst, reg_types_.Float(), reg_types_.Integer());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Float(), reg_types_.Integer());
       break;
     case Instruction::INT_TO_DOUBLE:
-      work_line_->CheckUnaryOpToWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckUnaryOpToWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                      reg_types_.Integer());
       break;
     case Instruction::LONG_TO_INT:
-      work_line_->CheckUnaryOpFromWide(inst, reg_types_.Integer(),
+      work_line_->CheckUnaryOpFromWide(this, inst, reg_types_.Integer(),
                                        reg_types_.LongLo(), reg_types_.LongHi());
       break;
     case Instruction::LONG_TO_FLOAT:
-      work_line_->CheckUnaryOpFromWide(inst, reg_types_.Float(),
+      work_line_->CheckUnaryOpFromWide(this, inst, reg_types_.Float(),
                                        reg_types_.LongLo(), reg_types_.LongHi());
       break;
     case Instruction::LONG_TO_DOUBLE:
-      work_line_->CheckUnaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckUnaryOpWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                    reg_types_.LongLo(), reg_types_.LongHi());
       break;
     case Instruction::FLOAT_TO_INT:
-      work_line_->CheckUnaryOp(inst, reg_types_.Integer(), reg_types_.Float());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Integer(), reg_types_.Float());
       break;
     case Instruction::FLOAT_TO_LONG:
-      work_line_->CheckUnaryOpToWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckUnaryOpToWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                      reg_types_.Float());
       break;
     case Instruction::FLOAT_TO_DOUBLE:
-      work_line_->CheckUnaryOpToWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckUnaryOpToWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                      reg_types_.Float());
       break;
     case Instruction::DOUBLE_TO_INT:
-      work_line_->CheckUnaryOpFromWide(inst, reg_types_.Integer(),
+      work_line_->CheckUnaryOpFromWide(this, inst, reg_types_.Integer(),
                                        reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
     case Instruction::DOUBLE_TO_LONG:
-      work_line_->CheckUnaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckUnaryOpWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                    reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
     case Instruction::DOUBLE_TO_FLOAT:
-      work_line_->CheckUnaryOpFromWide(inst, reg_types_.Float(),
+      work_line_->CheckUnaryOpFromWide(this, inst, reg_types_.Float(),
                                        reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
     case Instruction::INT_TO_BYTE:
-      work_line_->CheckUnaryOp(inst, reg_types_.Byte(), reg_types_.Integer());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Byte(), reg_types_.Integer());
       break;
     case Instruction::INT_TO_CHAR:
-      work_line_->CheckUnaryOp(inst, reg_types_.Char(), reg_types_.Integer());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Char(), reg_types_.Integer());
       break;
     case Instruction::INT_TO_SHORT:
-      work_line_->CheckUnaryOp(inst, reg_types_.Short(), reg_types_.Integer());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Short(), reg_types_.Integer());
       break;
 
     case Instruction::ADD_INT:
@@ -2468,13 +2490,13 @@
     case Instruction::SHL_INT:
     case Instruction::SHR_INT:
     case Instruction::USHR_INT:
-      work_line_->CheckBinaryOp(inst, reg_types_.Integer(), reg_types_.Integer(),
+      work_line_->CheckBinaryOp(this, inst, reg_types_.Integer(), reg_types_.Integer(),
                                 reg_types_.Integer(), false);
       break;
     case Instruction::AND_INT:
     case Instruction::OR_INT:
     case Instruction::XOR_INT:
-      work_line_->CheckBinaryOp(inst, reg_types_.Integer(), reg_types_.Integer(),
+      work_line_->CheckBinaryOp(this, inst, reg_types_.Integer(), reg_types_.Integer(),
                                 reg_types_.Integer(), true);
       break;
     case Instruction::ADD_LONG:
@@ -2485,7 +2507,7 @@
     case Instruction::AND_LONG:
     case Instruction::OR_LONG:
     case Instruction::XOR_LONG:
-      work_line_->CheckBinaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckBinaryOpWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                     reg_types_.LongLo(), reg_types_.LongHi(),
                                     reg_types_.LongLo(), reg_types_.LongHi());
       break;
@@ -2493,7 +2515,7 @@
     case Instruction::SHR_LONG:
     case Instruction::USHR_LONG:
       /* shift distance is Int, making these different from other binary operations */
-      work_line_->CheckBinaryOpWideShift(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckBinaryOpWideShift(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                          reg_types_.Integer());
       break;
     case Instruction::ADD_FLOAT:
@@ -2501,18 +2523,15 @@
     case Instruction::MUL_FLOAT:
     case Instruction::DIV_FLOAT:
     case Instruction::REM_FLOAT:
-      work_line_->CheckBinaryOp(inst,
-                                reg_types_.Float(),
-                                reg_types_.Float(),
-                                reg_types_.Float(),
-                                false);
+      work_line_->CheckBinaryOp(this, inst, reg_types_.Float(), reg_types_.Float(),
+                                reg_types_.Float(), false);
       break;
     case Instruction::ADD_DOUBLE:
     case Instruction::SUB_DOUBLE:
     case Instruction::MUL_DOUBLE:
     case Instruction::DIV_DOUBLE:
     case Instruction::REM_DOUBLE:
-      work_line_->CheckBinaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckBinaryOpWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                     reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                     reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
@@ -2523,27 +2542,18 @@
     case Instruction::SHL_INT_2ADDR:
     case Instruction::SHR_INT_2ADDR:
     case Instruction::USHR_INT_2ADDR:
-      work_line_->CheckBinaryOp2addr(inst,
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     false);
+      work_line_->CheckBinaryOp2addr(this, inst, reg_types_.Integer(), reg_types_.Integer(),
+                                     reg_types_.Integer(), false);
       break;
     case Instruction::AND_INT_2ADDR:
     case Instruction::OR_INT_2ADDR:
     case Instruction::XOR_INT_2ADDR:
-      work_line_->CheckBinaryOp2addr(inst,
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     true);
+      work_line_->CheckBinaryOp2addr(this, inst, reg_types_.Integer(), reg_types_.Integer(),
+                                     reg_types_.Integer(), true);
       break;
     case Instruction::DIV_INT_2ADDR:
-      work_line_->CheckBinaryOp2addr(inst,
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     false);
+      work_line_->CheckBinaryOp2addr(this, inst, reg_types_.Integer(), reg_types_.Integer(),
+                                     reg_types_.Integer(), false);
       break;
     case Instruction::ADD_LONG_2ADDR:
     case Instruction::SUB_LONG_2ADDR:
@@ -2553,14 +2563,14 @@
     case Instruction::AND_LONG_2ADDR:
     case Instruction::OR_LONG_2ADDR:
     case Instruction::XOR_LONG_2ADDR:
-      work_line_->CheckBinaryOp2addrWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckBinaryOp2addrWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                          reg_types_.LongLo(), reg_types_.LongHi(),
                                          reg_types_.LongLo(), reg_types_.LongHi());
       break;
     case Instruction::SHL_LONG_2ADDR:
     case Instruction::SHR_LONG_2ADDR:
     case Instruction::USHR_LONG_2ADDR:
-      work_line_->CheckBinaryOp2addrWideShift(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckBinaryOp2addrWideShift(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                               reg_types_.Integer());
       break;
     case Instruction::ADD_FLOAT_2ADDR:
@@ -2568,18 +2578,15 @@
     case Instruction::MUL_FLOAT_2ADDR:
     case Instruction::DIV_FLOAT_2ADDR:
     case Instruction::REM_FLOAT_2ADDR:
-      work_line_->CheckBinaryOp2addr(inst,
-                                     reg_types_.Float(),
-                                     reg_types_.Float(),
-                                     reg_types_.Float(),
-                                     false);
+      work_line_->CheckBinaryOp2addr(this, inst, reg_types_.Float(), reg_types_.Float(),
+                                     reg_types_.Float(), false);
       break;
     case Instruction::ADD_DOUBLE_2ADDR:
     case Instruction::SUB_DOUBLE_2ADDR:
     case Instruction::MUL_DOUBLE_2ADDR:
     case Instruction::DIV_DOUBLE_2ADDR:
     case Instruction::REM_DOUBLE_2ADDR:
-      work_line_->CheckBinaryOp2addrWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckBinaryOp2addrWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                          reg_types_.DoubleLo(),  reg_types_.DoubleHi(),
                                          reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
@@ -2588,12 +2595,14 @@
     case Instruction::MUL_INT_LIT16:
     case Instruction::DIV_INT_LIT16:
     case Instruction::REM_INT_LIT16:
-      work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), false, true);
+      work_line_->CheckLiteralOp(this, inst, reg_types_.Integer(), reg_types_.Integer(), false,
+                                 true);
       break;
     case Instruction::AND_INT_LIT16:
     case Instruction::OR_INT_LIT16:
     case Instruction::XOR_INT_LIT16:
-      work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), true, true);
+      work_line_->CheckLiteralOp(this, inst, reg_types_.Integer(), reg_types_.Integer(), true,
+                                 true);
       break;
     case Instruction::ADD_INT_LIT8:
     case Instruction::RSUB_INT_LIT8:
@@ -2603,12 +2612,14 @@
     case Instruction::SHL_INT_LIT8:
     case Instruction::SHR_INT_LIT8:
     case Instruction::USHR_INT_LIT8:
-      work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), false, false);
+      work_line_->CheckLiteralOp(this, inst, reg_types_.Integer(), reg_types_.Integer(), false,
+                                 false);
       break;
     case Instruction::AND_INT_LIT8:
     case Instruction::OR_INT_LIT8:
     case Instruction::XOR_INT_LIT8:
-      work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), true, false);
+      work_line_->CheckLiteralOp(this, inst, reg_types_.Integer(), reg_types_.Integer(), true,
+                                 false);
       break;
 
     // Special instructions.
@@ -2654,11 +2665,11 @@
     case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
       bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
       mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
-      if (called_method != NULL) {
+      if (called_method != nullptr) {
         const char* descriptor = called_method->GetReturnTypeDescriptor();
         const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
         if (!return_type.IsLowHalf()) {
-          work_line_->SetResultRegisterType(return_type);
+          work_line_->SetResultRegisterType(this, return_type);
         } else {
           work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
         }
@@ -2720,7 +2731,7 @@
    * not expensive and it makes our debugging output cleaner.)
    */
   if (!just_set_result) {
-    work_line_->SetResultTypeToUnknown();
+    work_line_->SetResultTypeToUnknown(this);
   }
 
 
@@ -2749,7 +2760,7 @@
       return false;
     }
     /* update branch target, set "changed" if appropriate */
-    if (NULL != branch_line.get()) {
+    if (nullptr != branch_line.get()) {
       if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get(), false)) {
         return false;
       }
@@ -2825,9 +2836,8 @@
           }
         } else {
           // Clear exception.
-          Thread* self = Thread::Current();
-          DCHECK(self->IsExceptionPending());
-          self->ClearException();
+          DCHECK(self_->IsExceptionPending());
+          self_->ClearException();
         }
       }
       /*
@@ -2864,7 +2874,8 @@
    *        and this change should not be used in those cases.
    */
   if ((opcode_flags & Instruction::kContinue) != 0) {
-    uint32_t next_insn_idx = work_insn_idx_ + CurrentInsnFlags()->GetLengthInCodeUnits();
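+    // 'inst' already refers to the instruction at work_insn_idx_ (asserted below), so its size
+    // in code units can be read directly from it.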
+    DCHECK_EQ(Instruction::At(code_item_->insns_ + work_insn_idx_), inst);
+    uint32_t next_insn_idx = work_insn_idx_ + inst->SizeInCodeUnits();
     if (next_insn_idx >= code_item_->insns_size_in_code_units_) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Execution can walk off end of code area";
       return false;
@@ -2874,7 +2885,7 @@
     if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
       return false;
     }
-    if (NULL != fallthrough_line.get()) {
+    if (nullptr != fallthrough_line.get()) {
       // Make workline consistent with fallthrough computed from peephole optimization.
       work_line_->CopyFromLine(fallthrough_line.get());
     }
@@ -2883,17 +2894,17 @@
       const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
       Instruction::Code opcode = ret_inst->Opcode();
       if ((opcode == Instruction::RETURN_VOID) || (opcode == Instruction::RETURN_VOID_BARRIER)) {
-        work_line_->MarkAllRegistersAsConflicts();
+        work_line_->MarkAllRegistersAsConflicts(this);
       } else {
         if (opcode == Instruction::RETURN_WIDE) {
-          work_line_->MarkAllRegistersAsConflictsExceptWide(ret_inst->VRegA_11x());
+          work_line_->MarkAllRegistersAsConflictsExceptWide(this, ret_inst->VRegA_11x());
         } else {
-          work_line_->MarkAllRegistersAsConflictsExcept(ret_inst->VRegA_11x());
+          work_line_->MarkAllRegistersAsConflictsExcept(this, ret_inst->VRegA_11x());
         }
       }
     }
     RegisterLine* next_line = reg_table_.GetLine(next_insn_idx);
-    if (next_line != NULL) {
+    if (next_line != nullptr) {
       // Merge registers into what we have for the next instruction, and set the "changed" flag if
       // needed. If the merge changes the state of the registers then the work line will be
       // updated.
@@ -2911,7 +2922,7 @@
 
   /* If we're returning from the method, make sure monitor stack is empty. */
   if ((opcode_flags & Instruction::kReturn) != 0) {
-    if (!work_line_->VerifyMonitorStackEmpty()) {
+    if (!work_line_->VerifyMonitorStackEmpty(this)) {
       return false;
     }
   }
@@ -2923,7 +2934,8 @@
    * alone and let the caller sort it out.
    */
   if ((opcode_flags & Instruction::kContinue) != 0) {
-    *start_guess = work_insn_idx_ + insn_flags_[work_insn_idx_].GetLengthInCodeUnits();
+    DCHECK_EQ(Instruction::At(code_item_->insns_ + work_insn_idx_), inst);
+    *start_guess = work_insn_idx_ + inst->SizeInCodeUnits();
   } else if ((opcode_flags & Instruction::kBranch) != 0) {
     /* we're still okay if branch_target is zero */
     *start_guess = work_insn_idx_ + branch_target;
@@ -2939,7 +2951,7 @@
   const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
   const RegType& referrer = GetDeclaringClass();
   mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
-  const RegType& result = klass != NULL ?
+  const RegType& result = klass != nullptr ?
       reg_types_.FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()) :
       reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
   if (result.IsConflict()) {
@@ -2947,7 +2959,7 @@
         << "' in " << referrer;
     return result;
   }
-  if (klass == NULL && !result.IsUnresolvedTypes()) {
+  if (klass == nullptr && !result.IsUnresolvedTypes()) {
     dex_cache_->SetResolvedType(class_idx, result.GetClass());
   }
   // Check if access is allowed. Unresolved types use xxxWithAccessCheck to
@@ -2962,7 +2974,7 @@
 }
 
 const RegType& MethodVerifier::GetCaughtExceptionType() {
-  const RegType* common_super = NULL;
+  const RegType* common_super = nullptr;
   if (code_item_->tries_size_ != 0) {
     const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
     uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
@@ -2997,7 +3009,7 @@
       handlers_ptr = iterator.EndDataPointer();
     }
   }
-  if (common_super == NULL) {
+  if (common_super == nullptr) {
     /* no catch blocks, or no catches with classes we can find */
     Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "unable to find exception handler";
     return reg_types_.Conflict();
@@ -3013,15 +3025,15 @@
     std::string append(" in attempt to access method ");
     append += dex_file_->GetMethodName(method_id);
     AppendToLastFailMessage(append);
-    return NULL;
+    return nullptr;
   }
   if (klass_type.IsUnresolvedTypes()) {
-    return NULL;  // Can't resolve Class so no more to do here
+    return nullptr;  // Can't resolve Class so no more to do here
   }
   mirror::Class* klass = klass_type.GetClass();
   const RegType& referrer = GetDeclaringClass();
   mirror::ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx);
-  if (res_method == NULL) {
+  if (res_method == nullptr) {
     const char* name = dex_file_->GetMethodName(method_id);
     const Signature signature = dex_file_->GetMethodSignature(method_id);
 
@@ -3032,7 +3044,7 @@
     } else {
       res_method = klass->FindVirtualMethod(name, signature);
     }
-    if (res_method != NULL) {
+    if (res_method != nullptr) {
       dex_cache_->SetResolvedMethod(dex_method_idx, res_method);
     } else {
       // If a virtual or interface method wasn't found with the expected type, look in
@@ -3041,11 +3053,11 @@
       if (method_type == METHOD_INTERFACE || method_type == METHOD_VIRTUAL) {
         res_method = klass->FindDirectMethod(name, signature);
       }
-      if (res_method == NULL) {
+      if (res_method == nullptr) {
         Fail(VERIFY_ERROR_NO_METHOD) << "couldn't find method "
                                      << PrettyDescriptor(klass) << "." << name
                                      << " " << signature;
-        return NULL;
+        return nullptr;
       }
     }
   }
@@ -3054,13 +3066,13 @@
   if (res_method->IsConstructor() && method_type != METHOD_DIRECT) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "rejecting non-direct call to constructor "
                                       << PrettyMethod(res_method);
-    return NULL;
+    return nullptr;
   }
   // Disallow any calls to class initializers.
   if (res_method->IsClassInitializer()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "rejecting call to class initializer "
                                       << PrettyMethod(res_method);
-    return NULL;
+    return nullptr;
   }
   // Check if access is allowed.
   if (!referrer.CanAccessMember(res_method->GetDeclaringClass(), res_method->GetAccessFlags())) {
@@ -3072,17 +3084,17 @@
   if (res_method->IsPrivate() && method_type == METHOD_VIRTUAL) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke-super/virtual can't be used on private method "
                                       << PrettyMethod(res_method);
-    return NULL;
+    return nullptr;
   }
   // Check that interface methods match interface classes.
   if (klass->IsInterface() && method_type != METHOD_INTERFACE) {
     Fail(VERIFY_ERROR_CLASS_CHANGE) << "non-interface method " << PrettyMethod(res_method)
                                     << " is in an interface class " << PrettyClass(klass);
-    return NULL;
+    return nullptr;
   } else if (!klass->IsInterface() && method_type == METHOD_INTERFACE) {
     Fail(VERIFY_ERROR_CLASS_CHANGE) << "interface method " << PrettyMethod(res_method)
                                     << " is in a non-interface class " << PrettyClass(klass);
-    return NULL;
+    return nullptr;
   }
   // See if the method type implied by the invoke instruction matches the access flags for the
   // target method.
@@ -3092,7 +3104,7 @@
       ) {
     Fail(VERIFY_ERROR_CLASS_CHANGE) << "invoke type (" << method_type << ") does not match method "
                                        " type of " << PrettyMethod(res_method);
-    return NULL;
+    return nullptr;
   }
   return res_method;
 }
@@ -3126,7 +3138,7 @@
    * rigorous check here (which is okay since we have to do it at runtime).
    */
   if (method_type != METHOD_STATIC) {
-    const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+    const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst, is_range);
     if (actual_arg_type.IsConflict()) {  // GetInvocationThis failed.
       CHECK(have_pending_hard_failure_);
       return nullptr;
@@ -3193,13 +3205,13 @@
     uint32_t get_reg = is_range ? inst->VRegC_3rc() + static_cast<uint32_t>(sig_registers) :
         arg[sig_registers];
     if (reg_type.IsIntegralTypes()) {
-      const RegType& src_type = work_line_->GetRegisterType(get_reg);
+      const RegType& src_type = work_line_->GetRegisterType(this, get_reg);
       if (!src_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << get_reg << " has type " << src_type
             << " but expected " << reg_type;
         return res_method;
       }
-    } else if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
+    } else if (!work_line_->VerifyRegisterType(this, get_reg, reg_type)) {
       // Continue on soft failures. We need to find possible hard failures to avoid problems in the
       // compiler.
       if (have_pending_hard_failure_) {
@@ -3264,7 +3276,7 @@
   const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
 
   mirror::ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type);
-  if (res_method == NULL) {  // error or class is unresolved
+  if (res_method == nullptr) {  // error or class is unresolved
     // Check what we can statically.
     if (!have_pending_hard_failure_) {
       VerifyInvocationArgsUnresolvedMethod(inst, method_type, is_range);
@@ -3304,7 +3316,7 @@
                                                          RegisterLine* reg_line, bool is_range) {
   DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
          inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
-  const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
+  const RegType& actual_arg_type = reg_line->GetInvocationThis(this, inst, is_range);
   if (!actual_arg_type.HasClass()) {
     VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
     return nullptr;
@@ -3324,7 +3336,7 @@
   CHECK_LT(static_cast<int32_t>(vtable_index), dispatch_class->GetVTableLength())
       << PrettyDescriptor(klass);
   mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
-  CHECK(!Thread::Current()->IsExceptionPending());
+  CHECK(!self_->IsExceptionPending());
   return res_method;
 }
 
@@ -3333,18 +3345,18 @@
   DCHECK(Runtime::Current()->IsStarted());
   mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(),
                                                              is_range);
-  if (res_method == NULL) {
+  if (res_method == nullptr) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name();
-    return NULL;
+    return nullptr;
   }
   CHECK(!res_method->IsDirect() && !res_method->IsStatic());
 
   // We use vAA as our expected arg count, rather than res_method->insSize, because we need to
   // match the call to the signature. Also, we might be calling through an abstract method
   // definition (which doesn't have register count values).
-  const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+  const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst, is_range);
   if (actual_arg_type.IsConflict()) {  // GetInvocationThis failed.
-    return NULL;
+    return nullptr;
   }
   const size_t expected_args = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
   /* caught by static verifier */
@@ -3352,7 +3364,7 @@
   if (expected_args > code_item_->outs_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid argument count (" << expected_args
         << ") exceeds outsSize (" << code_item_->outs_size_ << ")";
-    return NULL;
+    return nullptr;
   }
 
   /*
@@ -3362,7 +3374,7 @@
    */
   if (actual_arg_type.IsUninitializedReference() && !res_method->IsConstructor()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
-    return NULL;
+    return nullptr;
   }
   if (!actual_arg_type.IsZero()) {
     mirror::Class* klass = res_method->GetDeclaringClass();
@@ -3374,7 +3386,7 @@
       Fail(actual_arg_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS :
           VERIFY_ERROR_BAD_CLASS_SOFT) << "'this' argument '" << actual_arg_type
           << "' not instance of '" << res_method_class << "'";
-      return NULL;
+      return nullptr;
     }
   }
   /*
@@ -3382,7 +3394,7 @@
    * have been verified, so we can't assume it's properly formed.
    */
   const DexFile::TypeList* params = res_method->GetParameterTypeList();
-  size_t params_size = params == NULL ? 0 : params->Size();
+  size_t params_size = params == nullptr ? 0 : params->Size();
   uint32_t arg[5];
   if (!is_range) {
     inst->GetVarArgs(arg);
@@ -3394,18 +3406,18 @@
                                         << "'. Expected " << expected_args
                                          << " arguments, processing argument " << actual_args
                                         << " (where longs/doubles count twice).";
-      return NULL;
+      return nullptr;
     }
     const char* descriptor =
         res_method->GetTypeDescriptorFromTypeIdx(params->GetTypeItem(param_index).type_idx_);
-    if (descriptor == NULL) {
+    if (descriptor == nullptr) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of " << PrettyMethod(res_method)
                                         << " missing signature component";
-      return NULL;
+      return nullptr;
     }
     const RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
     uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
-    if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
+    if (!work_line_->VerifyRegisterType(this, get_reg, reg_type)) {
       return res_method;
     }
     actual_args = reg_type.IsLongOrDoubleTypes() ? actual_args + 2 : actual_args + 1;
@@ -3413,7 +3425,7 @@
   if (actual_args != expected_args) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of " << PrettyMethod(res_method)
               << " expected " << expected_args << " arguments, found " << actual_args;
-    return NULL;
+    return nullptr;
   } else {
     return res_method;
   }
@@ -3440,10 +3452,10 @@
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "new-array on non-array class " << res_type;
     } else if (!is_filled) {
       /* make sure "size" register is valid type */
-      work_line_->VerifyRegisterType(inst->VRegB_22c(), reg_types_.Integer());
+      work_line_->VerifyRegisterType(this, inst->VRegB_22c(), reg_types_.Integer());
       /* set register type to array class */
       const RegType& precise_type = reg_types_.FromUninitialized(res_type);
-      work_line_->SetRegisterType(inst->VRegA_22c(), precise_type);
+      work_line_->SetRegisterType(this, inst->VRegA_22c(), precise_type);
     } else {
       // Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
       // the list and fail. It's legal, if silly, for arg_count to be zero.
@@ -3455,34 +3467,35 @@
       }
       for (size_t ui = 0; ui < arg_count; ui++) {
         uint32_t get_reg = is_range ? inst->VRegC_3rc() + ui : arg[ui];
-        if (!work_line_->VerifyRegisterType(get_reg, expected_type)) {
-          work_line_->SetResultRegisterType(reg_types_.Conflict());
+        if (!work_line_->VerifyRegisterType(this, get_reg, expected_type)) {
+          work_line_->SetResultRegisterType(this, reg_types_.Conflict());
           return;
         }
       }
       // filled-array result goes into "result" register
       const RegType& precise_type = reg_types_.FromUninitialized(res_type);
-      work_line_->SetResultRegisterType(precise_type);
+      work_line_->SetResultRegisterType(this, precise_type);
     }
   }
 }
 
 void MethodVerifier::VerifyAGet(const Instruction* inst,
                                 const RegType& insn_type, bool is_primitive) {
-  const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+  const RegType& index_type = work_line_->GetRegisterType(this, inst->VRegC_23x());
   if (!index_type.IsArrayIndexTypes()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
   } else {
-    const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+    const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegB_23x());
     if (array_type.IsZero()) {
       // Null array class; this code path will fail at runtime. Infer a merge-able type from the
       // instruction type. TODO: have a proper notion of bottom here.
       if (!is_primitive || insn_type.IsCategory1Types()) {
         // Reference or category 1
-        work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Zero());
+        work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Zero());
       } else {
         // Category 2
-        work_line_->SetRegisterTypeWide(inst->VRegA_23x(), reg_types_.FromCat2ConstLo(0, false),
+        work_line_->SetRegisterTypeWide(this, inst->VRegA_23x(),
+                                        reg_types_.FromCat2ConstLo(0, false),
                                         reg_types_.FromCat2ConstHi(0, false));
       }
     } else if (!array_type.IsArrayTypes()) {
@@ -3506,9 +3519,9 @@
         // instruction, which can't differentiate object types and ints from floats, longs from
         // doubles.
         if (!component_type.IsLowHalf()) {
-          work_line_->SetRegisterType(inst->VRegA_23x(), component_type);
+          work_line_->SetRegisterType(this, inst->VRegA_23x(), component_type);
         } else {
-          work_line_->SetRegisterTypeWide(inst->VRegA_23x(), component_type,
+          work_line_->SetRegisterTypeWide(this, inst->VRegA_23x(), component_type,
                                           component_type.HighHalf(&reg_types_));
         }
       }
@@ -3521,7 +3534,7 @@
   // Primitive assignability rules are weaker than regular assignability rules.
   bool instruction_compatible;
   bool value_compatible;
-  const RegType& value_type = work_line_->GetRegisterType(vregA);
+  const RegType& value_type = work_line_->GetRegisterType(this, vregA);
   if (target_type.IsIntegralTypes()) {
     instruction_compatible = target_type.Equals(insn_type);
     value_compatible = value_type.IsIntegralTypes();
@@ -3533,7 +3546,7 @@
     // Additional register check: this is not checked statically (as part of VerifyInstructions),
     // as target_type depends on the resolved type of the field.
     if (instruction_compatible && work_line_->NumRegs() > vregA + 1) {
-      const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+      const RegType& value_type_hi = work_line_->GetRegisterType(this, vregA + 1);
       value_compatible = value_type.IsLongTypes() && value_type.CheckWidePair(value_type_hi);
     } else {
       value_compatible = false;
@@ -3543,7 +3556,7 @@
     // Additional register check: this is not checked statically (as part of VerifyInstructions),
     // as target_type depends on the resolved type of the field.
     if (instruction_compatible && work_line_->NumRegs() > vregA + 1) {
-      const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+      const RegType& value_type_hi = work_line_->GetRegisterType(this, vregA + 1);
       value_compatible = value_type.IsDoubleTypes() && value_type.CheckWidePair(value_type_hi);
     } else {
       value_compatible = false;
@@ -3569,11 +3582,11 @@
 
 void MethodVerifier::VerifyAPut(const Instruction* inst,
                                 const RegType& insn_type, bool is_primitive) {
-  const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+  const RegType& index_type = work_line_->GetRegisterType(this, inst->VRegC_23x());
   if (!index_type.IsArrayIndexTypes()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
   } else {
-    const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+    const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegB_23x());
     if (array_type.IsZero()) {
       // Null array type; this code path will fail at runtime. Infer a merge-able type from the
       // instruction type.
@@ -3592,7 +3605,7 @@
           // The instruction agrees with the type of array, confirm the value to be stored does too
           // Note: we use the instruction type (rather than the component type) for aput-object as
           // incompatible classes will be caught at runtime as an array store exception
-          work_line_->VerifyRegisterType(vregA, insn_type);
+          work_line_->VerifyRegisterType(this, vregA, insn_type);
         }
       }
     }
@@ -3607,29 +3620,29 @@
     AppendToLastFailMessage(StringPrintf(" in attempt to access static field %d (%s) in %s",
                                          field_idx, dex_file_->GetFieldName(field_id),
                                          dex_file_->GetFieldDeclaringClassDescriptor(field_id)));
-    return NULL;
+    return nullptr;
   }
   if (klass_type.IsUnresolvedTypes()) {
-    return NULL;  // Can't resolve Class so no more to do here, will do checking at runtime.
+    return nullptr;  // Can't resolve Class so no more to do here, will do checking at runtime.
   }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_,
                                                           class_loader_);
-  if (field == NULL) {
+  if (field == nullptr) {
     VLOG(verifier) << "Unable to resolve static field " << field_idx << " ("
               << dex_file_->GetFieldName(field_id) << ") in "
               << dex_file_->GetFieldDeclaringClassDescriptor(field_id);
-    DCHECK(Thread::Current()->IsExceptionPending());
-    Thread::Current()->ClearException();
-    return NULL;
+    DCHECK(self_->IsExceptionPending());
+    self_->ClearException();
+    return nullptr;
   } else if (!GetDeclaringClass().CanAccessMember(field->GetDeclaringClass(),
                                                   field->GetAccessFlags())) {
     Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot access static field " << PrettyField(field)
                                     << " from " << GetDeclaringClass();
-    return NULL;
+    return nullptr;
   } else if (!field->IsStatic()) {
     Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected field " << PrettyField(field) << " to be static";
-    return NULL;
+    return nullptr;
   }
   return field;
 }
@@ -3642,30 +3655,30 @@
     AppendToLastFailMessage(StringPrintf(" in attempt to access instance field %d (%s) in %s",
                                          field_idx, dex_file_->GetFieldName(field_id),
                                          dex_file_->GetFieldDeclaringClassDescriptor(field_id)));
-    return NULL;
+    return nullptr;
   }
   if (klass_type.IsUnresolvedTypes()) {
-    return NULL;  // Can't resolve Class so no more to do here
+    return nullptr;  // Can't resolve Class so no more to do here
   }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_,
                                                           class_loader_);
-  if (field == NULL) {
+  if (field == nullptr) {
     VLOG(verifier) << "Unable to resolve instance field " << field_idx << " ("
               << dex_file_->GetFieldName(field_id) << ") in "
               << dex_file_->GetFieldDeclaringClassDescriptor(field_id);
-    DCHECK(Thread::Current()->IsExceptionPending());
-    Thread::Current()->ClearException();
-    return NULL;
+    DCHECK(self_->IsExceptionPending());
+    self_->ClearException();
+    return nullptr;
   } else if (!GetDeclaringClass().CanAccessMember(field->GetDeclaringClass(),
                                                   field->GetAccessFlags())) {
     Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot access instance field " << PrettyField(field)
                                     << " from " << GetDeclaringClass();
-    return NULL;
+    return nullptr;
   } else if (field->IsStatic()) {
     Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected field " << PrettyField(field)
                                     << " to not be static";
-    return NULL;
+    return nullptr;
   } else if (obj_type.IsZero()) {
     // Cannot infer and check type, however, access will cause null pointer exception
     return field;
@@ -3673,7 +3686,7 @@
     // Trying to read a field from something that isn't a reference
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance field access on object that has "
                                       << "non-reference type " << obj_type;
-    return NULL;
+    return nullptr;
   } else {
     mirror::Class* klass = field->GetDeclaringClass();
     const RegType& field_klass =
@@ -3687,14 +3700,14 @@
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "cannot access instance field " << PrettyField(field)
                                         << " of a not fully initialized object within the context"
                                         << " of " << PrettyMethod(dex_method_idx_, *dex_file_);
-      return NULL;
+      return nullptr;
     } else if (!field_klass.IsAssignableFrom(obj_type)) {
       // Trying to access C1.field1 using reference of type C2, which is neither C1 nor a sub-class
       // of C1. For resolution to occur the declared class of the field must be compatible with
       // obj_type; we've discovered this wasn't so, so report that the field didn't exist.
       Fail(VERIFY_ERROR_NO_FIELD) << "cannot access instance field " << PrettyField(field)
                                   << " from object of type " << obj_type;
-      return NULL;
+      return nullptr;
     } else {
       return field;
     }
@@ -3708,15 +3721,14 @@
   if (is_static) {
     field = GetStaticField(field_idx);
   } else {
-    const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+    const RegType& object_type = work_line_->GetRegisterType(this, inst->VRegB_22c());
     field = GetInstanceField(object_type, field_idx);
   }
   const RegType* field_type = nullptr;
-  if (field != NULL) {
-    Thread* self = Thread::Current();
+  if (field != nullptr) {
     mirror::Class* field_type_class;
     {
-      StackHandleScope<1> hs(self);
+      StackHandleScope<1> hs(self_);
       HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
       field_type_class = FieldHelper(h_field).GetType(can_load_classes_);
     }
@@ -3724,8 +3736,8 @@
       field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
                                          field_type_class->CannotBeAssignedFromOtherTypes());
     } else {
-      DCHECK(!can_load_classes_ || self->IsExceptionPending());
-      self->ClearException();
+      DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+      self_->ClearException();
     }
   }
   if (field_type == nullptr) {
@@ -3756,14 +3768,14 @@
                                         << " to be compatible with type '" << insn_type
                                         << "' but found type '" << *field_type
                                         << "' in Get-object";
-      work_line_->SetRegisterType(vregA, reg_types_.Conflict());
+      work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
       return;
     }
   }
   if (!field_type->IsLowHalf()) {
-    work_line_->SetRegisterType(vregA, *field_type);
+    work_line_->SetRegisterType(this, vregA, *field_type);
   } else {
-    work_line_->SetRegisterTypeWide(vregA, *field_type, field_type->HighHalf(&reg_types_));
+    work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
   }
 }
 
@@ -3774,11 +3786,11 @@
   if (is_static) {
     field = GetStaticField(field_idx);
   } else {
-    const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+    const RegType& object_type = work_line_->GetRegisterType(this, inst->VRegB_22c());
     field = GetInstanceField(object_type, field_idx);
   }
   const RegType* field_type = nullptr;
-  if (field != NULL) {
+  if (field != nullptr) {
     if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
       Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
                                       << " from other class " << GetDeclaringClass();
@@ -3786,7 +3798,7 @@
     }
     mirror::Class* field_type_class;
     {
-      StackHandleScope<1> hs(Thread::Current());
+      StackHandleScope<1> hs(self_);
       HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
       FieldHelper fh(h_field);
       field_type_class = fh.GetType(can_load_classes_);
@@ -3795,9 +3807,8 @@
       field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
                                          field_type_class->CannotBeAssignedFromOtherTypes());
     } else {
-      Thread* self = Thread::Current();
-      DCHECK(!can_load_classes_ || self->IsExceptionPending());
-      self->ClearException();
+      DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+      self_->ClearException();
     }
   }
   if (field_type == nullptr) {
@@ -3817,7 +3828,7 @@
                                         << "' in put-object";
       return;
     }
-    work_line_->VerifyRegisterType(vregA, *field_type);
+    work_line_->VerifyRegisterType(this, vregA, *field_type);
   }
 }
 
@@ -3829,7 +3840,7 @@
          inst->Opcode() == Instruction::IPUT_QUICK ||
          inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
          inst->Opcode() == Instruction::IPUT_OBJECT_QUICK);
-  const RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
+  const RegType& object_type = reg_line->GetRegisterType(this, inst->VRegB_22c());
   if (!object_type.HasClass()) {
     VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
     return nullptr;
@@ -3848,13 +3859,13 @@
                                      bool is_primitive) {
   DCHECK(Runtime::Current()->IsStarted());
   mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
-  if (field == NULL) {
+  if (field == nullptr) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
     return;
   }
   mirror::Class* field_type_class;
   {
-    StackHandleScope<1> hs(Thread::Current());
+    StackHandleScope<1> hs(self_);
     HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
     FieldHelper fh(h_field);
     field_type_class = fh.GetType(can_load_classes_);
@@ -3864,9 +3875,8 @@
     field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
                                        field_type_class->CannotBeAssignedFromOtherTypes());
   } else {
-    Thread* self = Thread::Current();
-    DCHECK(!can_load_classes_ || self->IsExceptionPending());
-    self->ClearException();
+    DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+    self_->ClearException();
     field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
                                             field->GetTypeDescriptor(), false);
   }
@@ -3893,14 +3903,14 @@
                                         << " to be compatible with type '" << insn_type
                                         << "' but found type '" << *field_type
                                         << "' in get-object";
-      work_line_->SetRegisterType(vregA, reg_types_.Conflict());
+      work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
       return;
     }
   }
   if (!field_type->IsLowHalf()) {
-    work_line_->SetRegisterType(vregA, *field_type);
+    work_line_->SetRegisterType(this, vregA, *field_type);
   } else {
-    work_line_->SetRegisterTypeWide(vregA, *field_type, field_type->HighHalf(&reg_types_));
+    work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
   }
 }
 
@@ -3908,14 +3918,14 @@
                                      bool is_primitive) {
   DCHECK(Runtime::Current()->IsStarted());
   mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
-  if (field == NULL) {
+  if (field == nullptr) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
     return;
   }
   const char* descriptor = field->GetTypeDescriptor();
   mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
   const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
-  if (field != NULL) {
+  if (field != nullptr) {
     if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
       Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
                                       << " from other class " << GetDeclaringClass();
@@ -3927,7 +3937,7 @@
     // Primitive field assignability rules are weaker than regular assignability rules
     bool instruction_compatible;
     bool value_compatible;
-    const RegType& value_type = work_line_->GetRegisterType(vregA);
+    const RegType& value_type = work_line_->GetRegisterType(this, vregA);
     if (field_type.IsIntegralTypes()) {
       instruction_compatible = insn_type.IsIntegralTypes();
       value_compatible = value_type.IsIntegralTypes();
@@ -3969,7 +3979,7 @@
                                         << "' in put-object";
       return;
     }
-    work_line_->VerifyRegisterType(vregA, field_type);
+    work_line_->VerifyRegisterType(this, vregA, field_type);
   }
 }
 
@@ -3995,7 +4005,7 @@
       target_line->CopyFromLine(merge_line);
     } else {
       // Verify that the monitor stack is empty on return.
-      if (!merge_line->VerifyMonitorStackEmpty()) {
+      if (!merge_line->VerifyMonitorStackEmpty(this)) {
         return false;
       }
       // For returns we only care about the operand to the return, all other registers are dead.
@@ -4003,33 +4013,33 @@
       const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn);
       Instruction::Code opcode = ret_inst->Opcode();
       if ((opcode == Instruction::RETURN_VOID) || (opcode == Instruction::RETURN_VOID_BARRIER)) {
-        target_line->MarkAllRegistersAsConflicts();
+        target_line->MarkAllRegistersAsConflicts(this);
       } else {
         target_line->CopyFromLine(merge_line);
         if (opcode == Instruction::RETURN_WIDE) {
-          target_line->MarkAllRegistersAsConflictsExceptWide(ret_inst->VRegA_11x());
+          target_line->MarkAllRegistersAsConflictsExceptWide(this, ret_inst->VRegA_11x());
         } else {
-          target_line->MarkAllRegistersAsConflictsExcept(ret_inst->VRegA_11x());
+          target_line->MarkAllRegistersAsConflictsExcept(this, ret_inst->VRegA_11x());
         }
       }
     }
   } else {
     std::unique_ptr<RegisterLine> copy(gDebugVerify ?
                                  RegisterLine::Create(target_line->NumRegs(), this) :
-                                 NULL);
+                                 nullptr);
     if (gDebugVerify) {
       copy->CopyFromLine(target_line);
     }
-    changed = target_line->MergeRegisters(merge_line);
+    changed = target_line->MergeRegisters(this, merge_line);
     if (have_pending_hard_failure_) {
       return false;
     }
     if (gDebugVerify && changed) {
       LogVerifyInfo() << "Merging at [" << reinterpret_cast<void*>(work_insn_idx_) << "]"
                       << " to [" << reinterpret_cast<void*>(next_insn) << "]: " << "\n"
-                      << *copy.get() << "  MERGE\n"
-                      << *merge_line << "  ==\n"
-                      << *target_line << "\n";
+                      << copy->Dump(this) << "  MERGE\n"
+                      << merge_line->Dump(this) << "  ==\n"
+                      << target_line->Dump(this) << "\n";
     }
     if (update_merge_line && changed) {
       merge_line->CopyFromLine(target_line);
@@ -4048,8 +4058,7 @@
 const RegType& MethodVerifier::GetMethodReturnType() {
   if (return_type_ == nullptr) {
     if (mirror_method_.Get() != nullptr) {
-      Thread* self = Thread::Current();
-      StackHandleScope<1> hs(self);
+      StackHandleScope<1> hs(self_);
       mirror::Class* return_type_class =
           MethodHelper(hs.NewHandle(mirror_method_.Get())).GetReturnType(can_load_classes_);
       if (return_type_class != nullptr) {
@@ -4057,8 +4066,8 @@
                                              return_type_class,
                                              return_type_class->CannotBeAssignedFromOtherTypes());
       } else {
-        DCHECK(!can_load_classes_ || self->IsExceptionPending());
-        self->ClearException();
+        DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+        self_->ClearException();
       }
     }
     if (return_type_ == nullptr) {
@@ -4073,7 +4082,7 @@
 }
 
 const RegType& MethodVerifier::GetDeclaringClass() {
-  if (declaring_class_ == NULL) {
+  if (declaring_class_ == nullptr) {
     const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
     const char* descriptor
         = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
@@ -4093,16 +4102,19 @@
   DCHECK(line != nullptr) << "No register line at DEX pc " << StringPrintf("0x%x", dex_pc);
   std::vector<int32_t> result;
   for (size_t i = 0; i < line->NumRegs(); ++i) {
-    const RegType& type = line->GetRegisterType(i);
+    const RegType& type = line->GetRegisterType(this, i);
     if (type.IsConstant()) {
       result.push_back(type.IsPreciseConstant() ? kConstant : kImpreciseConstant);
-      result.push_back(type.ConstantValue());
+      const ConstantType* const_val = down_cast<const ConstantType*>(&type);
+      result.push_back(const_val->ConstantValue());
     } else if (type.IsConstantLo()) {
       result.push_back(type.IsPreciseConstantLo() ? kConstant : kImpreciseConstant);
-      result.push_back(type.ConstantValueLo());
+      const ConstantType* const_val = down_cast<const ConstantType*>(&type);
+      result.push_back(const_val->ConstantValueLo());
     } else if (type.IsConstantHi()) {
       result.push_back(type.IsPreciseConstantHi() ? kConstant : kImpreciseConstant);
-      result.push_back(type.ConstantValueHi());
+      const ConstantType* const_val = down_cast<const ConstantType*>(&type);
+      result.push_back(const_val->ConstantValueHi());
     } else if (type.IsIntegralTypes()) {
       result.push_back(kIntVReg);
       result.push_back(0);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 45c0a03..81ab960 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -140,16 +140,18 @@
   };
 
   /* Verify a class. Returns "kNoFailure" on success. */
-  static FailureKind VerifyClass(mirror::Class* klass, bool allow_soft_failures, std::string* error)
+  static FailureKind VerifyClass(Thread* self, mirror::Class* klass, bool allow_soft_failures,
+                                 std::string* error)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static FailureKind VerifyClass(const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
+  static FailureKind VerifyClass(Thread* self, const DexFile* dex_file,
+                                 ConstHandle<mirror::DexCache> dex_cache,
                                  ConstHandle<mirror::ClassLoader> class_loader,
                                  const DexFile::ClassDef* class_def,
                                  bool allow_soft_failures, std::string* error)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static void VerifyMethodAndDump(std::ostream& os, uint32_t method_idx, const DexFile* dex_file,
-                                  ConstHandle<mirror::DexCache> dex_cache,
+  static void VerifyMethodAndDump(Thread* self, std::ostream& os, uint32_t method_idx,
+                                  const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
                                   ConstHandle<mirror::ClassLoader> class_loader,
                                   const DexFile::ClassDef* class_def,
                                   const DexFile::CodeItem* code_item,
@@ -202,7 +204,7 @@
     return can_load_classes_;
   }
 
-  MethodVerifier(const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
+  MethodVerifier(Thread* self, const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
                  ConstHandle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def,
                  const DexFile::CodeItem* code_item, uint32_t method_idx,
                  ConstHandle<mirror::ArtMethod> method,
@@ -253,7 +255,7 @@
    *  (3) Iterate through the method, checking type safety and looking
    *      for code flow problems.
    */
-  static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file,
+  static FailureKind VerifyMethod(Thread* self, uint32_t method_idx, const DexFile* dex_file,
                                   ConstHandle<mirror::DexCache> dex_cache,
                                   ConstHandle<mirror::ClassLoader> class_loader,
                                   const DexFile::ClassDef* class_def_idx,
@@ -625,6 +627,9 @@
   const RegType& DetermineCat1Constant(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // The thread we're verifying on.
+  Thread* const self_;
+
   RegTypeCache reg_types_;
 
   PcToRegisterLineTable reg_table_;
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index a5895e6..770ca7e 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -32,11 +32,12 @@
   void VerifyClass(const std::string& descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ASSERT_TRUE(descriptor != NULL);
-    mirror::Class* klass = class_linker_->FindSystemClass(Thread::Current(), descriptor.c_str());
+    Thread* self = Thread::Current();
+    mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str());
 
     // Verify the class
     std::string error_msg;
-    ASSERT_TRUE(MethodVerifier::VerifyClass(klass, true, &error_msg) == MethodVerifier::kNoFailure)
+    ASSERT_TRUE(MethodVerifier::VerifyClass(self, klass, true, &error_msg) == MethodVerifier::kNoFailure)
         << error_msg;
   }
 
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
new file mode 100644
index 0000000..480ed40
--- /dev/null
+++ b/runtime/verifier/reg_type-inl.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_VERIFIER_REG_TYPE_INL_H_
+#define ART_RUNTIME_VERIFIER_REG_TYPE_INL_H_
+
+#include "reg_type.h"
+
+#include "base/casts.h"
+#include "mirror/class.h"
+
+namespace art {
+namespace verifier {
+
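+// Can code in this type's class access the class of 'other'? Identical types trivially have
+// access; resolved types defer to mirror::Class::CanAccess; unresolved types are handled
+// conservatively below.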
+inline bool RegType::CanAccess(const RegType& other) const {
+  if (Equals(other)) {
+    return true;  // Trivial accessibility.
+  } else {
+    bool this_unresolved = IsUnresolvedTypes();
+    bool other_unresolved = other.IsUnresolvedTypes();
+    if (!this_unresolved && !other_unresolved) {
+      return GetClass()->CanAccess(other.GetClass());
+    } else if (!other_unresolved) {
+      return other.GetClass()->IsPublic();  // Be conservative, only allow if other is public.
+    } else {
+      return false;  // More complicated test not possible on unresolved types, be conservative.
+    }
+  }
+}
+
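+// Can code in this type's class access a member of 'klass' with the given access flags?
+// Public members are always accessible; for unresolved types we conservatively answer false.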
+inline bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
+  if ((access_flags & kAccPublic) != 0) {
+    return true;
+  }
+  if (!IsUnresolvedTypes()) {
+    return GetClass()->CanAccessMember(klass, access_flags);
+  } else {
+    return false;  // More complicated test not possible on unresolved types, be conservative.
+  }
+}
+
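+// A constant is usable as a boolean only if its value is 0 or 1.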
+inline bool RegType::IsConstantBoolean() const {
+  if (!IsConstant()) {
+    return false;
+  } else {
+    const ConstantType* const_val = down_cast<const ConstantType*>(this);
+    return const_val->ConstantValue() >= 0 && const_val->ConstantValue() <= 1;
+  }
+}
+
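+// Shared assignability check. Primitive targets accept the matching family of source types
+// (e.g. Integer accepts any integral type). For references: null is assignable to anything,
+// any reference is assignable to Object, interface targets accept anything when 'strict' is
+// false, Object[] accepts any reference array, and otherwise the class hierarchy decides.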
+inline bool RegType::AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict) {
+  if (lhs.Equals(rhs)) {
+    return true;
+  } else {
+    if (lhs.IsBoolean()) {
+      return rhs.IsBooleanTypes();
+    } else if (lhs.IsByte()) {
+      return rhs.IsByteTypes();
+    } else if (lhs.IsShort()) {
+      return rhs.IsShortTypes();
+    } else if (lhs.IsChar()) {
+      return rhs.IsCharTypes();
+    } else if (lhs.IsInteger()) {
+      return rhs.IsIntegralTypes();
+    } else if (lhs.IsFloat()) {
+      return rhs.IsFloatTypes();
+    } else if (lhs.IsLongLo()) {
+      return rhs.IsLongTypes();
+    } else if (lhs.IsDoubleLo()) {
+      return rhs.IsDoubleTypes();
+    } else {
+      CHECK(lhs.IsReferenceTypes())
+          << "Unexpected register type in IsAssignableFrom: '"
+          << lhs << "' := '" << rhs << "'";
+      if (rhs.IsZero()) {
+        return true;  // All reference types can be assigned null.
+      } else if (!rhs.IsReferenceTypes()) {
+        return false;  // Expect rhs to be a reference type.
+      } else if (lhs.IsJavaLangObject()) {
+        return true;  // All reference types can be assigned to Object.
+      } else if (!strict && !lhs.IsUnresolvedTypes() && lhs.GetClass()->IsInterface()) {
+        // If we're not strict, allow assignment to any interface; see the comment in ClassJoin.
+        return true;
+      } else if (lhs.IsJavaLangObjectArray()) {
+        return rhs.IsObjectArrayTypes();  // All reference arrays may be assigned to Object[]
+      } else if (lhs.HasClass() && rhs.HasClass() &&
+                 lhs.GetClass()->IsAssignableFrom(rhs.GetClass())) {
+        // We're assignable from the Class point-of-view.
+        return true;
+      } else {
+        // Unresolved types are only assignable for null and equality.
+        return false;
+      }
+    }
+  }
+}
+
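+// Non-strict assignability additionally allows assignment to (resolved) interface targets;
+// strict assignability does not. Both are implemented by AssignableFrom above.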
+inline bool RegType::IsAssignableFrom(const RegType& src) const {
+  return AssignableFrom(*this, src, false);
+}
+
+inline bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
+  return AssignableFrom(*this, src, true);
+}
+
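+// Singleton accessors for the primitive and special types. The instances must already have
+// been created when these are called, which the DCHECKs enforce.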
+inline const DoubleHiType* DoubleHiType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const DoubleLoType* DoubleLoType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const LongHiType* LongHiType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const LongLoType* LongLoType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const FloatType* FloatType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const CharType* CharType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const ShortType* ShortType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const ByteType* ByteType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const IntegerType* IntegerType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const BooleanType* BooleanType::GetInstance() {
+  DCHECK(BooleanType::instance_ != nullptr);
+  return BooleanType::instance_;
+}
+
+inline const ConflictType* ConflictType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const UndefinedType* UndefinedType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+}  // namespace verifier
+}  // namespace art
+
+#endif  // ART_RUNTIME_VERIFIER_REG_TYPE_INL_H_
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 68c7849..41541b5 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -14,8 +14,7 @@
  * limitations under the License.
  */
 
-#include "reg_type.h"
-
+#include "reg_type-inl.h"
 
 #include "base/casts.h"
 #include "class_linker-inl.h"
@@ -33,41 +32,23 @@
 namespace art {
 namespace verifier {
 
-UndefinedType* UndefinedType::instance_ = NULL;
-ConflictType* ConflictType::instance_ = NULL;
-BooleanType* BooleanType::instance = NULL;
-ByteType* ByteType::instance_ = NULL;
-ShortType* ShortType::instance_ = NULL;
-CharType* CharType::instance_ = NULL;
-FloatType* FloatType::instance_ = NULL;
-LongLoType* LongLoType::instance_ = NULL;
-LongHiType* LongHiType::instance_ = NULL;
-DoubleLoType* DoubleLoType::instance_ = NULL;
-DoubleHiType* DoubleHiType::instance_ = NULL;
-IntegerType* IntegerType::instance_ = NULL;
-
-int32_t RegType::ConstantValue() const {
-  ScopedObjectAccess soa(Thread::Current());
-  LOG(FATAL) << "Unexpected call to ConstantValue: " << *this;
-  return 0;
-}
-
-int32_t RegType::ConstantValueLo() const {
-  ScopedObjectAccess soa(Thread::Current());
-  LOG(FATAL) << "Unexpected call to ConstantValueLo: " << *this;
-  return 0;
-}
-
-int32_t RegType::ConstantValueHi() const {
-  ScopedObjectAccess soa(Thread::Current());
-  LOG(FATAL) << "Unexpected call to ConstantValueHi: " << *this;
-  return 0;
-}
+const UndefinedType* UndefinedType::instance_ = nullptr;
+const ConflictType* ConflictType::instance_ = nullptr;
+const BooleanType* BooleanType::instance_ = nullptr;
+const ByteType* ByteType::instance_ = nullptr;
+const ShortType* ShortType::instance_ = nullptr;
+const CharType* CharType::instance_ = nullptr;
+const FloatType* FloatType::instance_ = nullptr;
+const LongLoType* LongLoType::instance_ = nullptr;
+const LongHiType* LongHiType::instance_ = nullptr;
+const DoubleLoType* DoubleLoType::instance_ = nullptr;
+const DoubleHiType* DoubleHiType::instance_ = nullptr;
+const IntegerType* IntegerType::instance_ = nullptr;
 
 PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
     : RegType(klass, descriptor, cache_id) {
-  CHECK(klass != NULL);
+  CHECK(klass != nullptr);
   CHECK(!descriptor.empty());
 }
 
@@ -142,222 +123,160 @@
     return "Integer";
 }
 
-DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                           uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new DoubleHiType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-DoubleHiType* DoubleHiType::GetInstance() {
-  CHECK(instance_ != NULL);
+const DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass,
+                                                 const std::string& descriptor,
+                                                 uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new DoubleHiType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void DoubleHiType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                           uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new DoubleLoType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-DoubleLoType* DoubleLoType::GetInstance() {
-  CHECK(instance_ != NULL);
+const DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass,
+                                                 const std::string& descriptor,
+                                                 uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new DoubleLoType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void DoubleLoType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                       uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new LongLoType(klass, descriptor, cache_id);
-  }
+const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                             uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new LongLoType(klass, descriptor, cache_id);
   return instance_;
 }
 
-LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                       uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new LongHiType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-LongHiType* LongHiType::GetInstance() {
-  CHECK(instance_ != NULL);
+const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                             uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new LongHiType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void LongHiType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-LongLoType* LongLoType::GetInstance() {
-  CHECK(instance_ != NULL);
-  return instance_;
-}
-
 void LongLoType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                     uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new FloatType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-FloatType* FloatType::GetInstance() {
-  CHECK(instance_ != NULL);
+const FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                           uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new FloatType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void FloatType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                   uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new CharType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-CharType* CharType::GetInstance() {
-  CHECK(instance_ != NULL);
+const CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                         uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new CharType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void CharType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                     uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new ShortType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-ShortType* ShortType::GetInstance() {
-  CHECK(instance_ != NULL);
+const ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                           uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new ShortType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void ShortType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                   uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new ByteType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-ByteType* ByteType::GetInstance() {
-  CHECK(instance_ != NULL);
+const ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                         uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new ByteType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void ByteType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                         uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new IntegerType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-IntegerType* IntegerType::GetInstance() {
-  CHECK(instance_ != NULL);
+const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                               uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new IntegerType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void IntegerType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-ConflictType* ConflictType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                           uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new ConflictType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-ConflictType* ConflictType::GetInstance() {
-  CHECK(instance_ != NULL);
+const ConflictType* ConflictType::CreateInstance(mirror::Class* klass,
+                                                 const std::string& descriptor,
+                                                 uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new ConflictType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void ConflictType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                          uint16_t cache_id) {
-  if (BooleanType::instance == NULL) {
-    instance = new BooleanType(klass, descriptor, cache_id);
-  }
-  return BooleanType::instance;
-}
-
-BooleanType* BooleanType::GetInstance() {
-  CHECK(BooleanType::instance != NULL);
-  return BooleanType::instance;
+  CHECK(BooleanType::instance_ == nullptr);
+  instance_ = new BooleanType(klass, descriptor, cache_id);
+  return BooleanType::instance_;
 }
 
 void BooleanType::Destroy() {
-  if (BooleanType::instance != NULL) {
-    delete instance;
-    instance = NULL;
+  if (BooleanType::instance_ != nullptr) {
+    delete instance_;
+    instance_ = nullptr;
   }
 }
 
@@ -365,23 +284,18 @@
   return "Undefined";
 }
 
-UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                             uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new UndefinedType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-UndefinedType* UndefinedType::GetInstance() {
-  CHECK(instance_ != NULL);
+const UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass,
+                                                   const std::string& descriptor,
+                                                   uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new UndefinedType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void UndefinedType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
@@ -528,18 +442,6 @@
   return result.str();
 }
 
-ConstantType::ConstantType(uint32_t constant, uint16_t cache_id)
-    : RegType(NULL, "", cache_id), constant_(constant) {
-}
-
-const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  if (incoming_type.IsUndefined()) {
-    return *this;  // Undefined MERGE Undefined => Undefined
-  }
-  return reg_types->Conflict();
-}
-
 const RegType& RegType::HighHalf(RegTypeCache* cache) const {
   DCHECK(IsLowHalf());
   if (IsLongLo()) {
@@ -548,7 +450,8 @@
     return cache->DoubleHi();
   } else {
     DCHECK(IsImpreciseConstantLo());
-    return cache->FromCat2ConstHi(ConstantValue(), false);
+    const ConstantType* const_val = down_cast<const ConstantType*>(this);
+    return cache->FromCat2ConstHi(const_val->ConstantValue(), false);
   }
 }
 
@@ -586,24 +489,21 @@
 bool UnresolvedType::IsNonZeroReferenceTypes() const {
   return true;
 }
+
 std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
   std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
-  const RegType& _left(reg_type_cache_->GetFromId(refs.first));
-  RegType& __left(const_cast<RegType&>(_left));
-  UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&__left);
-
-  RegType& _right(
-      const_cast<RegType&>(reg_type_cache_->GetFromId(refs.second)));
-  UnresolvedMergedType* right = down_cast<UnresolvedMergedType*>(&_right);
+  const RegType& left = reg_type_cache_->GetFromId(refs.first);
+  const RegType& right = reg_type_cache_->GetFromId(refs.second);
 
   std::set<uint16_t> types;
-  if (left->IsUnresolvedMergedReference()) {
-    types = left->GetMergedTypes();
+  if (left.IsUnresolvedMergedReference()) {
+    types = down_cast<const UnresolvedMergedType*>(&left)->GetMergedTypes();
   } else {
     types.insert(refs.first);
   }
-  if (right->IsUnresolvedMergedReference()) {
-    std::set<uint16_t> right_types = right->GetMergedTypes();
+  if (right.IsUnresolvedMergedReference()) {
+    std::set<uint16_t> right_types =
+        down_cast<const UnresolvedMergedType*>(&right)->GetMergedTypes();
     types.insert(right_types.begin(), right_types.end());
   } else {
     types.insert(refs.second);
@@ -619,7 +519,7 @@
 const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
   if (!IsUnresolvedTypes()) {
     mirror::Class* super_klass = GetClass()->GetSuperClass();
-    if (super_klass != NULL) {
+    if (super_klass != nullptr) {
       // A super class of a precise type isn't precise as a precise type indicates the register
       // holds exactly that type.
       std::string temp;
@@ -638,33 +538,6 @@
   }
 }
 
-bool RegType::CanAccess(const RegType& other) const {
-  if (Equals(other)) {
-    return true;  // Trivial accessibility.
-  } else {
-    bool this_unresolved = IsUnresolvedTypes();
-    bool other_unresolved = other.IsUnresolvedTypes();
-    if (!this_unresolved && !other_unresolved) {
-      return GetClass()->CanAccess(other.GetClass());
-    } else if (!other_unresolved) {
-      return other.GetClass()->IsPublic();  // Be conservative, only allow if other is public.
-    } else {
-      return false;  // More complicated test not possible on unresolved types, be conservative.
-    }
-  }
-}
-
-bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
-  if ((access_flags & kAccPublic) != 0) {
-    return true;
-  }
-  if (!IsUnresolvedTypes()) {
-    return GetClass()->CanAccessMember(klass, access_flags);
-  } else {
-    return false;  // More complicated test not possible on unresolved types, be conservative.
-  }
-}
-
 bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
     // Primitive arrays will always resolve
@@ -704,106 +577,38 @@
   return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable());
 }
 
-ImpreciseConstType::ImpreciseConstType(uint32_t constat, uint16_t cache_id)
-  : ConstantType(constat, cache_id) {
-}
-
-static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  if (lhs.Equals(rhs)) {
-    return true;
-  } else {
-    if (lhs.IsBoolean()) {
-      return rhs.IsBooleanTypes();
-    } else if (lhs.IsByte()) {
-      return rhs.IsByteTypes();
-    } else if (lhs.IsShort()) {
-      return rhs.IsShortTypes();
-    } else if (lhs.IsChar()) {
-      return rhs.IsCharTypes();
-    } else if (lhs.IsInteger()) {
-      return rhs.IsIntegralTypes();
-    } else if (lhs.IsFloat()) {
-      return rhs.IsFloatTypes();
-    } else if (lhs.IsLongLo()) {
-      return rhs.IsLongTypes();
-    } else if (lhs.IsDoubleLo()) {
-      return rhs.IsDoubleTypes();
-    } else {
-      CHECK(lhs.IsReferenceTypes())
-          << "Unexpected register type in IsAssignableFrom: '"
-          << lhs << "' := '" << rhs << "'";
-      if (rhs.IsZero()) {
-        return true;  // All reference types can be assigned null.
-      } else if (!rhs.IsReferenceTypes()) {
-        return false;  // Expect rhs to be a reference type.
-      } else if (lhs.IsJavaLangObject()) {
-        return true;  // All reference types can be assigned to Object.
-      } else if (!strict && !lhs.IsUnresolvedTypes() && lhs.GetClass()->IsInterface()) {
-        // If we're not strict allow assignment to any interface, see comment in ClassJoin.
-        return true;
-      } else if (lhs.IsJavaLangObjectArray()) {
-        return rhs.IsObjectArrayTypes();  // All reference arrays may be assigned to Object[]
-      } else if (lhs.HasClass() && rhs.HasClass() &&
-                 lhs.GetClass()->IsAssignableFrom(rhs.GetClass())) {
-        // We're assignable from the Class point-of-view.
-        return true;
-      } else {
-        // Unresolved types are only assignable for null and equality.
-        return false;
-      }
-    }
-  }
-}
-
-bool RegType::IsAssignableFrom(const RegType& src) const {
-  return AssignableFrom(*this, src, false);
-}
-
-bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
-  return AssignableFrom(*this, src, true);
-}
-
-int32_t ConstantType::ConstantValueLo() const {
-  DCHECK(IsConstantLo());
-  return constant_;
-}
-
-int32_t ConstantType::ConstantValueHi() const {
-  if (IsConstantHi() || IsPreciseConstantHi() || IsImpreciseConstantHi()) {
-    return constant_;
-  } else {
-    DCHECK(false);
-    return 0;
-  }
-}
-
 static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
   return a.IsConstantTypes() ? b : a;
 }
 
 const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const {
   DCHECK(!Equals(incoming_type));  // Trivial equality handled by caller
-  if (IsConflict()) {
+  // Perform pointer equality tests for conflict to avoid virtual method dispatch.
+  const ConflictType& conflict = reg_types->Conflict();
+  if (this == &conflict) {
+    DCHECK(IsConflict());
     return *this;  // Conflict MERGE * => Conflict
-  } else if (incoming_type.IsConflict()) {
+  } else if (&incoming_type == &conflict) {
+    DCHECK(incoming_type.IsConflict());
     return incoming_type;  // * MERGE Conflict => Conflict
   } else if (IsUndefined() || incoming_type.IsUndefined()) {
-    return reg_types->Conflict();  // Unknown MERGE * => Conflict
+    return conflict;  // Unknown MERGE * => Conflict
   } else if (IsConstant() && incoming_type.IsConstant()) {
-    int32_t val1 = ConstantValue();
-    int32_t val2 = incoming_type.ConstantValue();
+    const ConstantType& type1 = *down_cast<const ConstantType*>(this);
+    const ConstantType& type2 = *down_cast<const ConstantType*>(&incoming_type);
+    int32_t val1 = type1.ConstantValue();
+    int32_t val2 = type2.ConstantValue();
     if (val1 >= 0 && val2 >= 0) {
       // +ve1 MERGE +ve2 => MAX(+ve1, +ve2)
       if (val1 >= val2) {
-        if (!IsPreciseConstant()) {
+        if (!type1.IsPreciseConstant()) {
           return *this;
         } else {
           return reg_types->FromCat1Const(val1, false);
         }
       } else {
-        if (!incoming_type.IsPreciseConstant()) {
-          return incoming_type;
+        if (!type2.IsPreciseConstant()) {
+          return type2;
         } else {
           return reg_types->FromCat1Const(val2, false);
         }
@@ -811,30 +616,30 @@
     } else if (val1 < 0 && val2 < 0) {
       // -ve1 MERGE -ve2 => MIN(-ve1, -ve2)
       if (val1 <= val2) {
-        if (!IsPreciseConstant()) {
+        if (!type1.IsPreciseConstant()) {
           return *this;
         } else {
           return reg_types->FromCat1Const(val1, false);
         }
       } else {
-        if (!incoming_type.IsPreciseConstant()) {
-          return incoming_type;
+        if (!type2.IsPreciseConstant()) {
+          return type2;
         } else {
           return reg_types->FromCat1Const(val2, false);
         }
       }
     } else {
       // Values are +ve and -ve, choose smallest signed type in which they both fit
-      if (IsConstantByte()) {
-        if (incoming_type.IsConstantByte()) {
+      if (type1.IsConstantByte()) {
+        if (type2.IsConstantByte()) {
           return reg_types->ByteConstant();
-        } else if (incoming_type.IsConstantShort()) {
+        } else if (type2.IsConstantShort()) {
           return reg_types->ShortConstant();
         } else {
           return reg_types->IntConstant();
         }
-      } else if (IsConstantShort()) {
-        if (incoming_type.IsConstantShort()) {
+      } else if (type1.IsConstantShort()) {
+        if (type2.IsConstantShort()) {
           return reg_types->ShortConstant();
         } else {
           return reg_types->IntConstant();
@@ -844,12 +649,16 @@
       }
     }
   } else if (IsConstantLo() && incoming_type.IsConstantLo()) {
-    int32_t val1 = ConstantValueLo();
-    int32_t val2 = incoming_type.ConstantValueLo();
+    const ConstantType& type1 = *down_cast<const ConstantType*>(this);
+    const ConstantType& type2 = *down_cast<const ConstantType*>(&incoming_type);
+    int32_t val1 = type1.ConstantValueLo();
+    int32_t val2 = type2.ConstantValueLo();
     return reg_types->FromCat2ConstLo(val1 | val2, false);
   } else if (IsConstantHi() && incoming_type.IsConstantHi()) {
-    int32_t val1 = ConstantValueHi();
-    int32_t val2 = incoming_type.ConstantValueHi();
+    const ConstantType& type1 = *down_cast<const ConstantType*>(this);
+    const ConstantType& type2 = *down_cast<const ConstantType*>(&incoming_type);
+    int32_t val1 = type1.ConstantValueHi();
+    int32_t val2 = type2.ConstantValueHi();
     return reg_types->FromCat2ConstHi(val1 | val2, false);
   } else if (IsIntegralTypes() && incoming_type.IsIntegralTypes()) {
     if (IsBooleanTypes() && incoming_type.IsBooleanTypes()) {
@@ -889,12 +698,12 @@
       // Something that is uninitialized hasn't had its constructor called. Mark any merge
       // of this type with something that is initialized as conflicting. The cases of a merge
       // with itself, 0 or Object are handled above.
-      return reg_types->Conflict();
+      return conflict;
     } else {  // Two reference types, compute Join
       mirror::Class* c1 = GetClass();
       mirror::Class* c2 = incoming_type.GetClass();
-      DCHECK(c1 != NULL && !c1->IsPrimitive());
-      DCHECK(c2 != NULL && !c2->IsPrimitive());
+      DCHECK(c1 != nullptr && !c1->IsPrimitive());
+      DCHECK(c2 != nullptr && !c2->IsPrimitive());
       mirror::Class* join_class = ClassJoin(c1, c2);
       if (c1 == join_class && !IsPreciseReference()) {
         return *this;
@@ -906,7 +715,7 @@
       }
     }
   } else {
-    return reg_types->Conflict();  // Unexpected types => Conflict
+    return conflict;  // Unexpected types => Conflict
   }
 }
 
@@ -933,7 +742,7 @@
     mirror::Class* common_elem = ClassJoin(s_ct, t_ct);
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     mirror::Class* array_class = class_linker->FindArrayClass(Thread::Current(), &common_elem);
-    DCHECK(array_class != NULL);
+    DCHECK(array_class != nullptr);
     return array_class;
   } else {
     size_t s_depth = s->Depth();
@@ -969,7 +778,7 @@
   }
 }
 
-void RegType::VisitRoots(RootCallback* callback, void* arg) {
+void RegType::VisitRoots(RootCallback* callback, void* arg) const {
   if (!klass_.IsNull()) {
     callback(reinterpret_cast<mirror::Object**>(&klass_), arg, 0, kRootUnknown);
   }
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 378b4c9..d429dfd 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -60,7 +60,9 @@
   virtual bool IsUninitializedReference() const { return false; }
   virtual bool IsUninitializedThisReference() const { return false; }
   virtual bool IsUnresolvedAndUninitializedReference() const { return false; }
-  virtual bool IsUnresolvedAndUninitializedThisReference() const { return false; }
+  virtual bool IsUnresolvedAndUninitializedThisReference() const {
+    return false;
+  }
   virtual bool IsUnresolvedMergedReference() const { return false; }
   virtual bool IsUnresolvedSuperClass() const { return false; }
   virtual bool IsReference() const { return false; }
@@ -73,90 +75,64 @@
   virtual bool IsImpreciseConstant() const { return false; }
   virtual bool IsConstantTypes() const { return false; }
   bool IsConstant() const {
-    return IsPreciseConstant() || IsImpreciseConstant();
+    return IsImpreciseConstant() || IsPreciseConstant();
   }
   bool IsConstantLo() const {
-    return IsPreciseConstantLo() || IsImpreciseConstantLo();
+    return IsImpreciseConstantLo() || IsPreciseConstantLo();
   }
   bool IsPrecise() const {
-    return IsPreciseConstantLo() || IsPreciseConstant() || IsPreciseConstantHi();
+    return IsPreciseConstantLo() || IsPreciseConstant() ||
+           IsPreciseConstantHi();
   }
-  bool IsLongConstant() const {
-    return IsConstantLo();
-  }
+  bool IsLongConstant() const { return IsConstantLo(); }
   bool IsConstantHi() const {
     return (IsPreciseConstantHi() || IsImpreciseConstantHi());
   }
-  bool IsLongConstantHigh() const {
-    return IsConstantHi();
-  }
+  bool IsLongConstantHigh() const { return IsConstantHi(); }
   virtual bool IsUninitializedTypes() const { return false; }
-  bool IsUnresolvedTypes() const {
-    return IsUnresolvedReference() || IsUnresolvedAndUninitializedReference() ||
-           IsUnresolvedAndUninitializedThisReference() ||
-           IsUnresolvedMergedReference() || IsUnresolvedSuperClass();
-  }
+  virtual bool IsUnresolvedTypes() const { return false; }
 
   bool IsLowHalf() const {
-    return (IsLongLo() || IsDoubleLo() || IsPreciseConstantLo() ||
-            IsImpreciseConstantLo());
+    return (IsLongLo() || IsDoubleLo() || IsPreciseConstantLo() || IsImpreciseConstantLo());
   }
   bool IsHighHalf() const {
-    return (IsLongHi() || IsDoubleHi() || IsPreciseConstantHi() ||
-            IsImpreciseConstantHi());
+    return (IsLongHi() || IsDoubleHi() || IsPreciseConstantHi() || IsImpreciseConstantHi());
   }
-  bool IsLongOrDoubleTypes() const {
-    return IsLowHalf();
-  }
+  bool IsLongOrDoubleTypes() const { return IsLowHalf(); }
   // Check this is the low half, and that type_h is its matching high-half.
   inline bool CheckWidePair(const RegType& type_h) const {
     if (IsLowHalf()) {
-      return ((IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
-              (IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
-              (IsImpreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
+      return ((IsImpreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
               (IsImpreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
+              (IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
+              (IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
               (IsDoubleLo() && type_h.IsDoubleHi()) ||
               (IsLongLo() && type_h.IsLongHi()));
     }
     return false;
   }
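+  // For example, a long value occupies a register pair: LongLo in register vN
+  // must be paired with LongHi in register vN+1 for CheckWidePair to succeed.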
   // The high half that corresponds to this low half
-  const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const RegType& HighHalf(RegTypeCache* cache) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsConstantBoolean() const {
-    return IsConstant() && (ConstantValue() >= 0) && (ConstantValue() <= 1);
-  }
-  virtual bool IsConstantChar() const {
-    return false;
-  }
-  virtual bool IsConstantByte() const {
-    return false;
-  }
-  virtual bool IsConstantShort() const {
-    return false;
-  }
-  virtual bool IsOne() const {
-    return false;
-  }
-  virtual bool IsZero() const {
-    return false;
-  }
+  bool IsConstantBoolean() const;
+  virtual bool IsConstantChar() const { return false; }
+  virtual bool IsConstantByte() const { return false; }
+  virtual bool IsConstantShort() const { return false; }
+  virtual bool IsOne() const { return false; }
+  virtual bool IsZero() const { return false; }
   bool IsReferenceTypes() const {
     return IsNonZeroReferenceTypes() || IsZero();
   }
-  virtual bool IsNonZeroReferenceTypes() const {
-    return false;
-  }
+  virtual bool IsNonZeroReferenceTypes() const { return false; }
   bool IsCategory1Types() const {
-    return IsChar() || IsInteger() || IsFloat() || IsConstant() || IsByte() || IsShort() ||
-        IsBoolean();
+    return IsChar() || IsInteger() || IsFloat() || IsConstant() || IsByte() ||
+           IsShort() || IsBoolean();
   }
   bool IsCategory2Types() const {
     return IsLowHalf();  // Don't expect explicit testing of high halves
   }
-  bool IsBooleanTypes() const {
-    return IsBoolean() || IsConstantBoolean();
-  }
+  bool IsBooleanTypes() const { return IsBoolean() || IsConstantBoolean(); }
   bool IsByteTypes() const {
     return IsConstantByte() || IsByte() || IsBoolean();
   }
@@ -167,48 +143,40 @@
     return IsChar() || IsBooleanTypes() || IsConstantChar();
   }
   bool IsIntegralTypes() const {
-    return IsInteger() || IsConstant() || IsByte() || IsShort() || IsChar() || IsBoolean();
+    return IsInteger() || IsConstant() || IsByte() || IsShort() || IsChar() ||
+           IsBoolean();
   }
-  // Give the constant value encoded, but this shouldn't be called in the general case.
-  virtual int32_t ConstantValue() const;
-  virtual int32_t ConstantValueLo() const;
-  virtual int32_t ConstantValueHi() const;
-  bool IsArrayIndexTypes() const {
-    return IsIntegralTypes();
-  }
+  bool IsArrayIndexTypes() const { return IsIntegralTypes(); }
   // Float type may be derived from any constant type
-  bool IsFloatTypes() const {
-    return IsFloat() || IsConstant();
-  }
-  bool IsLongTypes() const {
-    return IsLongLo() || IsLongConstant();
-  }
+  bool IsFloatTypes() const { return IsFloat() || IsConstant(); }
+  bool IsLongTypes() const { return IsLongLo() || IsLongConstant(); }
   bool IsLongHighTypes() const {
-    return (IsLongHi() ||
-            IsPreciseConstantHi() ||
-            IsImpreciseConstantHi());
+    return (IsLongHi() || IsPreciseConstantHi() || IsImpreciseConstantHi());
   }
-  bool IsDoubleTypes() const {
-    return IsDoubleLo() || IsLongConstant();
-  }
+  bool IsDoubleTypes() const { return IsDoubleLo() || IsLongConstant(); }
   bool IsDoubleHighTypes() const {
     return (IsDoubleHi() || IsPreciseConstantHi() || IsImpreciseConstantHi());
   }
-  virtual bool IsLong() const {
-    return false;
+  virtual bool IsLong() const { return false; }
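+  // Non-virtual fast path that checks the cached class directly; debug builds
+  // cross-check the result against the virtual HasClassVirtual() override.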
+  bool HasClass() const {
+    bool result = !klass_.IsNull();
+    DCHECK_EQ(result, HasClassVirtual());
+    return result;
   }
-  virtual bool HasClass() const {
-    return false;
-  }
+  virtual bool HasClassVirtual() const { return false; }
   bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   Primitive::Type GetPrimitiveType() const;
-  bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsJavaLangObjectArray() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::string& GetDescriptor() const {
-    DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
-                          !IsUnresolvedSuperClass()));
+    DCHECK(HasClass() ||
+           (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
+            !IsUnresolvedSuperClass()));
     return descriptor_;
   }
   mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -217,53 +185,65 @@
     DCHECK(HasClass());
     return klass_.Read();
   }
-  uint16_t GetId() const {
-    return cache_id_;
-  }
+  uint16_t GetId() const { return cache_id_; }
   const RegType& GetSuperClass(RegTypeCache* cache) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  virtual std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  virtual std::string Dump() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
 
   // Can this type access other?
-  bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool CanAccess(const RegType& other) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can this type access a member with the given properties?
   bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can this type be assigned by src?
-  // Note: Object and interface types may always be assigned to one another, see comment on
+  // Note: Object and interface types may always be assigned to one another;
+  // see the comment on
   // ClassJoin.
-  bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsAssignableFrom(const RegType& src) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't allow assignment to
+  // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't
+  // allow assignment to
   // an interface from an Object.
   bool IsStrictlyAssignableFrom(const RegType& src) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Are these RegTypes the same?
-  bool Equals(const RegType& other) const {
-    return GetId() == other.GetId();
-  }
+  bool Equals(const RegType& other) const { return GetId() == other.GetId(); }
 
-  // Compute the merge of this register from one edge (path) with incoming_type from another.
-  virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+  // Compute the merge of this register from one edge (path) with incoming_type
+  // from another.
+  const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
-   * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is
-   * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of
-   * S and T such that there isn't a parent of both S and T that isn't also the parent of J (ie J
+   * A basic Join operation on classes. For a pair of types S and T the Join,
+   * written S v T = J, is S <: J, T <: J and for-all U such that S <: U,
+   * T <: U then J <: U. That is J is the parent of S and T such that there
+   * isn't a parent of both S and T that isn't also the parent of J (ie J
    * is the deepest (lowest upper bound) parent of S and T).
    *
-   * This operation applies for regular classes and arrays, however, for interface types there
-   * needn't be a partial ordering on the types. We could solve the problem of a lack of a partial
-   * order by introducing sets of types, however, the only operation permissible on an interface is
-   * invoke-interface. In the tradition of Java verifiers [1] we defer the verification of interface
-   * types until an invoke-interface call on the interface typed reference at runtime and allow
-   * the perversion of Object being assignable to an interface type (note, however, that we don't
-   * allow assignment of Object or Interface to any concrete class and are therefore type safe).
+   * This operation applies for regular classes and arrays, however, for
+   * interface types there needn't be a partial ordering on the types. We
+   * could solve the problem of a lack of a partial order by introducing sets
+   * of types, however, the only operation permissible on an interface is
+   * invoke-interface. In the tradition of Java verifiers [1] we defer the
+   * verification of interface types until an invoke-interface call on the
+   * interface typed reference at runtime and allow the perversion of Object
+   * being assignable to an interface type (note, however, that we don't
+   * allow assignment of Object or Interface to any concrete class and are
+   * therefore type safe).
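+   *
+   * A concrete example of the Join: for S = java/lang/String and
+   * T = java/lang/Integer, J is java/lang/Object; for array types the element
+   * classes are joined first, so String[] v Integer[] = Object[].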
    *
    * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
    */
@@ -272,11 +252,12 @@
 
   virtual ~RegType() {}
 
-  void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VisitRoots(RootCallback* callback, void* arg) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  protected:
-  RegType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  RegType(mirror::Class* klass, const std::string& descriptor,
+          uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
@@ -285,414 +266,402 @@
 
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-
   const std::string descriptor_;
-  mutable GcRoot<mirror::Class> klass_;  // Non-const only due to moving classes.
+  mutable GcRoot<mirror::Class>
+      klass_;  // Non-const only due to moving classes.
   const uint16_t cache_id_;
 
   friend class RegTypeCache;
 
  private:
+  static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   DISALLOW_COPY_AND_ASSIGN(RegType);
 };
 
 // Bottom type.
-class ConflictType : public RegType {
+class ConflictType FINAL : public RegType {
  public:
-  bool IsConflict() const {
-    return true;
-  }
+  bool IsConflict() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the singleton Conflict instance.
-  static ConflictType* GetInstance();
+  static const ConflictType* GetInstance() PURE;
 
   // Create the singleton instance.
-  static ConflictType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                      uint16_t cache_id)
+  static const ConflictType* CreateInstance(mirror::Class* klass,
+                                            const std::string& descriptor,
+                                            uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Destroy the singleton instance.
   static void Destroy();
 
  private:
-  ConflictType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : RegType(klass, descriptor, cache_id) {
-  }
+  ConflictType(mirror::Class* klass, const std::string& descriptor,
+               uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : RegType(klass, descriptor, cache_id) {}
 
-  static ConflictType* instance_;
+  static const ConflictType* instance_;
 };
 
-// A variant of the bottom type used to specify an undefined value in the incoming registers.
+// A variant of the bottom type used to specify an undefined value in the
+// incoming registers.
 // Merging with UndefinedType yields ConflictType which is the true bottom.
-class UndefinedType : public RegType {
+class UndefinedType FINAL : public RegType {
  public:
-  bool IsUndefined() const {
-    return true;
-  }
+  bool IsUndefined() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the singleton Undefined instance.
-  static UndefinedType* GetInstance();
+  static const UndefinedType* GetInstance() PURE;
 
   // Create the singleton instance.
-  static UndefinedType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                       uint16_t cache_id)
+  static const UndefinedType* CreateInstance(mirror::Class* klass,
+                                             const std::string& descriptor,
+                                             uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Destroy the singleton instance.
   static void Destroy();
 
  private:
-  UndefinedType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : RegType(klass, descriptor, cache_id) {
-  }
+  UndefinedType(mirror::Class* klass, const std::string& descriptor,
+                uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : RegType(klass, descriptor, cache_id) {}
 
-  virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  static UndefinedType* instance_;
+  static const UndefinedType* instance_;
 };
 
 class PrimitiveType : public RegType {
  public:
-  PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  PrimitiveType(mirror::Class* klass, const std::string& descriptor,
+                uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class Cat1Type : public PrimitiveType {
  public:
-  Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Cat1Type(mirror::Class* klass, const std::string& descriptor,
+           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class IntegerType : public Cat1Type {
  public:
-  bool IsInteger() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                     uint16_t cache_id)
+  bool IsInteger() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const IntegerType* CreateInstance(mirror::Class* klass,
+                                           const std::string& descriptor,
+                                           uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static IntegerType* GetInstance();
+  static const IntegerType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  IntegerType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
-  static IntegerType* instance_;
+  IntegerType(mirror::Class* klass, const std::string& descriptor,
+              uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+  static const IntegerType* instance_;
 };
 
-class BooleanType : public Cat1Type {
+class BooleanType FINAL : public Cat1Type {
  public:
-  bool IsBoolean() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                     uint16_t cache_id)
+  bool IsBoolean() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const BooleanType* CreateInstance(mirror::Class* klass,
+                                           const std::string& descriptor,
+                                           uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static BooleanType* GetInstance();
+  static const BooleanType* GetInstance() PURE;
   static void Destroy();
- private:
-  BooleanType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
 
-  static BooleanType* instance;
+ private:
+  BooleanType(mirror::Class* klass, const std::string& descriptor,
+              uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+
+  static const BooleanType* instance_;
 };
 
-class ByteType : public Cat1Type {
+class ByteType FINAL : public Cat1Type {
  public:
-  bool IsByte() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                  uint16_t cache_id)
+  bool IsByte() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const ByteType* CreateInstance(mirror::Class* klass,
+                                        const std::string& descriptor,
+                                        uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static ByteType* GetInstance();
+  static const ByteType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  ByteType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
-  static ByteType* instance_;
+  ByteType(mirror::Class* klass, const std::string& descriptor,
+           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+  static const ByteType* instance_;
 };
 
-class ShortType : public Cat1Type {
+class ShortType FINAL : public Cat1Type {
  public:
-  bool IsShort() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                   uint16_t cache_id)
+  bool IsShort() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const ShortType* CreateInstance(mirror::Class* klass,
+                                         const std::string& descriptor,
+                                         uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static ShortType* GetInstance();
+  static const ShortType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  ShortType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
-  static ShortType* instance_;
+  ShortType(mirror::Class* klass, const std::string& descriptor,
+            uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+  static const ShortType* instance_;
 };
 
-class CharType : public Cat1Type {
+class CharType FINAL : public Cat1Type {
  public:
-  bool IsChar() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                  uint16_t cache_id)
+  bool IsChar() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const CharType* CreateInstance(mirror::Class* klass,
+                                        const std::string& descriptor,
+                                        uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static CharType* GetInstance();
+  static const CharType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  CharType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
-  static CharType* instance_;
+  CharType(mirror::Class* klass, const std::string& descriptor,
+           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+  static const CharType* instance_;
 };
 
-class FloatType : public Cat1Type {
+class FloatType FINAL : public Cat1Type {
  public:
-  bool IsFloat() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                   uint16_t cache_id)
+  bool IsFloat() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const FloatType* CreateInstance(mirror::Class* klass,
+                                         const std::string& descriptor,
+                                         uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static FloatType* GetInstance();
+  static const FloatType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  FloatType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
-  static FloatType* instance_;
+  FloatType(mirror::Class* klass, const std::string& descriptor,
+            uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+  static const FloatType* instance_;
 };
 
 class Cat2Type : public PrimitiveType {
  public:
-  Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Cat2Type(mirror::Class* klass, const std::string& descriptor,
+           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class LongLoType : public Cat2Type {
+class LongLoType FINAL : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsLongLo() const {
-    return true;
-  }
-  bool IsLong() const {
-    return true;
-  }
-  static LongLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                    uint16_t cache_id)
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsLongLo() const OVERRIDE { return true; }
+  bool IsLong() const OVERRIDE { return true; }
+  static const LongLoType* CreateInstance(mirror::Class* klass,
+                                          const std::string& descriptor,
+                                          uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static LongLoType* GetInstance();
+  static const LongLoType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  LongLoType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat2Type(klass, descriptor, cache_id) {
-  }
-  static LongLoType* instance_;
+  LongLoType(mirror::Class* klass, const std::string& descriptor,
+             uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat2Type(klass, descriptor, cache_id) {}
+  static const LongLoType* instance_;
 };
 
-class LongHiType : public Cat2Type {
+class LongHiType FINAL : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsLongHi() const {
-    return true;
-  }
-  static LongHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                    uint16_t cache_id)
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsLongHi() const OVERRIDE { return true; }
+  static const LongHiType* CreateInstance(mirror::Class* klass,
+                                          const std::string& descriptor,
+                                          uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static LongHiType* GetInstance();
+  static const LongHiType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  LongHiType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat2Type(klass, descriptor, cache_id) {
-  }
-  static LongHiType* instance_;
+  LongHiType(mirror::Class* klass, const std::string& descriptor,
+             uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat2Type(klass, descriptor, cache_id) {}
+  static const LongHiType* instance_;
 };
 
-class DoubleLoType : public Cat2Type {
+class DoubleLoType FINAL : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsDoubleLo() const {
-    return true;
-  }
-  bool IsDouble() const {
-    return true;
-  }
-  static DoubleLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsDoubleLo() const OVERRIDE { return true; }
+  bool IsDouble() const OVERRIDE { return true; }
+  static const DoubleLoType* CreateInstance(mirror::Class* klass,
+                                            const std::string& descriptor,
+                                            uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const DoubleLoType* GetInstance() PURE;
+  static void Destroy();
+
+ private:
+  DoubleLoType(mirror::Class* klass, const std::string& descriptor,
+               uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat2Type(klass, descriptor, cache_id) {}
+  static const DoubleLoType* instance_;
+};
+
+class DoubleHiType FINAL : public Cat2Type {
+ public:
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual bool IsDoubleHi() const OVERRIDE { return true; }
+  static const DoubleHiType* CreateInstance(mirror::Class* klass,
+                                      const std::string& descriptor,
                                       uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static DoubleLoType* GetInstance();
+  static const DoubleHiType* GetInstance() PURE;
   static void Destroy();
- private:
-  DoubleLoType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat2Type(klass, descriptor, cache_id) {
-  }
-  static DoubleLoType* instance_;
-};
 
-class DoubleHiType : public Cat2Type {
- public:
-  std::string Dump() const;
-  virtual bool IsDoubleHi() const {
-    return true;
-  }
-  static DoubleHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                      uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static DoubleHiType* GetInstance();
-  static void Destroy();
  private:
-  DoubleHiType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat2Type(klass, descriptor, cache_id) {
-  }
-  static DoubleHiType* instance_;
+  DoubleHiType(mirror::Class* klass, const std::string& descriptor,
+               uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat2Type(klass, descriptor, cache_id) {}
+  static const DoubleHiType* instance_;
 };
 
 class ConstantType : public RegType {
  public:
-  ConstantType(uint32_t constat, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
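+  // Constants carry only a value; they are never tied to a loaded class, hence the null
+  // class and empty descriptor passed to RegType.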
+  ConstantType(uint32_t constant, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : RegType(nullptr, "", cache_id), constant_(constant) {
+  }
 
-  // If this is a 32-bit constant, what is the value? This value may be imprecise in which case
-  // the value represents part of the integer range of values that may be held in the register.
+  // If this is a 32-bit constant, what is the value? This value may be
+  // imprecise, in which case the value represents part of the integer range of
+  // values that may be held in the register.
   int32_t ConstantValue() const {
     DCHECK(IsConstantTypes());
     return constant_;
   }
-  int32_t ConstantValueLo() const;
-  int32_t ConstantValueHi() const;
 
-  bool IsZero() const {
+  int32_t ConstantValueLo() const {
+    DCHECK(IsConstantLo());
+    return constant_;
+  }
+
+  int32_t ConstantValueHi() const {
+    if (IsConstantHi() || IsPreciseConstantHi() || IsImpreciseConstantHi()) {
+      return constant_;
+    } else {
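+      // Querying the high half of something that is not a wide constant is a caller bug;
+      // surface it in debug builds and fall back to zero otherwise.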
+      DCHECK(false);
+      return 0;
+    }
+  }
+
+  bool IsZero() const OVERRIDE {
     return IsPreciseConstant() && ConstantValue() == 0;
   }
-  bool IsOne() const {
+  bool IsOne() const OVERRIDE {
     return IsPreciseConstant() && ConstantValue() == 1;
   }
 
-  bool IsConstantChar() const {
+  bool IsConstantChar() const OVERRIDE {
     return IsConstant() && ConstantValue() >= 0 &&
            ConstantValue() <= std::numeric_limits<jchar>::max();
   }
-  bool IsConstantByte() const {
+  bool IsConstantByte() const OVERRIDE {
     return IsConstant() &&
            ConstantValue() >= std::numeric_limits<jbyte>::min() &&
            ConstantValue() <= std::numeric_limits<jbyte>::max();
   }
-  bool IsConstantShort() const {
+  bool IsConstantShort() const OVERRIDE {
     return IsConstant() &&
            ConstantValue() >= std::numeric_limits<jshort>::min() &&
            ConstantValue() <= std::numeric_limits<jshort>::max();
   }
-  virtual bool IsConstantTypes() const { return true; }
+  virtual bool IsConstantTypes() const OVERRIDE { return true; }
 
  private:
   const uint32_t constant_;
 };
 
-class PreciseConstType : public ConstantType {
+class PreciseConstType FINAL : public ConstantType {
  public:
-  PreciseConstType(uint32_t constat, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : ConstantType(constat, cache_id) {
-  }
+  PreciseConstType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
 
-  bool IsPreciseConstant() const {
-    return true;
-  }
+  bool IsPreciseConstant() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class PreciseConstLoType : public ConstantType {
+class PreciseConstLoType FINAL : public ConstantType {
  public:
-  PreciseConstLoType(uint32_t constat, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : ConstantType(constat, cache_id) {
-  }
-  bool IsPreciseConstantLo() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  PreciseConstLoType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
+  bool IsPreciseConstantLo() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class PreciseConstHiType : public ConstantType {
+class PreciseConstHiType FINAL : public ConstantType {
  public:
-  PreciseConstHiType(uint32_t constat, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : ConstantType(constat, cache_id) {
-  }
-  bool IsPreciseConstantHi() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  PreciseConstHiType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
+  bool IsPreciseConstantHi() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class ImpreciseConstType : public ConstantType {
+class ImpreciseConstType FINAL : public ConstantType {
  public:
   ImpreciseConstType(uint32_t constat, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsImpreciseConstant() const {
-    return true;
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constat, cache_id) {
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsImpreciseConstant() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class ImpreciseConstLoType : public ConstantType {
+class ImpreciseConstLoType FINAL : public ConstantType {
  public:
-  ImpreciseConstLoType(uint32_t constat, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : ConstantType(constat, cache_id) {
-  }
-  bool IsImpreciseConstantLo() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
+  bool IsImpreciseConstantLo() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class ImpreciseConstHiType : public ConstantType {
+class ImpreciseConstHiType FINAL : public ConstantType {
  public:
-  ImpreciseConstHiType(uint32_t constat, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : ConstantType(constat, cache_id) {
-  }
-  bool IsImpreciseConstantHi() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
+  bool IsImpreciseConstantHi() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-// Common parent of all uninitialized types. Uninitialized types are created by "new" dex
+// Common parent of all uninitialized types. Uninitialized types are created by
+// "new" dex
 // instructions and must be passed to a constructor.
 class UninitializedType : public RegType {
  public:
-  UninitializedType(mirror::Class* klass, const std::string& descriptor, uint32_t allocation_pc,
-                    uint16_t cache_id)
-      : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {
-  }
+  UninitializedType(mirror::Class* klass, const std::string& descriptor,
+                    uint32_t allocation_pc, uint16_t cache_id)
+      : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
 
-  bool IsUninitializedTypes() const;
-  bool IsNonZeroReferenceTypes() const;
+  bool IsUninitializedTypes() const OVERRIDE;
+  bool IsNonZeroReferenceTypes() const OVERRIDE;
 
   uint32_t GetAllocationPc() const {
     DCHECK(IsUninitializedTypes());
@@ -704,30 +673,27 @@
 };
 
 // Similar to ReferenceType but not yet having been passed to a constructor.
-class UninitializedReferenceType : public UninitializedType {
+class UninitializedReferenceType FINAL : public UninitializedType {
  public:
-  UninitializedReferenceType(mirror::Class* klass, const std::string& descriptor,
+  UninitializedReferenceType(mirror::Class* klass,
+                             const std::string& descriptor,
                              uint32_t allocation_pc, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : UninitializedType(klass, descriptor, allocation_pc, cache_id) {
-  }
+      : UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
 
-  bool IsUninitializedReference() const {
-    return true;
-  }
+  bool IsUninitializedReference() const OVERRIDE { return true; }
 
-  bool HasClass() const {
-    return true;
-  }
+  bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-// Similar to UnresolvedReferenceType but not yet having been passed to a constructor.
-class UnresolvedUninitializedRefType : public UninitializedType {
+// Similar to UnresolvedReferenceType but not yet having been passed to a
+// constructor.
+class UnresolvedUninitializedRefType FINAL : public UninitializedType {
  public:
-  UnresolvedUninitializedRefType(const std::string& descriptor, uint32_t allocation_pc,
-                                 uint16_t cache_id)
+  UnresolvedUninitializedRefType(const std::string& descriptor,
+                                 uint32_t allocation_pc, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : UninitializedType(NULL, descriptor, allocation_pc, cache_id) {
     if (kIsDebugBuild) {
@@ -735,19 +701,22 @@
     }
   }
 
-  bool IsUnresolvedAndUninitializedReference() const {
-    return true;
-  }
+  bool IsUnresolvedAndUninitializedReference() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-// Similar to UninitializedReferenceType but special case for the this argument of a constructor.
-class UninitializedThisReferenceType : public UninitializedType {
+// Similar to UninitializedReferenceType but a special case for the 'this'
+// argument of a constructor.
+class UninitializedThisReferenceType FINAL : public UninitializedType {
  public:
-  UninitializedThisReferenceType(mirror::Class* klass, const std::string& descriptor,
+  UninitializedThisReferenceType(mirror::Class* klass,
+                                 const std::string& descriptor,
                                  uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, 0, cache_id) {
@@ -756,23 +725,20 @@
     }
   }
 
-  virtual bool IsUninitializedThisReference() const {
-    return true;
-  }
+  bool IsUninitializedThisReference() const OVERRIDE { return true; }
 
-  bool HasClass() const {
-    return true;
-  }
+  bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class UnresolvedUninitializedThisRefType : public UninitializedType {
+class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
  public:
-  UnresolvedUninitializedThisRefType(const std::string& descriptor, uint16_t cache_id)
+  UnresolvedUninitializedThisRefType(const std::string& descriptor,
+                                     uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : UninitializedType(NULL, descriptor, 0, cache_id) {
     if (kIsDebugBuild) {
@@ -780,112 +746,108 @@
     }
   }
 
-  bool IsUnresolvedAndUninitializedThisReference() const {
-    return true;
-  }
+  bool IsUnresolvedAndUninitializedThisReference() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-// A type of register holding a reference to an Object of type GetClass or a sub-class.
-class ReferenceType : public RegType {
+// A type of register holding a reference to an Object of type GetClass or a
+// sub-class.
+class ReferenceType FINAL : public RegType {
  public:
-  ReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-     : RegType(klass, descriptor, cache_id) {
-  }
+  ReferenceType(mirror::Class* klass, const std::string& descriptor,
+                uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : RegType(klass, descriptor, cache_id) {}
 
-  bool IsReference() const {
-    return true;
-  }
+  bool IsReference() const OVERRIDE { return true; }
 
-  bool IsNonZeroReferenceTypes() const {
-    return true;
-  }
+  bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
 
-  bool HasClass() const {
-    return true;
-  }
+  bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-// A type of register holding a reference to an Object of type GetClass and only an object of that
+// A type of register holding a reference to an Object of type GetClass and only
+// an object of that
 // type.
-class PreciseReferenceType : public RegType {
+class PreciseReferenceType FINAL : public RegType {
  public:
-  PreciseReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+  PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+                       uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsPreciseReference() const {
-    return true;
-  }
+  bool IsPreciseReference() const OVERRIDE { return true; }
 
-  bool IsNonZeroReferenceTypes() const {
-    return true;
-  }
+  bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
 
-  bool HasClass() const {
-    return true;
-  }
+  bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // Common parent of unresolved types.
 class UnresolvedType : public RegType {
  public:
   UnresolvedType(const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : RegType(NULL, descriptor, cache_id) {
-  }
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : RegType(NULL, descriptor, cache_id) {}
 
-  bool IsNonZeroReferenceTypes() const;
+  bool IsNonZeroReferenceTypes() const OVERRIDE;
 };
 
-// Similar to ReferenceType except the Class couldn't be loaded. Assignability and other tests made
+// Similar to ReferenceType except the Class couldn't be loaded. Assignability
+// and other tests made
 // of this type must be conservative.
-class UnresolvedReferenceType : public UnresolvedType {
+class UnresolvedReferenceType FINAL : public UnresolvedType {
  public:
   UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : UnresolvedType(descriptor, cache_id) {
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : UnresolvedType(descriptor, cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
   }
 
-  bool IsUnresolvedReference() const {
-    return true;
-  }
+  bool IsUnresolvedReference() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // Type representing the super-class of an unresolved type.
-class UnresolvedSuperClass : public UnresolvedType {
+class UnresolvedSuperClass FINAL : public UnresolvedType {
  public:
-  UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache, uint16_t cache_id)
+  UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
+                       uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : UnresolvedType("", cache_id), unresolved_child_id_(child_id),
+      : UnresolvedType("", cache_id),
+        unresolved_child_id_(child_id),
         reg_type_cache_(reg_type_cache) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
   }
 
-  bool IsUnresolvedSuperClass() const {
-    return true;
-  }
+  bool IsUnresolvedSuperClass() const OVERRIDE { return true; }
+
+  bool IsUnresolvedTypes() const OVERRIDE { return true; }
 
   uint16_t GetUnresolvedSuperClassChildId() const {
     DCHECK(IsUnresolvedSuperClass());
     return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -894,14 +856,17 @@
   const RegTypeCache* const reg_type_cache_;
 };
 
-// A merge of two unresolved types. If the types were resolved this may be Conflict or another
+// A merge of two unresolved types. If the types were resolved this may be
+// Conflict or another
 // known ReferenceType.
-class UnresolvedMergedType : public UnresolvedType {
+class UnresolvedMergedType FINAL : public UnresolvedType {
  public:
-  UnresolvedMergedType(uint16_t left_id, uint16_t right_id, const RegTypeCache* reg_type_cache,
-                       uint16_t cache_id)
+  UnresolvedMergedType(uint16_t left_id, uint16_t right_id,
+                       const RegTypeCache* reg_type_cache, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : UnresolvedType("", cache_id), reg_type_cache_(reg_type_cache), merged_types_(left_id, right_id) {
+      : UnresolvedType("", cache_id),
+        reg_type_cache_(reg_type_cache),
+        merged_types_(left_id, right_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
@@ -916,11 +881,11 @@
   // The complete set of merged types.
   std::set<uint16_t> GetMergedTypes() const;
 
-  bool IsUnresolvedMergedReference() const {
-    return true;
-  }
+  bool IsUnresolvedMergedReference() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index fc9e5c9..9024a7d 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -17,16 +17,19 @@
 #ifndef ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
 #define ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
 
+#include "class_linker.h"
+#include "mirror/class-inl.h"
+#include "mirror/string.h"
+#include "mirror/throwable.h"
 #include "reg_type.h"
 #include "reg_type_cache.h"
-#include "class_linker.h"
 
 namespace art {
 namespace verifier {
 
 inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
   DCHECK_LT(id, entries_.size());
-  RegType* result = entries_[id];
+  const RegType* result = entries_[id];
   DCHECK(result != NULL);
   return *result;
 }
@@ -40,6 +43,81 @@
   return FromCat1NonSmallConstant(value, precise);
 }
 
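+// The accessors below hand out imprecise constants that stand in for the value range of a
+// primitive type (e.g. "some byte") rather than one exact value.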
+inline const ImpreciseConstType& RegTypeCache::ByteConstant() {
+  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const ImpreciseConstType& RegTypeCache::CharConstant() {
+  int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
+  const ConstantType& result = FromCat1Const(jchar_max, false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const ImpreciseConstType& RegTypeCache::ShortConstant() {
+  const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const ImpreciseConstType& RegTypeCache::IntConstant() {
+  const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const ImpreciseConstType& RegTypeCache::PosByteConstant() {
+  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const ImpreciseConstType& RegTypeCache::PosShortConstant() {
+  const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const PreciseReferenceType& RegTypeCache::JavaLangClass() {
+  const RegType* result = &FromClass("Ljava/lang/Class;", mirror::Class::GetJavaLangClass(), true);
+  DCHECK(result->IsPreciseReference());
+  return *down_cast<const PreciseReferenceType*>(result);
+}
+
+inline const PreciseReferenceType& RegTypeCache::JavaLangString() {
+  // String is final and therefore always precise.
+  const RegType* result = &FromClass("Ljava/lang/String;", mirror::String::GetJavaLangString(),
+                                     true);
+  DCHECK(result->IsPreciseReference());
+  return *down_cast<const PreciseReferenceType*>(result);
+}
+
+inline const RegType& RegTypeCache::JavaLangThrowable(bool precise) {
+  const RegType* result = &FromClass("Ljava/lang/Throwable;",
+                                     mirror::Throwable::GetJavaLangThrowable(), precise);
+  if (precise) {
+    DCHECK(result->IsPreciseReference());
+    return *down_cast<const PreciseReferenceType*>(result);
+  } else {
+    DCHECK(result->IsReference());
+    return *down_cast<const ReferenceType*>(result);
+  }
+}
+
+inline const RegType& RegTypeCache::JavaLangObject(bool precise) {
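+  // java.lang.Object is the superclass of java.lang.Class, so it can be obtained from the
+  // already-resolved Class mirror without going through the class linker.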
+  const RegType* result = &FromClass("Ljava/lang/Object;",
+                                     mirror::Class::GetJavaLangClass()->GetSuperClass(), precise);
+  if (precise) {
+    DCHECK(result->IsPreciseReference());
+    return *down_cast<const PreciseReferenceType*>(result);
+  } else {
+    DCHECK(result->IsReference());
+    return *down_cast<const ReferenceType*>(result);
+  }
+}
+
 }  // namespace verifier
 }  // namespace art
 #endif  // ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 92a005b..121fccb 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -21,15 +21,16 @@
 #include "dex_file-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
+#include "reg_type-inl.h"
 
 namespace art {
 namespace verifier {
 
 bool RegTypeCache::primitive_initialized_ = false;
 uint16_t RegTypeCache::primitive_count_ = 0;
-PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
 
-static bool MatchingPrecisionForClass(RegType* entry, bool precise)
+static bool MatchingPrecisionForClass(const RegType* entry, bool precise)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (entry->IsPreciseReference() == precise) {
     // We were or weren't looking for a precise reference and we found what we need.
@@ -98,7 +99,7 @@
 };
 
 const RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
-  CHECK(RegTypeCache::primitive_initialized_);
+  DCHECK(RegTypeCache::primitive_initialized_);
   switch (prim_type) {
     case Primitive::kPrimBoolean:
       return *BooleanType::GetInstance();
@@ -123,7 +124,7 @@
 }
 
 bool RegTypeCache::MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise) {
-  RegType* entry = entries_[idx];
+  const RegType* entry = entries_[idx];
   if (descriptor != entry->descriptor_) {
     return false;
   }
@@ -143,11 +144,11 @@
   Thread* self = Thread::Current();
   StackHandleScope<1> hs(self);
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(loader));
-  mirror::Class* klass = NULL;
+  mirror::Class* klass = nullptr;
   if (can_load_classes_) {
     klass = class_linker->FindClass(self, descriptor, class_loader);
   } else {
-    klass = class_linker->LookupClass(descriptor, loader);
+    klass = class_linker->LookupClass(self, descriptor, loader);
     if (klass != nullptr && !klass->IsLoaded()) {
       // We found the class but without it being loaded its not safe for use.
       klass = nullptr;
@@ -169,7 +170,7 @@
   // Class not found in the cache, will create a new type for that.
   // Try resolving class.
   mirror::Class* klass = ResolveClass(descriptor, loader);
-  if (klass != NULL) {
+  if (klass != nullptr) {
     // Class resolved, first look for the class in the list of entries
     // Class was not found, must create new type.
     // To pass the verification, the type should be imprecise,
@@ -219,7 +220,7 @@
   } else {
     // Look for the reference in the list of entries to have.
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
         return *cur_entry;
       }
@@ -237,8 +238,8 @@
 }
 
 RegTypeCache::RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
-  if (kIsDebugBuild && can_load_classes) {
-    Thread::Current()->AssertThreadSuspensionIsAllowable();
+  if (kIsDebugBuild) {
+    Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
   }
   entries_.reserve(64);
   FillPrimitiveAndSmallConstantTypes();
@@ -251,7 +252,7 @@
     // All entries are from the global pool, nothing to delete.
     return;
   }
-  std::vector<RegType*>::iterator non_primitive_begin = entries_.begin();
+  std::vector<const RegType*>::iterator non_primitive_begin = entries_.begin();
   std::advance(non_primitive_begin, kNumPrimitivesAndSmallConstants);
   STLDeleteContainerPointers(non_primitive_begin, entries_.end());
 }
@@ -271,7 +272,7 @@
     DoubleLoType::Destroy();
     DoubleHiType::Destroy();
     for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
-      PreciseConstType* type = small_precise_constants_[value - kMinSmallConstant];
+      const PreciseConstType* type = small_precise_constants_[value - kMinSmallConstant];
       delete type;
       small_precise_constants_[value - kMinSmallConstant] = nullptr;
     }
@@ -281,14 +282,14 @@
 }
 
 template <class Type>
-Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
-  mirror::Class* klass = NULL;
+const Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
+  mirror::Class* klass = nullptr;
   // Try loading the class from linker.
   if (!descriptor.empty()) {
     klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(),
                                                                        descriptor.c_str());
   }
-  Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
+  const Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
   RegTypeCache::primitive_count_++;
   return entry;
 }
@@ -330,10 +331,10 @@
   }
   // Check if entry already exists.
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
+    const RegType* cur_entry = entries_[i];
     if (cur_entry->IsUnresolvedMergedReference()) {
       std::set<uint16_t> cur_entry_types =
-          (down_cast<UnresolvedMergedType*>(cur_entry))->GetMergedTypes();
+          (down_cast<const UnresolvedMergedType*>(cur_entry))->GetMergedTypes();
       if (cur_entry_types == types) {
         return *cur_entry;
       }
@@ -353,10 +354,10 @@
 const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
   // Check if entry already exists.
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
+    const RegType* cur_entry = entries_[i];
     if (cur_entry->IsUnresolvedSuperClass()) {
-      UnresolvedSuperClass* tmp_entry =
-          down_cast<UnresolvedSuperClass*>(cur_entry);
+      const UnresolvedSuperClass* tmp_entry =
+          down_cast<const UnresolvedSuperClass*>(cur_entry);
       uint16_t unresolved_super_child_id =
           tmp_entry->GetUnresolvedSuperClassChildId();
       if (unresolved_super_child_id == child.GetId()) {
@@ -370,27 +371,28 @@
 }
 
 const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
-  UninitializedType* entry = NULL;
+  UninitializedType* entry = nullptr;
   const std::string& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUnresolvedAndUninitializedReference() &&
-          down_cast<UnresolvedUninitializedRefType*>(cur_entry)->GetAllocationPc() == allocation_pc &&
+          down_cast<const UnresolvedUninitializedRefType*>(cur_entry)->GetAllocationPc()
+              == allocation_pc &&
           (cur_entry->GetDescriptor() == descriptor)) {
-        return *down_cast<UnresolvedUninitializedRefType*>(cur_entry);
+        return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
       }
     }
     entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUninitializedReference() &&
-          down_cast<UninitializedReferenceType*>(cur_entry)
+          down_cast<const UninitializedReferenceType*>(cur_entry)
               ->GetAllocationPc() == allocation_pc &&
           cur_entry->GetClass() == klass) {
-        return *down_cast<UninitializedReferenceType*>(cur_entry);
+        return *down_cast<const UninitializedReferenceType*>(cur_entry);
       }
     }
     entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
@@ -405,7 +407,7 @@
   if (uninit_type.IsUnresolvedTypes()) {
     const std::string& descriptor(uninit_type.GetDescriptor());
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUnresolvedReference() &&
           cur_entry->GetDescriptor() == descriptor) {
         return *cur_entry;
@@ -417,7 +419,7 @@
     if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
       // For uninitialized "this reference" look for reference types that are not precise.
       for (size_t i = primitive_count_; i < entries_.size(); i++) {
-        RegType* cur_entry = entries_[i];
+        const RegType* cur_entry = entries_[i];
         if (cur_entry->IsReference() && cur_entry->GetClass() == klass) {
           return *cur_entry;
         }
@@ -427,7 +429,7 @@
       // We're uninitialized because of allocation, look or create a precise type as allocations
       // may only create objects of that type.
       for (size_t i = primitive_count_; i < entries_.size(); i++) {
-        RegType* cur_entry = entries_[i];
+        const RegType* cur_entry = entries_[i];
         if (cur_entry->IsPreciseReference() && cur_entry->GetClass() == klass) {
           return *cur_entry;
         }
@@ -441,61 +443,24 @@
   return *entry;
 }
 
-const ImpreciseConstType& RegTypeCache::ByteConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
-const ImpreciseConstType& RegTypeCache::CharConstant() {
-  int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
-  const ConstantType& result =  FromCat1Const(jchar_max, false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
-const ImpreciseConstType& RegTypeCache::ShortConstant() {
-  const ConstantType& result =  FromCat1Const(std::numeric_limits<jshort>::min(), false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
-const ImpreciseConstType& RegTypeCache::IntConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
-const ImpreciseConstType& RegTypeCache::PosByteConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
-const ImpreciseConstType& RegTypeCache::PosShortConstant() {
-  const ConstantType& result =  FromCat1Const(std::numeric_limits<jshort>::max(), false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
 const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
   UninitializedType* entry;
   const std::string& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUnresolvedAndUninitializedThisReference() &&
           cur_entry->GetDescriptor() == descriptor) {
-        return *down_cast<UninitializedType*>(cur_entry);
+        return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
     entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUninitializedThisReference() && cur_entry->GetClass() == klass) {
-        return *down_cast<UninitializedType*>(cur_entry);
+        return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
     entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
@@ -506,11 +471,11 @@
 
 const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
+    const RegType* cur_entry = entries_[i];
     if (cur_entry->klass_.IsNull() && cur_entry->IsConstant() &&
         cur_entry->IsPreciseConstant() == precise &&
-        (down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
-      return *down_cast<ConstantType*>(cur_entry);
+        (down_cast<const ConstantType*>(cur_entry))->ConstantValue() == value) {
+      return *down_cast<const ConstantType*>(cur_entry);
     }
   }
   ConstantType* entry;
@@ -525,10 +490,10 @@
 
 const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
+    const RegType* cur_entry = entries_[i];
     if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
-        (down_cast<ConstantType*>(cur_entry))->ConstantValueLo() == value) {
-      return *down_cast<ConstantType*>(cur_entry);
+        (down_cast<const ConstantType*>(cur_entry))->ConstantValueLo() == value) {
+      return *down_cast<const ConstantType*>(cur_entry);
     }
   }
   ConstantType* entry;
@@ -543,10 +508,10 @@
 
 const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
+    const RegType* cur_entry = entries_[i];
     if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
-        (down_cast<ConstantType*>(cur_entry))->ConstantValueHi() == value) {
-      return *down_cast<ConstantType*>(cur_entry);
+        (down_cast<const ConstantType*>(cur_entry))->ConstantValueHi() == value) {
+      return *down_cast<const ConstantType*>(cur_entry);
     }
   }
   ConstantType* entry;
@@ -583,15 +548,15 @@
 
 void RegTypeCache::Dump(std::ostream& os) {
   for (size_t i = 0; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
-    if (cur_entry != NULL) {
+    const RegType* cur_entry = entries_[i];
+    if (cur_entry != nullptr) {
       os << i << ": " << cur_entry->Dump() << "\n";
     }
   }
 }
 
 void RegTypeCache::VisitRoots(RootCallback* callback, void* arg) {
-  for (RegType* entry : entries_) {
+  for (const RegType* entry : entries_) {
     entry->VisitRoots(callback, arg);
   }
 }
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 8baf3ff..eb17a52 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -68,14 +68,6 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const RegType& FromUnresolvedSuperClass(const RegType& child)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    // String is final and therefore always precise.
-    return From(NULL, "Ljava/lang/String;", true);
-  }
-  const RegType& JavaLangThrowable(bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return From(NULL, "Ljava/lang/Throwable;", precise);
-  }
   const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromCat1Const(0, true);
   }
@@ -85,48 +77,48 @@
   size_t GetCacheSize() {
     return entries_.size();
   }
-  const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const BooleanType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *BooleanType::GetInstance();
   }
-  const RegType& Byte() {
+  const ByteType& Byte() {
     return *ByteType::GetInstance();
   }
-  const RegType& Char()  {
+  const CharType& Char()  {
     return *CharType::GetInstance();
   }
-  const RegType& Short()  {
+  const ShortType& Short()  {
     return *ShortType::GetInstance();
   }
-  const RegType& Integer() {
+  const IntegerType& Integer() {
     return *IntegerType::GetInstance();
   }
-  const RegType& Float() {
+  const FloatType& Float() {
     return *FloatType::GetInstance();
   }
-  const RegType& LongLo() {
+  const LongLoType& LongLo() {
     return *LongLoType::GetInstance();
   }
-  const RegType& LongHi() {
+  const LongHiType& LongHi() {
     return *LongHiType::GetInstance();
   }
-  const RegType& DoubleLo() {
+  const DoubleLoType& DoubleLo() {
     return *DoubleLoType::GetInstance();
   }
-  const RegType& DoubleHi() {
+  const DoubleHiType& DoubleHi() {
     return *DoubleHiType::GetInstance();
   }
-  const RegType& Undefined() {
+  const UndefinedType& Undefined() {
     return *UndefinedType::GetInstance();
   }
-  const RegType& Conflict() {
+  const ConflictType& Conflict() {
     return *ConflictType::GetInstance();
   }
-  const RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return From(NULL, "Ljava/lang/Class;", precise);
-  }
-  const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return From(NULL, "Ljava/lang/Object;", precise);
-  }
+
+  const PreciseReferenceType& JavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const PreciseReferenceType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const RegType& JavaLangThrowable(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Create an uninitialized 'this' argument for the given type.
@@ -159,17 +151,14 @@
   void AddEntry(RegType* new_entry);
 
   template <class Type>
-  static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
+  static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void CreatePrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // The actual storage for the RegTypes.
-  std::vector<RegType*> entries_;
-
   // A quick look up for popular small constants.
   static constexpr int32_t kMinSmallConstant = -1;
   static constexpr int32_t kMaxSmallConstant = 4;
-  static PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+  static const PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
 
   static constexpr size_t kNumPrimitivesAndSmallConstants =
       12 + (kMaxSmallConstant - kMinSmallConstant + 1);
@@ -180,6 +169,9 @@
   // Number of well known primitives that will be copied into a RegTypeCache upon construction.
   static uint16_t primitive_count_;
 
+  // The actual storage for the RegTypes.
+  std::vector<const RegType*> entries_;
+
   // Whether or not we're allowed to load classes.
   const bool can_load_classes_;
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 9dc0df1..aad3b5a 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -21,6 +21,7 @@
 #include "base/casts.h"
 #include "common_runtime_test.h"
 #include "reg_type_cache-inl.h"
+#include "reg_type-inl.h"
 #include "scoped_thread_state_change.h"
 #include "thread-inl.h"
 
@@ -346,7 +347,7 @@
   RegTypeCache cache(true);
   const RegType& imprecise_obj = cache.JavaLangObject(false);
   const RegType& precise_obj = cache.JavaLangObject(true);
-  const RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+  const RegType& precise_obj_2 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
 
   EXPECT_TRUE(precise_obj.Equals(precise_obj_2));
   EXPECT_FALSE(imprecise_obj.Equals(precise_obj));
@@ -359,11 +360,11 @@
   // a hit second time.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
 
-  const RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  const RegType& ref_type_1 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.Equals(ref_type_1));
 
   const RegType& unresolved_super_class =  cache.FromUnresolvedSuperClass(ref_type_0);
@@ -375,9 +376,9 @@
   // Tests creating types uninitialized types from unresolved types.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
-  const RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  const RegType& ref_type = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.Equals(ref_type));
   // Create an uninitialized type of this unresolved type
   const RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
@@ -397,8 +398,8 @@
   // Tests types for proper Dump messages.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
-  const RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
+  const RegType& unresolved_ref = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
+  const RegType& unresolved_ref_another = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistEither;", true);
   const RegType& resolved_ref = cache.JavaLangString();
   const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
   const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
@@ -424,7 +425,7 @@
   RegTypeCache cache(true);
   const RegType& ref_type = cache.JavaLangString();
   const RegType& ref_type_2 = cache.JavaLangString();
-  const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
+  const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/String;", true);
 
   EXPECT_TRUE(ref_type.Equals(ref_type_2));
   EXPECT_TRUE(ref_type_2.Equals(ref_type_3));
@@ -444,7 +445,7 @@
   RegTypeCache cache(true);
   const RegType& ref_type = cache.JavaLangObject(true);
   const RegType& ref_type_2 = cache.JavaLangObject(true);
-  const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+  const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
 
   EXPECT_TRUE(ref_type.Equals(ref_type_2));
   EXPECT_TRUE(ref_type_3.Equals(ref_type_2));
@@ -459,9 +460,9 @@
   const RegType& Object = cache_new.JavaLangObject(true);
   EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
   // Merge two unresolved types.
-  const RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  const RegType& ref_type_0 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
-  const RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
+  const RegType& ref_type_1 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistToo;", true);
   EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
 
   const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 0989cd0..219e687 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -25,10 +25,135 @@
 namespace art {
 namespace verifier {
 
-inline const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
+inline const RegType& RegisterLine::GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const {
   // The register index was validated during the static pass, so we don't need to check it here.
   DCHECK_LT(vsrc, num_regs_);
-  return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
+  return verifier->GetRegTypeCache()->GetFromId(line_[vsrc]);
+}
+
+inline bool RegisterLine::SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
+                                          const RegType& new_type) {
+  DCHECK_LT(vdst, num_regs_);
+  if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
+        << new_type << "'";
+    return false;
+  } else if (new_type.IsConflict()) {  // should only be set during a merge
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Set register to unknown type " << new_type;
+    return false;
+  } else {
+    line_[vdst] = new_type.GetId();
+  }
+  // Clear the monitor entry bits for this register.
+  ClearAllRegToLockDepths(vdst);
+  return true;
+}
+
+inline bool RegisterLine::SetRegisterTypeWide(MethodVerifier* verifier, uint32_t vdst,
+                                              const RegType& new_type1,
+                                              const RegType& new_type2) {
+  DCHECK_LT(vdst + 1, num_regs_);
+  if (!new_type1.CheckWidePair(new_type2)) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
+        << new_type1 << "' '" << new_type2 << "'";
+    return false;
+  } else {
+    line_[vdst] = new_type1.GetId();
+    line_[vdst + 1] = new_type2.GetId();
+  }
+  // Clear the monitor entry bits for this register.
+  ClearAllRegToLockDepths(vdst);
+  ClearAllRegToLockDepths(vdst + 1);
+  return true;
+}
+
+inline void RegisterLine::SetResultTypeToUnknown(MethodVerifier* verifier) {
+  result_[0] = verifier->GetRegTypeCache()->Undefined().GetId();
+  result_[1] = result_[0];
+}
+
+inline void RegisterLine::SetResultRegisterType(MethodVerifier* verifier, const RegType& new_type) {
+  DCHECK(!new_type.IsLowHalf());
+  DCHECK(!new_type.IsHighHalf());
+  result_[0] = new_type.GetId();
+  result_[1] = verifier->GetRegTypeCache()->Undefined().GetId();
+}
+
+inline void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
+                                                    const RegType& new_type2) {
+  DCHECK(new_type1.CheckWidePair(new_type2));
+  result_[0] = new_type1.GetId();
+  result_[1] = new_type2.GetId();
+}
+
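+// Copy a category-1 or reference value between registers; reference copies also carry the
+// monitor lock depths tracked for the source register.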
+inline void RegisterLine::CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc,
+                                 TypeCategory cat) {
+  DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
+  const RegType& type = GetRegisterType(verifier, vsrc);
+  if (!SetRegisterType(verifier, vdst, type)) {
+    return;
+  }
+  if ((cat == kTypeCategory1nr && !type.IsCategory1Types()) ||
+      (cat == kTypeCategoryRef && !type.IsReferenceTypes())) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy1 v" << vdst << "<-v" << vsrc << " type=" << type
+                                                 << " cat=" << static_cast<int>(cat);
+  } else if (cat == kTypeCategoryRef) {
+    CopyRegToLockDepth(vdst, vsrc);
+  }
+}
+
+inline void RegisterLine::CopyRegister2(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc) {
+  const RegType& type_l = GetRegisterType(verifier, vsrc);
+  const RegType& type_h = GetRegisterType(verifier, vsrc + 1);
+
+  if (!type_l.CheckWidePair(type_h)) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
+                                                 << " type=" << type_l << "/" << type_h;
+  } else {
+    SetRegisterTypeWide(verifier, vdst, type_l, type_h);
+  }
+}
+
+inline bool RegisterLine::VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc,
+                                             const RegType& check_type) {
+  // Verify the src register type against the check type refining the type of the register
+  const RegType& src_type = GetRegisterType(verifier, vsrc);
+  if (UNLIKELY(!check_type.IsAssignableFrom(src_type))) {
+    enum VerifyError fail_type;
+    if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) {
+      // Hard fail if one of the types is primitive, since they are concretely known.
+      fail_type = VERIFY_ERROR_BAD_CLASS_HARD;
+    } else if (check_type.IsUnresolvedTypes() || src_type.IsUnresolvedTypes()) {
+      fail_type = VERIFY_ERROR_NO_CLASS;
+    } else {
+      fail_type = VERIFY_ERROR_BAD_CLASS_SOFT;
+    }
+    verifier->Fail(fail_type) << "register v" << vsrc << " has type "
+                               << src_type << " but expected " << check_type;
+    return false;
+  }
+  if (check_type.IsLowHalf()) {
+    const RegType& src_type_h = GetRegisterType(verifier, vsrc + 1);
+    if (UNLIKELY(!src_type.CheckWidePair(src_type_h))) {
+      verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
+                                                   << src_type << "/" << src_type_h;
+      return false;
+    }
+  }
+  // The register at vsrc has a defined type, we know the lower-upper-bound, but this is less
+  // precise than the subtype in vsrc so leave it for reference types. For primitive types
+  // if they are a defined type then they are as precise as we can get, however, for constant
+  // types we may wish to refine them. Unfortunately constant propagation has rendered this useless.
+  return true;
+}
+
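+// Structured locking requires that all monitors acquired in a method are released before it
+// returns; report a hard failure if any are still held.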
+inline bool RegisterLine::VerifyMonitorStackEmpty(MethodVerifier* verifier) const {
+  if (MonitorStackDepth() != 0) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected empty monitor stack";
+    return false;
+  } else {
+    return true;
+  }
 }
 
 }  // namespace verifier
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 556056c..3139204 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -20,15 +20,16 @@
 #include "dex_instruction-inl.h"
 #include "method_verifier.h"
 #include "register_line-inl.h"
+#include "reg_type-inl.h"
 
 namespace art {
 namespace verifier {
 
-bool RegisterLine::CheckConstructorReturn() const {
+bool RegisterLine::CheckConstructorReturn(MethodVerifier* verifier) const {
   for (size_t i = 0; i < num_regs_; i++) {
-    if (GetRegisterType(i).IsUninitializedThisReference() ||
-        GetRegisterType(i).IsUnresolvedAndUninitializedThisReference()) {
-      verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
+    if (GetRegisterType(verifier, i).IsUninitializedThisReference() ||
+        GetRegisterType(verifier, i).IsUnresolvedAndUninitializedThisReference()) {
+      verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
           << "Constructor returning without calling superclass constructor";
       return false;
     }
@@ -36,122 +37,38 @@
   return true;
 }
 
-bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
-  DCHECK_LT(vdst, num_regs_);
-  if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
-        << new_type << "'";
-    return false;
-  } else if (new_type.IsConflict()) {  // should only be set during a merge
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Set register to unknown type " << new_type;
-    return false;
-  } else {
-    line_[vdst] = new_type.GetId();
-  }
-  // Clear the monitor entry bits for this register.
-  ClearAllRegToLockDepths(vdst);
-  return true;
-}
-
-bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1,
-                                       const RegType& new_type2) {
-  DCHECK_LT(vdst + 1, num_regs_);
-  if (!new_type1.CheckWidePair(new_type2)) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
-        << new_type1 << "' '" << new_type2 << "'";
-    return false;
-  } else {
-    line_[vdst] = new_type1.GetId();
-    line_[vdst + 1] = new_type2.GetId();
-  }
-  // Clear the monitor entry bits for this register.
-  ClearAllRegToLockDepths(vdst);
-  ClearAllRegToLockDepths(vdst + 1);
-  return true;
-}
-
-void RegisterLine::SetResultTypeToUnknown() {
-  result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
-  result_[1] = result_[0];
-}
-
-void RegisterLine::SetResultRegisterType(const RegType& new_type) {
-  DCHECK(!new_type.IsLowHalf());
-  DCHECK(!new_type.IsHighHalf());
-  result_[0] = new_type.GetId();
-  result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
-}
-
-void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
-                                             const RegType& new_type2) {
-  DCHECK(new_type1.CheckWidePair(new_type2));
-  result_[0] = new_type1.GetId();
-  result_[1] = new_type2.GetId();
-}
-
-const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
+const RegType& RegisterLine::GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
+                                               bool is_range) {
   const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
   if (args_count < 1) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
-    return verifier_->GetRegTypeCache()->Conflict();
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
+    return verifier->GetRegTypeCache()->Conflict();
   }
   /* Get the type of the 'this' argument, held in the first argument register (vC). */
   const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
-  const RegType& this_type = GetRegisterType(this_reg);
+  const RegType& this_type = GetRegisterType(verifier, this_reg);
   if (!this_type.IsReferenceTypes()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
                                                  << this_reg << " (type=" << this_type << ")";
-    return verifier_->GetRegTypeCache()->Conflict();
+    return verifier->GetRegTypeCache()->Conflict();
   }
   return this_type;
 }
 
-bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
-                                      const RegType& check_type) {
-  // Verify the src register type against the check type refining the type of the register
-  const RegType& src_type = GetRegisterType(vsrc);
-  if (!(check_type.IsAssignableFrom(src_type))) {
-    enum VerifyError fail_type;
-    if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) {
-      // Hard fail if one of the types is primitive, since they are concretely known.
-      fail_type = VERIFY_ERROR_BAD_CLASS_HARD;
-    } else if (check_type.IsUnresolvedTypes() || src_type.IsUnresolvedTypes()) {
-      fail_type = VERIFY_ERROR_NO_CLASS;
-    } else {
-      fail_type = VERIFY_ERROR_BAD_CLASS_SOFT;
-    }
-    verifier_->Fail(fail_type) << "register v" << vsrc << " has type "
-                               << src_type << " but expected " << check_type;
-    return false;
-  }
-  if (check_type.IsLowHalf()) {
-    const RegType& src_type_h = GetRegisterType(vsrc + 1);
-    if (!src_type.CheckWidePair(src_type_h)) {
-      verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
-                                                   << src_type << "/" << src_type_h;
-      return false;
-    }
-  }
-  // The register at vsrc has a defined type, we know the lower-upper-bound, but this is less
-  // precise than the subtype in vsrc so leave it for reference types. For primitive types
-  // if they are a defined type then they are as precise as we can get, however, for constant
-  // types we may wish to refine them. Unfortunately constant propagation has rendered this useless.
-  return true;
-}
-
-bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1,
+bool RegisterLine::VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsrc,
+                                          const RegType& check_type1,
                                           const RegType& check_type2) {
   DCHECK(check_type1.CheckWidePair(check_type2));
   // Verify the src register type against the check type refining the type of the register
-  const RegType& src_type = GetRegisterType(vsrc);
+  const RegType& src_type = GetRegisterType(verifier, vsrc);
   if (!check_type1.IsAssignableFrom(src_type)) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
                                << " but expected " << check_type1;
     return false;
   }
-  const RegType& src_type_h = GetRegisterType(vsrc + 1);
+  const RegType& src_type_h = GetRegisterType(verifier, vsrc + 1);
   if (!src_type.CheckWidePair(src_type_h)) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
         << src_type << "/" << src_type_h;
     return false;
   }
@@ -162,12 +79,12 @@
   return true;
 }
 
-void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) {
+void RegisterLine::MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type) {
   DCHECK(uninit_type.IsUninitializedTypes());
-  const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
+  const RegType& init_type = verifier->GetRegTypeCache()->FromUninitialized(uninit_type);
   size_t changed = 0;
   for (uint32_t i = 0; i < num_regs_; i++) {
-    if (GetRegisterType(i).Equals(uninit_type)) {
+    if (GetRegisterType(verifier, i).Equals(uninit_type)) {
       line_[i] = init_type.GetId();
       changed++;
     }
@@ -175,15 +92,15 @@
   DCHECK_GT(changed, 0u);
 }
 
-void RegisterLine::MarkAllRegistersAsConflicts() {
-  uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId();
+void RegisterLine::MarkAllRegistersAsConflicts(MethodVerifier* verifier) {
+  uint16_t conflict_type_id = verifier->GetRegTypeCache()->Conflict().GetId();
   for (uint32_t i = 0; i < num_regs_; i++) {
     line_[i] = conflict_type_id;
   }
 }
 
-void RegisterLine::MarkAllRegistersAsConflictsExcept(uint32_t vsrc) {
-  uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId();
+void RegisterLine::MarkAllRegistersAsConflictsExcept(MethodVerifier* verifier, uint32_t vsrc) {
+  uint16_t conflict_type_id = verifier->GetRegTypeCache()->Conflict().GetId();
   for (uint32_t i = 0; i < num_regs_; i++) {
     if (i != vsrc) {
       line_[i] = conflict_type_id;
@@ -191,8 +108,8 @@
   }
 }
 
-void RegisterLine::MarkAllRegistersAsConflictsExceptWide(uint32_t vsrc) {
-  uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId();
+void RegisterLine::MarkAllRegistersAsConflictsExceptWide(MethodVerifier* verifier, uint32_t vsrc) {
+  uint16_t conflict_type_id = verifier->GetRegTypeCache()->Conflict().GetId();
   for (uint32_t i = 0; i < num_regs_; i++) {
     if ((i != vsrc) && (i != (vsrc + 1))) {
       line_[i] = conflict_type_id;
@@ -200,11 +117,11 @@
   }
 }
 
-std::string RegisterLine::Dump() const {
+std::string RegisterLine::Dump(MethodVerifier* verifier) const {
   std::string result;
   for (size_t i = 0; i < num_regs_; i++) {
     result += StringPrintf("%zd:[", i);
-    result += GetRegisterType(i).Dump();
+    result += GetRegisterType(verifier, i).Dump();
     result += "],";
   }
   for (const auto& monitor : monitors_) {
@@ -213,52 +130,25 @@
   return result;
 }
 
-void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
+void RegisterLine::MarkUninitRefsAsInvalid(MethodVerifier* verifier, const RegType& uninit_type) {
   for (size_t i = 0; i < num_regs_; i++) {
-    if (GetRegisterType(i).Equals(uninit_type)) {
-      line_[i] = verifier_->GetRegTypeCache()->Conflict().GetId();
+    if (GetRegisterType(verifier, i).Equals(uninit_type)) {
+      line_[i] = verifier->GetRegTypeCache()->Conflict().GetId();
       ClearAllRegToLockDepths(i);
     }
   }
 }
 
-void RegisterLine::CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) {
-  DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
-  const RegType& type = GetRegisterType(vsrc);
-  if (!SetRegisterType(vdst, type)) {
-    return;
-  }
-  if ((cat == kTypeCategory1nr && !type.IsCategory1Types()) ||
-      (cat == kTypeCategoryRef && !type.IsReferenceTypes())) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy1 v" << vdst << "<-v" << vsrc << " type=" << type
-                                                 << " cat=" << static_cast<int>(cat);
-  } else if (cat == kTypeCategoryRef) {
-    CopyRegToLockDepth(vdst, vsrc);
-  }
-}
-
-void RegisterLine::CopyRegister2(uint32_t vdst, uint32_t vsrc) {
-  const RegType& type_l = GetRegisterType(vsrc);
-  const RegType& type_h = GetRegisterType(vsrc + 1);
-
-  if (!type_l.CheckWidePair(type_h)) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
-                                                 << " type=" << type_l << "/" << type_h;
-  } else {
-    SetRegisterTypeWide(vdst, type_l, type_h);
-  }
-}
-
-void RegisterLine::CopyResultRegister1(uint32_t vdst, bool is_reference) {
-  const RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+void RegisterLine::CopyResultRegister1(MethodVerifier* verifier, uint32_t vdst, bool is_reference) {
+  const RegType& type = verifier->GetRegTypeCache()->GetFromId(result_[0]);
   if ((!is_reference && !type.IsCategory1Types()) ||
       (is_reference && !type.IsReferenceTypes())) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
         << "copyRes1 v" << vdst << "<- result0"  << " type=" << type;
   } else {
-    DCHECK(verifier_->GetRegTypeCache()->GetFromId(result_[1]).IsUndefined());
-    SetRegisterType(vdst, type);
-    result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
+    DCHECK(verifier->GetRegTypeCache()->GetFromId(result_[1]).IsUndefined());
+    SetRegisterType(verifier, vdst, type);
+    result_[0] = verifier->GetRegTypeCache()->Undefined().GetId();
   }
 }
 
@@ -266,178 +156,179 @@
  * Implement "move-result-wide". Copy the category-2 value from the result
  * register to another register, and reset the result register.
  */
-void RegisterLine::CopyResultRegister2(uint32_t vdst) {
-  const RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
-  const RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
+void RegisterLine::CopyResultRegister2(MethodVerifier* verifier, uint32_t vdst) {
+  const RegType& type_l = verifier->GetRegTypeCache()->GetFromId(result_[0]);
+  const RegType& type_h = verifier->GetRegTypeCache()->GetFromId(result_[1]);
   if (!type_l.IsCategory2Types()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
         << "copyRes2 v" << vdst << "<- result0"  << " type=" << type_l;
   } else {
     DCHECK(type_l.CheckWidePair(type_h));  // Set should never allow this case
-    SetRegisterTypeWide(vdst, type_l, type_h);  // also sets the high
-    result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
-    result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
+    SetRegisterTypeWide(verifier, vdst, type_l, type_h);  // also sets the high
+    result_[0] = verifier->GetRegTypeCache()->Undefined().GetId();
+    result_[1] = verifier->GetRegTypeCache()->Undefined().GetId();
   }
 }
 
-void RegisterLine::CheckUnaryOp(const Instruction* inst,
-                                const RegType& dst_type,
-                                const RegType& src_type) {
-  if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
-    SetRegisterType(inst->VRegA_12x(), dst_type);
+void RegisterLine::CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst,
+                                const RegType& dst_type, const RegType& src_type) {
+  if (VerifyRegisterType(verifier, inst->VRegB_12x(), src_type)) {
+    SetRegisterType(verifier, inst->VRegA_12x(), dst_type);
   }
 }
 
-void RegisterLine::CheckUnaryOpWide(const Instruction* inst,
+void RegisterLine::CheckUnaryOpWide(MethodVerifier* verifier, const Instruction* inst,
                                     const RegType& dst_type1, const RegType& dst_type2,
                                     const RegType& src_type1, const RegType& src_type2) {
-  if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
-    SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
+  if (VerifyRegisterTypeWide(verifier, inst->VRegB_12x(), src_type1, src_type2)) {
+    SetRegisterTypeWide(verifier, inst->VRegA_12x(), dst_type1, dst_type2);
   }
 }
 
-void RegisterLine::CheckUnaryOpToWide(const Instruction* inst,
+void RegisterLine::CheckUnaryOpToWide(MethodVerifier* verifier, const Instruction* inst,
                                       const RegType& dst_type1, const RegType& dst_type2,
                                       const RegType& src_type) {
-  if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
-    SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
+  if (VerifyRegisterType(verifier, inst->VRegB_12x(), src_type)) {
+    SetRegisterTypeWide(verifier, inst->VRegA_12x(), dst_type1, dst_type2);
   }
 }
 
-void RegisterLine::CheckUnaryOpFromWide(const Instruction* inst,
+void RegisterLine::CheckUnaryOpFromWide(MethodVerifier* verifier, const Instruction* inst,
                                         const RegType& dst_type,
                                         const RegType& src_type1, const RegType& src_type2) {
-  if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
-    SetRegisterType(inst->VRegA_12x(), dst_type);
+  if (VerifyRegisterTypeWide(verifier, inst->VRegB_12x(), src_type1, src_type2)) {
+    SetRegisterType(verifier, inst->VRegA_12x(), dst_type);
   }
 }
 
-void RegisterLine::CheckBinaryOp(const Instruction* inst,
+void RegisterLine::CheckBinaryOp(MethodVerifier* verifier, const Instruction* inst,
                                  const RegType& dst_type,
                                  const RegType& src_type1, const RegType& src_type2,
                                  bool check_boolean_op) {
   const uint32_t vregB = inst->VRegB_23x();
   const uint32_t vregC = inst->VRegC_23x();
-  if (VerifyRegisterType(vregB, src_type1) &&
-      VerifyRegisterType(vregC, src_type2)) {
+  if (VerifyRegisterType(verifier, vregB, src_type1) &&
+      VerifyRegisterType(verifier, vregC, src_type2)) {
     if (check_boolean_op) {
       DCHECK(dst_type.IsInteger());
-      if (GetRegisterType(vregB).IsBooleanTypes() &&
-          GetRegisterType(vregC).IsBooleanTypes()) {
-        SetRegisterType(inst->VRegA_23x(), verifier_->GetRegTypeCache()->Boolean());
+      if (GetRegisterType(verifier, vregB).IsBooleanTypes() &&
+          GetRegisterType(verifier, vregC).IsBooleanTypes()) {
+        SetRegisterType(verifier, inst->VRegA_23x(), verifier->GetRegTypeCache()->Boolean());
         return;
       }
     }
-    SetRegisterType(inst->VRegA_23x(), dst_type);
+    SetRegisterType(verifier, inst->VRegA_23x(), dst_type);
   }
 }
 
-void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
+void RegisterLine::CheckBinaryOpWide(MethodVerifier* verifier, const Instruction* inst,
                                      const RegType& dst_type1, const RegType& dst_type2,
                                      const RegType& src_type1_1, const RegType& src_type1_2,
                                      const RegType& src_type2_1, const RegType& src_type2_2) {
-  if (VerifyRegisterTypeWide(inst->VRegB_23x(), src_type1_1, src_type1_2) &&
-      VerifyRegisterTypeWide(inst->VRegC_23x(), src_type2_1, src_type2_2)) {
-    SetRegisterTypeWide(inst->VRegA_23x(), dst_type1, dst_type2);
+  if (VerifyRegisterTypeWide(verifier, inst->VRegB_23x(), src_type1_1, src_type1_2) &&
+      VerifyRegisterTypeWide(verifier, inst->VRegC_23x(), src_type2_1, src_type2_2)) {
+    SetRegisterTypeWide(verifier, inst->VRegA_23x(), dst_type1, dst_type2);
   }
 }
 
-void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
+void RegisterLine::CheckBinaryOpWideShift(MethodVerifier* verifier, const Instruction* inst,
                                           const RegType& long_lo_type, const RegType& long_hi_type,
                                           const RegType& int_type) {
-  if (VerifyRegisterTypeWide(inst->VRegB_23x(), long_lo_type, long_hi_type) &&
-      VerifyRegisterType(inst->VRegC_23x(), int_type)) {
-    SetRegisterTypeWide(inst->VRegA_23x(), long_lo_type, long_hi_type);
+  if (VerifyRegisterTypeWide(verifier, inst->VRegB_23x(), long_lo_type, long_hi_type) &&
+      VerifyRegisterType(verifier, inst->VRegC_23x(), int_type)) {
+    SetRegisterTypeWide(verifier, inst->VRegA_23x(), long_lo_type, long_hi_type);
   }
 }
 
-void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
+void RegisterLine::CheckBinaryOp2addr(MethodVerifier* verifier, const Instruction* inst,
                                       const RegType& dst_type, const RegType& src_type1,
                                       const RegType& src_type2, bool check_boolean_op) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
-  if (VerifyRegisterType(vregA, src_type1) &&
-      VerifyRegisterType(vregB, src_type2)) {
+  if (VerifyRegisterType(verifier, vregA, src_type1) &&
+      VerifyRegisterType(verifier, vregB, src_type2)) {
     if (check_boolean_op) {
       DCHECK(dst_type.IsInteger());
-      if (GetRegisterType(vregA).IsBooleanTypes() &&
-          GetRegisterType(vregB).IsBooleanTypes()) {
-        SetRegisterType(vregA, verifier_->GetRegTypeCache()->Boolean());
+      if (GetRegisterType(verifier, vregA).IsBooleanTypes() &&
+          GetRegisterType(verifier, vregB).IsBooleanTypes()) {
+        SetRegisterType(verifier, vregA, verifier->GetRegTypeCache()->Boolean());
         return;
       }
     }
-    SetRegisterType(vregA, dst_type);
+    SetRegisterType(verifier, vregA, dst_type);
   }
 }
 
-void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
+void RegisterLine::CheckBinaryOp2addrWide(MethodVerifier* verifier, const Instruction* inst,
                                           const RegType& dst_type1, const RegType& dst_type2,
                                           const RegType& src_type1_1, const RegType& src_type1_2,
                                           const RegType& src_type2_1, const RegType& src_type2_2) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
-  if (VerifyRegisterTypeWide(vregA, src_type1_1, src_type1_2) &&
-      VerifyRegisterTypeWide(vregB, src_type2_1, src_type2_2)) {
-    SetRegisterTypeWide(vregA, dst_type1, dst_type2);
+  if (VerifyRegisterTypeWide(verifier, vregA, src_type1_1, src_type1_2) &&
+      VerifyRegisterTypeWide(verifier, vregB, src_type2_1, src_type2_2)) {
+    SetRegisterTypeWide(verifier, vregA, dst_type1, dst_type2);
   }
 }
 
-void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
+void RegisterLine::CheckBinaryOp2addrWideShift(MethodVerifier* verifier, const Instruction* inst,
                                                const RegType& long_lo_type, const RegType& long_hi_type,
                                                const RegType& int_type) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
-  if (VerifyRegisterTypeWide(vregA, long_lo_type, long_hi_type) &&
-      VerifyRegisterType(vregB, int_type)) {
-    SetRegisterTypeWide(vregA, long_lo_type, long_hi_type);
+  if (VerifyRegisterTypeWide(verifier, vregA, long_lo_type, long_hi_type) &&
+      VerifyRegisterType(verifier, vregB, int_type)) {
+    SetRegisterTypeWide(verifier, vregA, long_lo_type, long_hi_type);
   }
 }
 
-void RegisterLine::CheckLiteralOp(const Instruction* inst,
+void RegisterLine::CheckLiteralOp(MethodVerifier* verifier, const Instruction* inst,
                                   const RegType& dst_type, const RegType& src_type,
                                   bool check_boolean_op, bool is_lit16) {
   const uint32_t vregA = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
   const uint32_t vregB = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
-  if (VerifyRegisterType(vregB, src_type)) {
+  if (VerifyRegisterType(verifier, vregB, src_type)) {
     if (check_boolean_op) {
       DCHECK(dst_type.IsInteger());
       /* check vB with the call, then check the constant manually */
       const uint32_t val = is_lit16 ? inst->VRegC_22s() : inst->VRegC_22b();
-      if (GetRegisterType(vregB).IsBooleanTypes() && (val == 0 || val == 1)) {
-        SetRegisterType(vregA, verifier_->GetRegTypeCache()->Boolean());
+      if (GetRegisterType(verifier, vregB).IsBooleanTypes() && (val == 0 || val == 1)) {
+        SetRegisterType(verifier, vregA, verifier->GetRegTypeCache()->Boolean());
         return;
       }
     }
-    SetRegisterType(vregA, dst_type);
+    SetRegisterType(verifier, vregA, dst_type);
   }
 }
 
-void RegisterLine::PushMonitor(uint32_t reg_idx, int32_t insn_idx) {
-  const RegType& reg_type = GetRegisterType(reg_idx);
+void RegisterLine::PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx) {
+  const RegType& reg_type = GetRegisterType(verifier, reg_idx);
   if (!reg_type.IsReferenceTypes()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object (" << reg_type << ")";
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object ("
+        << reg_type << ")";
   } else if (monitors_.size() >= 32) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter stack overflow: " << monitors_.size();
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter stack overflow: "
+        << monitors_.size();
   } else {
     SetRegToLockDepth(reg_idx, monitors_.size());
     monitors_.push_back(insn_idx);
   }
 }
 
-void RegisterLine::PopMonitor(uint32_t reg_idx) {
-  const RegType& reg_type = GetRegisterType(reg_idx);
+void RegisterLine::PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) {
+  const RegType& reg_type = GetRegisterType(verifier, reg_idx);
   if (!reg_type.IsReferenceTypes()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
   } else if (monitors_.empty()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit stack underflow";
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit stack underflow";
   } else {
     monitors_.pop_back();
     if (!IsSetLockDepth(reg_idx, monitors_.size())) {
       // Bug 3215458: Locks and unlocks are on objects; if that object is a literal, then before
       // format "036" the constant collector may create unlocks on the same object but referenced
       // via different registers.
-      ((verifier_->DexFileVersion() >= 36) ? verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
-                                           : verifier_->LogVerifyInfo())
+      ((verifier->DexFileVersion() >= 36) ? verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
+                                          : verifier->LogVerifyInfo())
             << "monitor-exit not unlocking the top of the monitor stack";
     } else {
       // Record the register was unlocked
@@ -446,41 +337,34 @@
   }
 }
 
-bool RegisterLine::VerifyMonitorStackEmpty() const {
-  if (MonitorStackDepth() != 0) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected empty monitor stack";
-    return false;
-  } else {
-    return true;
-  }
-}
-
-bool RegisterLine::MergeRegisters(const RegisterLine* incoming_line) {
+bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line) {
   bool changed = false;
   DCHECK(incoming_line != nullptr);
   for (size_t idx = 0; idx < num_regs_; idx++) {
     if (line_[idx] != incoming_line->line_[idx]) {
-      const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
-      const RegType& cur_type = GetRegisterType(idx);
-      const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
+      const RegType& incoming_reg_type = incoming_line->GetRegisterType(verifier, idx);
+      const RegType& cur_type = GetRegisterType(verifier, idx);
+      const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier->GetRegTypeCache());
       changed = changed || !cur_type.Equals(new_type);
       line_[idx] = new_type.GetId();
     }
   }
-  if (monitors_.size() != incoming_line->monitors_.size()) {
-    LOG(WARNING) << "mismatched stack depths (depth=" << MonitorStackDepth()
-                 << ", incoming depth=" << incoming_line->MonitorStackDepth() << ")";
-  } else if (reg_to_lock_depths_ != incoming_line->reg_to_lock_depths_) {
-    for (uint32_t idx = 0; idx < num_regs_; idx++) {
-      size_t depths = reg_to_lock_depths_.count(idx);
-      size_t incoming_depths = incoming_line->reg_to_lock_depths_.count(idx);
-      if (depths != incoming_depths) {
-        if (depths == 0 || incoming_depths == 0) {
-          reg_to_lock_depths_.erase(idx);
-        } else {
-          LOG(WARNING) << "mismatched stack depths for register v" << idx
-                       << ": " << depths  << " != " << incoming_depths;
-          break;
+  if (monitors_.size() > 0 || incoming_line->monitors_.size() > 0) {
+    if (monitors_.size() != incoming_line->monitors_.size()) {
+      LOG(WARNING) << "mismatched stack depths (depth=" << MonitorStackDepth()
+                     << ", incoming depth=" << incoming_line->MonitorStackDepth() << ")";
+    } else if (reg_to_lock_depths_ != incoming_line->reg_to_lock_depths_) {
+      for (uint32_t idx = 0; idx < num_regs_; idx++) {
+        size_t depths = reg_to_lock_depths_.count(idx);
+        size_t incoming_depths = incoming_line->reg_to_lock_depths_.count(idx);
+        if (depths != incoming_depths) {
+          if (depths == 0 || incoming_depths == 0) {
+            reg_to_lock_depths_.erase(idx);
+          } else {
+            LOG(WARNING) << "mismatched stack depths for register v" << idx
+                << ": " << depths  << " != " << incoming_depths;
+            break;
+          }
         }
       }
     }
@@ -488,12 +372,13 @@
   return changed;
 }
 
-void RegisterLine::WriteReferenceBitMap(std::vector<uint8_t>& data, size_t max_bytes) {
+void RegisterLine::WriteReferenceBitMap(MethodVerifier* verifier,
+                                        std::vector<uint8_t>* data, size_t max_bytes) {
   for (size_t i = 0; i < num_regs_; i += 8) {
     uint8_t val = 0;
     for (size_t j = 0; j < 8 && (i + j) < num_regs_; j++) {
       // Note: we write 1 for a Reference but not for Null
-      if (GetRegisterType(i + j).IsNonZeroReferenceTypes()) {
+      if (GetRegisterType(verifier, i + j).IsNonZeroReferenceTypes()) {
         val |= 1 << j;
       }
     }
@@ -502,15 +387,9 @@
       continue;
     }
     DCHECK_LT(i / 8, max_bytes) << "val=" << static_cast<uint32_t>(val);
-    data.push_back(val);
+    data->push_back(val);
   }
 }
 
-std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  os << rhs.Dump();
-  return os;
-}
-
 }  // namespace verifier
 }  // namespace art
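
Two points worth calling out in register_line.cc: WriteReferenceBitMap now receives its output vector by pointer rather than by non-const reference, and MergeRegisters only walks the monitor and lock-depth state when at least one of the two lines actually holds a monitor. A rough sketch of that guard in isolation, with simplified, hypothetical types (LockState and NeedsLockMerge are illustrative, not ART code):

    // Sketch: the common case (no monitors held on either edge) skips the
    // monitor-stack and lock-depth comparison entirely.
    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <vector>

    struct LockState {
      std::vector<uint32_t> monitors;                    // lock sites, innermost last
      std::map<uint32_t, uint32_t> reg_to_lock_depths;   // register -> lock-depth bits
    };

    bool NeedsLockMerge(const LockState& cur, const LockState& incoming) {
      if (cur.monitors.empty() && incoming.monitors.empty()) {
        return false;  // nothing to reconcile; avoid comparing the maps at all
      }
      return cur.monitors.size() != incoming.monitors.size() ||
             cur.reg_to_lock_depths != incoming.reg_to_lock_depths;
    }

    int main() {
      LockState a, b;
      std::printf("empty edges need merge: %d\n", NeedsLockMerge(a, b));  // 0
      a.monitors.push_back(5);
      std::printf("after monitor-enter:    %d\n", NeedsLockMerge(a, b));  // 1
      return 0;
    }
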
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index a9d0dbb..c7fd369 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -57,50 +57,54 @@
   }
 
   // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
-  void CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat)
+  void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This
   // copies both halves of the register.
-  void CopyRegister2(uint32_t vdst, uint32_t vsrc)
+  void CopyRegister2(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Implement "move-result". Copy the category-1 value from the result register to another
   // register, and reset the result register.
-  void CopyResultRegister1(uint32_t vdst, bool is_reference)
+  void CopyResultRegister1(MethodVerifier* verifier, uint32_t vdst, bool is_reference)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Implement "move-result-wide". Copy the category-2 value from the result register to another
   // register, and reset the result register.
-  void CopyResultRegister2(uint32_t vdst)
+  void CopyResultRegister2(MethodVerifier* verifier, uint32_t vdst)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Set the invisible result register to unknown
-  void SetResultTypeToUnknown() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Set the type of register N, verifying that the register is valid.  If "newType" is the "Lo"
   // part of a 64-bit value, register N+1 will be set to "newType+1".
   // The register index was validated during the static pass, so we don't need to check it here.
-  bool SetRegisterType(uint32_t vdst, const RegType& new_type)
+  ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
+                                     const RegType& new_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1, const RegType& new_type2)
+  bool SetRegisterTypeWide(MethodVerifier* verifier, uint32_t vdst, const RegType& new_type1,
+                           const RegType& new_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /* Set the type of the "result" register. */
-  void SetResultRegisterType(const RegType& new_type)
+  void SetResultRegisterType(MethodVerifier* verifier, const RegType& new_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the type of register vsrc.
-  const RegType& GetRegisterType(uint32_t vsrc) const;
+  const RegType& GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const;
 
-  bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type)
+  ALWAYS_INLINE bool VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc,
+                                        const RegType& check_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1, const RegType& check_type2)
+  bool VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsrc, const RegType& check_type1,
+                              const RegType& check_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CopyFromLine(const RegisterLine* src) {
@@ -110,7 +114,7 @@
     reg_to_lock_depths_ = src->reg_to_lock_depths_;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump(MethodVerifier* verifier) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FillWithGarbage() {
     memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -126,7 +130,7 @@
    * to prevent them from being used (otherwise, MarkRefsAsInitialized would mark the old ones and
    * the new ones at the same time).
    */
-  void MarkUninitRefsAsInvalid(const RegType& uninit_type)
+  void MarkUninitRefsAsInvalid(MethodVerifier* verifier, const RegType& uninit_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -134,15 +138,15 @@
    * reference type. This is called when an appropriate constructor is invoked -- all copies of
    * the reference must be marked as initialized.
    */
-  void MarkRefsAsInitialized(const RegType& uninit_type)
+  void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
    * Update all registers to be Conflict except vsrc.
    */
-  void MarkAllRegistersAsConflicts();
-  void MarkAllRegistersAsConflictsExcept(uint32_t vsrc);
-  void MarkAllRegistersAsConflictsExceptWide(uint32_t vsrc);
+  void MarkAllRegistersAsConflicts(MethodVerifier* verifier);
+  void MarkAllRegistersAsConflictsExcept(MethodVerifier* verifier, uint32_t vsrc);
+  void MarkAllRegistersAsConflictsExceptWide(MethodVerifier* verifier, uint32_t vsrc);
 
   /*
    * Check constraints on constructor return. Specifically, make sure that the "this" argument got
@@ -151,7 +155,7 @@
    * of the list in slot 0. If we see a register with an uninitialized slot 0 reference, we know it
    * somehow didn't get initialized.
    */
-  bool CheckConstructorReturn() const;
+  bool CheckConstructorReturn(MethodVerifier* verifier) const;
 
   // Compare two register lines. Returns 0 if they match.
   // Using this for a sort is unwise, since the value can change based on machine endianness.
@@ -173,28 +177,29 @@
    * The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
    * versions. We just need to make sure vA is >= 1 and then return vC.
    */
-  const RegType& GetInvocationThis(const Instruction* inst, bool is_range)
+  const RegType& GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
+                                   bool is_range)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
    * Verify types for a simple two-register instruction (e.g. "neg-int").
    * "dst_type" is stored into vA, and "src_type" is verified against vB.
    */
-  void CheckUnaryOp(const Instruction* inst, const RegType& dst_type,
+  void CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type,
                     const RegType& src_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckUnaryOpWide(const Instruction* inst,
+  void CheckUnaryOpWide(MethodVerifier* verifier, const Instruction* inst,
                         const RegType& dst_type1, const RegType& dst_type2,
                         const RegType& src_type1, const RegType& src_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckUnaryOpToWide(const Instruction* inst,
+  void CheckUnaryOpToWide(MethodVerifier* verifier, const Instruction* inst,
                           const RegType& dst_type1, const RegType& dst_type2,
                           const RegType& src_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckUnaryOpFromWide(const Instruction* inst,
+  void CheckUnaryOpFromWide(MethodVerifier* verifier, const Instruction* inst,
                             const RegType& dst_type,
                             const RegType& src_type1, const RegType& src_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -204,18 +209,18 @@
    * "dst_type" is stored into vA, and "src_type1"/"src_type2" are verified
    * against vB/vC.
    */
-  void CheckBinaryOp(const Instruction* inst,
+  void CheckBinaryOp(MethodVerifier* verifier, const Instruction* inst,
                      const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
                      bool check_boolean_op)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckBinaryOpWide(const Instruction* inst,
+  void CheckBinaryOpWide(MethodVerifier* verifier, const Instruction* inst,
                          const RegType& dst_type1, const RegType& dst_type2,
                          const RegType& src_type1_1, const RegType& src_type1_2,
                          const RegType& src_type2_1, const RegType& src_type2_2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckBinaryOpWideShift(const Instruction* inst,
+  void CheckBinaryOpWideShift(MethodVerifier* verifier, const Instruction* inst,
                               const RegType& long_lo_type, const RegType& long_hi_type,
                               const RegType& int_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -224,19 +229,19 @@
    * Verify types for a binary "2addr" operation. "src_type1"/"src_type2"
    * are verified against vA/vB, then "dst_type" is stored into vA.
    */
-  void CheckBinaryOp2addr(const Instruction* inst,
+  void CheckBinaryOp2addr(MethodVerifier* verifier, const Instruction* inst,
                           const RegType& dst_type,
                           const RegType& src_type1, const RegType& src_type2,
                           bool check_boolean_op)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckBinaryOp2addrWide(const Instruction* inst,
+  void CheckBinaryOp2addrWide(MethodVerifier* verifier, const Instruction* inst,
                               const RegType& dst_type1, const RegType& dst_type2,
                               const RegType& src_type1_1, const RegType& src_type1_2,
                               const RegType& src_type2_1, const RegType& src_type2_2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckBinaryOp2addrWideShift(const Instruction* inst,
+  void CheckBinaryOp2addrWideShift(MethodVerifier* verifier, const Instruction* inst,
                                    const RegType& long_lo_type, const RegType& long_hi_type,
                                    const RegType& int_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -247,16 +252,18 @@
    *
    * If "check_boolean_op" is set, we use the constant value in vC.
    */
-  void CheckLiteralOp(const Instruction* inst,
+  void CheckLiteralOp(MethodVerifier* verifier, const Instruction* inst,
                       const RegType& dst_type, const RegType& src_type,
                       bool check_boolean_op, bool is_lit16)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx.
-  void PushMonitor(uint32_t reg_idx, int32_t insn_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Verify/pop monitor from monitor stack ensuring that we believe the monitor is locked
-  void PopMonitor(uint32_t reg_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PopMonitor(MethodVerifier* verifier, uint32_t reg_idx)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Stack of currently held monitors and where they were locked
   size_t MonitorStackDepth() const {
@@ -265,23 +272,23 @@
 
   // We expect no monitors to be held at certain points, such as when a method returns. Verify
   // the stack is empty, failing and returning false if not.
-  bool VerifyMonitorStackEmpty() const;
+  bool VerifyMonitorStackEmpty(MethodVerifier* verifier) const;
 
-  bool MergeRegisters(const RegisterLine* incoming_line)
+  bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t GetMaxNonZeroReferenceReg(size_t max_ref_reg) {
+  size_t GetMaxNonZeroReferenceReg(MethodVerifier* verifier, size_t max_ref_reg) {
     size_t i = static_cast<int>(max_ref_reg) < 0 ? 0 : max_ref_reg;
     for (; i < num_regs_; i++) {
-      if (GetRegisterType(i).IsNonZeroReferenceTypes()) {
+      if (GetRegisterType(verifier, i).IsNonZeroReferenceTypes()) {
         max_ref_reg = i;
       }
     }
     return max_ref_reg;
   }
 
-  // Write a bit at each register location that holds a reference
-  void WriteReferenceBitMap(std::vector<uint8_t>& data, size_t max_bytes);
+  // Write a bit at each register location that holds a reference.
+  void WriteReferenceBitMap(MethodVerifier* verifier, std::vector<uint8_t>* data, size_t max_bytes);
 
   size_t GetMonitorEnterCount() {
     return monitors_.size();
@@ -337,19 +344,17 @@
   }
 
   RegisterLine(size_t num_regs, MethodVerifier* verifier)
-      : verifier_(verifier), num_regs_(num_regs) {
+      : num_regs_(num_regs) {
     memset(&line_, 0, num_regs_ * sizeof(uint16_t));
-    SetResultTypeToUnknown();
+    SetResultTypeToUnknown(verifier);
   }
 
   // Storage for the result register's type, valid after an invocation
   uint16_t result_[2];
 
-  // Back link to the verifier
-  MethodVerifier* verifier_;
-
   // Length of reg_types_
   const uint32_t num_regs_;
+
   // A stack of monitor enter locations
   std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
   // A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
@@ -360,7 +365,6 @@
   // An array of RegType Ids associated with each dex register.
   uint16_t line_[0];
 };
-std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs);
 
 }  // namespace verifier
 }  // namespace art
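
In register_line.h the verifier_ back pointer and the operator<< that relied on it are gone; RegisterLine keeps only its result registers, register count, monitor state, and the trailing array of RegType ids. A hedged sketch of why removing one pointer matters for such a trailing-array object (field names and sizes are illustrative; the real class also carries the monitor containers):

    // Sketch: fixed-header size with and without the back pointer, for an
    // object that ends in a GNU-style trailing array (as RegisterLine does).
    #include <cstdint>
    #include <cstdio>

    struct WithBackPtr {
      uint16_t result[2];
      void* verifier;        // pointer plus padding on 64-bit targets
      uint32_t num_regs;
      uint16_t line[0];      // per-register RegType ids follow the header
    };

    struct WithoutBackPtr {
      uint16_t result[2];
      uint32_t num_regs;
      uint16_t line[0];
    };

    int main() {
      // One such object is kept per interesting dex pc, so the per-object
      // saving multiplies across every method being verified.
      std::printf("%zu vs %zu bytes of fixed header\n",
                  sizeof(WithBackPtr), sizeof(WithoutBackPtr));
      return 0;
    }
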