Merge "Allow devirtualized method to be intrinsified."
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 92dd19e..987298b 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -151,8 +151,14 @@
       Dbg::IsDebuggerActive() ||
       // An async exception has been thrown. We need to go to the switch interpreter. MTerp doesn't
       // know how to deal with these so we could end up never dealing with it if we are in an
-      // infinite loop.
-      UNLIKELY(Thread::Current()->IsAsyncExceptionPending());
+      // infinite loop. Since this can be called in a tight loop and getting the current thread
+      // requires a TLS read, we first check a short-circuit runtime flag that is only set when
+      // something tries to throw an async exception. This makes this function faster in the
+      // common case where no async exception has ever been thrown. We don't need to worry about
+      // synchronization on the runtime flag since it is only set in a checkpoint, which will
+      // either take place on the current thread or act as a synchronization point.
+      (UNLIKELY(runtime->AreAsyncExceptionsThrown()) &&
+       Thread::Current()->IsAsyncExceptionPending());
 }
 
 
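The mterp change above is an instance of a simple fast-path pattern: guard a comparatively expensive per-thread TLS read behind a cheap runtime flag that only ever transitions from false to true. A minimal, self-contained sketch of that pattern follows; the Runtime/Thread types and the main() driver are hypothetical stand-ins written for illustration, not the real ART classes.

// Minimal sketch of the short-circuit pattern; the types here are illustrative stand-ins.
#include <iostream>

namespace sketch {

struct Thread {
  bool async_exception_pending = false;  // Per-thread state, normally reached via a TLS read.
  static Thread* Current() {
    static thread_local Thread self;  // Stand-in for ART's TLS lookup.
    return &self;
  }
};

struct Runtime {
  // Only ever transitions false -> true, and is set inside a checkpoint that either runs on the
  // current thread or acts as a synchronization point, so a plain read suffices on the fast path.
  bool async_exceptions_thrown = false;
  static Runtime* Current() {
    static Runtime instance;
    return &instance;
  }
};

bool ShouldSwitchInterpreters() {
  // Fast path: skip the per-thread lookup entirely while no async exception was ever thrown.
  return Runtime::Current()->async_exceptions_thrown &&
         Thread::Current()->async_exception_pending;
}

}  // namespace sketch

int main() {
  std::cout << sketch::ShouldSwitchInterpreters() << "\n";  // 0: fast path short-circuits.
  sketch::Runtime::Current()->async_exceptions_thrown = true;
  sketch::Thread::Current()->async_exception_pending = true;
  std::cout << sketch::ShouldSwitchInterpreters() << "\n";  // 1: slow path confirms per-thread state.
  return 0;
}
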
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index bb8e5e5..bb0048d 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -1582,7 +1582,11 @@
       for (uint32_t method_idx = 0; method_idx < dex_data->num_method_ids; ++method_idx) {
         MethodHotness hotness_info(dex_data->GetHotnessInfo(method_idx));
         if (startup ? hotness_info.IsStartup() : hotness_info.IsPostStartup()) {
-          os << method_idx << ", ";
+          if (dex_file != nullptr) {
+            os << "\n\t\t" << dex_file->PrettyMethod(method_idx, true);
+          } else {
+            os << method_idx << ", ";
+          }
         }
       }
       if (startup == false) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f09b6c9..d15de38 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -256,6 +256,7 @@
       force_native_bridge_(false),
       is_native_bridge_loaded_(false),
       is_native_debuggable_(false),
+      async_exceptions_thrown_(false),
       is_java_debuggable_(false),
       zygote_max_failed_boots_(0),
       experimental_flags_(ExperimentalFlags::kNone),
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 6b01cc2..476b71f 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -586,6 +586,14 @@
     is_native_debuggable_ = value;
   }
 
+  bool AreAsyncExceptionsThrown() const {
+    return async_exceptions_thrown_;
+  }
+
+  void SetAsyncExceptionsThrown() {
+    async_exceptions_thrown_ = true;
+  }
+
   // Returns the build fingerprint, if set. Otherwise an empty string is returned.
   std::string GetFingerprint() {
     return fingerprint_;
@@ -899,6 +907,10 @@
   // Whether we are running under native debugger.
   bool is_native_debuggable_;
 
+  // Whether or not any async exceptions have ever been thrown. This is used to speed up the
+  // MterpShouldSwitchInterpreters function.
+  bool async_exceptions_thrown_;
+
   // Whether Java code needs to be debuggable.
   bool is_java_debuggable_;
 
diff --git a/runtime/thread.cc b/runtime/thread.cc
index bec1c90..cb350ed 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3700,6 +3700,7 @@
 
 void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) {
   CHECK(new_exception != nullptr);
+  Runtime::Current()->SetAsyncExceptionsThrown();
   if (kIsDebugBuild) {
     // Make sure we are in a checkpoint.
     MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 0029eb9..4808381 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -16,6 +16,8 @@
 
 #include "reg_type_cache-inl.h"
 
+#include <type_traits>
+
 #include "base/arena_bit_vector.h"
 #include "base/bit_vector-inl.h"
 #include "base/casts.h"
@@ -51,6 +53,7 @@
 }
 
 void RegTypeCache::FillPrimitiveAndSmallConstantTypes() {
+  // Note: this must have the same order as CreatePrimitiveAndSmallConstantTypes.
   entries_.push_back(UndefinedType::GetInstance());
   entries_.push_back(ConflictType::GetInstance());
   entries_.push_back(BooleanType::GetInstance());
@@ -314,33 +317,54 @@
   }
 }
 
-template <class Type>
-const Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
-  mirror::Class* klass = nullptr;
-  // Try loading the class from linker.
-  if (!descriptor.empty()) {
-    klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(),
-                                                                       descriptor.c_str());
-    DCHECK(klass != nullptr);
-  }
-  const Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
-  RegTypeCache::primitive_count_++;
-  return entry;
-}
+// Helper for create_primitive_type_instance lambda.
+namespace {
+template <typename T>
+struct TypeHelper {
+  using type = T;
+  static_assert(std::is_convertible<T*, RegType*>::value, "T must be a RegType");
+
+  const char* descriptor;
+
+  explicit TypeHelper(const char* d) : descriptor(d) {}
+};
+}  // namespace
 
 void RegTypeCache::CreatePrimitiveAndSmallConstantTypes() {
-  CreatePrimitiveTypeInstance<UndefinedType>("");
-  CreatePrimitiveTypeInstance<ConflictType>("");
-  CreatePrimitiveTypeInstance<BooleanType>("Z");
-  CreatePrimitiveTypeInstance<ByteType>("B");
-  CreatePrimitiveTypeInstance<ShortType>("S");
-  CreatePrimitiveTypeInstance<CharType>("C");
-  CreatePrimitiveTypeInstance<IntegerType>("I");
-  CreatePrimitiveTypeInstance<LongLoType>("J");
-  CreatePrimitiveTypeInstance<LongHiType>("J");
-  CreatePrimitiveTypeInstance<FloatType>("F");
-  CreatePrimitiveTypeInstance<DoubleLoType>("D");
-  CreatePrimitiveTypeInstance<DoubleHiType>("D");
+  // Note: this must have the same order as FillPrimitiveAndSmallConstantTypes.
+
+  // It is acceptable to pass the const char* stored in the TypeHelper on to CreateInstance, as all
+  // calls below use compile-time string constants with static lifetime. Keeping this in a local
+  // lambda ensures the helper cannot be used from anywhere else.
+  auto create_primitive_type_instance = [&](auto type) REQUIRES_SHARED(Locks::mutator_lock_) {
+    using Type = typename decltype(type)::type;
+    mirror::Class* klass = nullptr;
+    // Try loading the class from linker.
+    DCHECK(type.descriptor != nullptr);
+    if (strlen(type.descriptor) > 0) {
+      klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(),
+                                                                         type.descriptor);
+      DCHECK(klass != nullptr);
+    }
+    const Type* entry = Type::CreateInstance(klass,
+                                             type.descriptor,
+                                             RegTypeCache::primitive_count_);
+    RegTypeCache::primitive_count_++;
+    return entry;
+  };
+  create_primitive_type_instance(TypeHelper<UndefinedType>(""));
+  create_primitive_type_instance(TypeHelper<ConflictType>(""));
+  create_primitive_type_instance(TypeHelper<BooleanType>("Z"));
+  create_primitive_type_instance(TypeHelper<ByteType>("B"));
+  create_primitive_type_instance(TypeHelper<ShortType>("S"));
+  create_primitive_type_instance(TypeHelper<CharType>("C"));
+  create_primitive_type_instance(TypeHelper<IntegerType>("I"));
+  create_primitive_type_instance(TypeHelper<LongLoType>("J"));
+  create_primitive_type_instance(TypeHelper<LongHiType>("J"));
+  create_primitive_type_instance(TypeHelper<FloatType>("F"));
+  create_primitive_type_instance(TypeHelper<DoubleLoType>("D"));
+  create_primitive_type_instance(TypeHelper<DoubleHiType>("D"));
+
   for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
     PreciseConstType* type = new PreciseConstType(value, primitive_count_);
     small_precise_constants_[value - kMinSmallConstant] = type;
@@ -396,6 +420,9 @@
   if (resolved_parts_merged.IsConflict()) {
     return Conflict();
   }
+  if (resolved_parts_merged.IsJavaLangObject()) {
+    return resolved_parts_merged;
+  }
 
   bool resolved_merged_is_array = resolved_parts_merged.IsArrayTypes();
   if (left_unresolved_is_array || right_unresolved_is_array || resolved_merged_is_array) {
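The CreatePrimitiveAndSmallConstantTypes rewrite above replaces a private template member function with a tag struct plus a generic lambda: the tag carries both the static type and its descriptor, and the lambda recovers the type with decltype. A self-contained sketch of that mechanism, with a made-up Widget hierarchy standing in for the RegType classes, might look like this.

// Sketch of the tag-struct + generic-lambda pattern; the Widget types are made up for illustration.
#include <iostream>
#include <string>
#include <type_traits>

struct Widget {
  virtual ~Widget() {}
  virtual std::string Name() const = 0;
};
struct Circle : Widget { std::string Name() const override { return "circle"; } };
struct Square : Widget { std::string Name() const override { return "square"; } };

template <typename T>
struct Tag {
  using type = T;
  static_assert(std::is_convertible<T*, Widget*>::value, "T must be a Widget");
  const char* descriptor;
  explicit Tag(const char* d) : descriptor(d) {}
};

int main() {
  // The generic lambda replaces a separate template helper function: the concrete type is
  // smuggled in through the tag parameter and recovered with decltype.
  auto create = [](auto tag) {
    using Type = typename decltype(tag)::type;
    Type* instance = new Type();
    std::cout << tag.descriptor << " -> " << instance->Name() << "\n";
    return instance;
  };
  Widget* c = create(Tag<Circle>("C"));
  Widget* s = create(Tag<Square>("S"));
  delete c;
  delete s;
  return 0;
}
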
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index d090756..054a2af 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -171,9 +171,6 @@
   // verifier.
   StringPiece AddString(const StringPiece& string_piece);
 
-  template <class Type>
-  static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
-      REQUIRES_SHARED(Locks::mutator_lock_);
   static void CreatePrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // A quick look up for popular small constants.
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 1bc48ed..6edeece 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -664,6 +664,362 @@
   }
 }
 
+TEST_F(RegTypeTest, MergeSemiLatticeRef) {
+  //  (Incomplete) semilattice:
+  //
+  //  Excluded for now: * category-2 types
+  //                    * interfaces
+  //                    * all of category-1 primitive types, including constants.
+  //  This is to demonstrate/codify the reference side, mostly.
+  //
+  //  Note: It is not a real semilattice because int = float makes this wonky. :-(
+  //
+  //                                       Conflict
+  //                                           |
+  //      #---------#--------------------------#-----------------------------#
+  //      |         |                                                        |
+  //      |         |                                                      Object
+  //      |         |                                                        |
+  //     int   uninit types              #---------------#--------#------------------#---------#
+  //      |                              |               |        |                  |         |
+  //      |                  unresolved-merge-types      |      Object[]           char[]   byte[]
+  //      |                              |    |  |       |        |                  |         |
+  //      |                  unresolved-types |  #------Number    #---------#        |         |
+  //      |                              |    |          |        |         |        |         |
+  //      |                              |    #--------Integer  Number[] Number[][]  |         |
+  //      |                              |               |        |         |        |         |
+  //      |                              #---------------#--------#---------#--------#---------#
+  //      |                                                       |
+  //      #--------------------------#----------------------------#
+  //                                 |
+  //                                 0
+
+  ArenaStack stack(Runtime::Current()->GetArenaPool());
+  ScopedArenaAllocator allocator(&stack);
+  ScopedObjectAccess soa(Thread::Current());
+
+  // We cannot allow moving GC. Otherwise we'd have to ensure the reg types are updated (reference
+  // reg types store a class pointer in a GCRoot, which is normally updated through active verifiers
+  // being registered with their thread), which is unnecessarily complex.
+  Runtime::Current()->GetHeap()->IncrementDisableMovingGC(soa.Self());
+
+  RegTypeCache cache(true, allocator);
+
+  const RegType& conflict = cache.Conflict();
+  const RegType& zero = cache.Zero();
+  const RegType& int_type = cache.Integer();
+
+  const RegType& obj = cache.JavaLangObject(false);
+  const RegType& obj_arr = cache.From(nullptr, "[Ljava/lang/Object;", false);
+  ASSERT_FALSE(obj_arr.IsUnresolvedReference());
+
+  const RegType& unresolved_a = cache.From(nullptr, "Ldoes/not/resolve/A;", false);
+  ASSERT_TRUE(unresolved_a.IsUnresolvedReference());
+  const RegType& unresolved_b = cache.From(nullptr, "Ldoes/not/resolve/B;", false);
+  ASSERT_TRUE(unresolved_b.IsUnresolvedReference());
+  const RegType& unresolved_ab = cache.FromUnresolvedMerge(unresolved_a, unresolved_b, nullptr);
+  ASSERT_TRUE(unresolved_ab.IsUnresolvedMergedReference());
+
+  const RegType& uninit_this = cache.UninitializedThisArgument(obj);
+  const RegType& uninit_obj_0 = cache.Uninitialized(obj, 0u);
+  const RegType& uninit_obj_1 = cache.Uninitialized(obj, 1u);
+
+  const RegType& uninit_unres_this = cache.UninitializedThisArgument(unresolved_a);
+  const RegType& uninit_unres_a_0 = cache.Uninitialized(unresolved_a, 0);
+  const RegType& uninit_unres_b_0 = cache.Uninitialized(unresolved_b, 0);
+
+  const RegType& number = cache.From(nullptr, "Ljava/lang/Number;", false);
+  ASSERT_FALSE(number.IsUnresolvedReference());
+  const RegType& integer = cache.From(nullptr, "Ljava/lang/Integer;", false);
+  ASSERT_FALSE(integer.IsUnresolvedReference());
+
+  const RegType& uninit_number_0 = cache.Uninitialized(number, 0u);
+  const RegType& uninit_integer_0 = cache.Uninitialized(integer, 0u);
+
+  const RegType& number_arr = cache.From(nullptr, "[Ljava/lang/Number;", false);
+  ASSERT_FALSE(number_arr.IsUnresolvedReference());
+  const RegType& integer_arr = cache.From(nullptr, "[Ljava/lang/Integer;", false);
+  ASSERT_FALSE(integer_arr.IsUnresolvedReference());
+
+  const RegType& number_arr_arr = cache.From(nullptr, "[[Ljava/lang/Number;", false);
+  ASSERT_FALSE(number_arr_arr.IsUnresolvedReference());
+
+  const RegType& char_arr = cache.From(nullptr, "[C", false);
+  ASSERT_FALSE(char_arr.IsUnresolvedReference());
+  const RegType& byte_arr = cache.From(nullptr, "[B", false);
+  ASSERT_FALSE(byte_arr.IsUnresolvedReference());
+
+  const RegType& unresolved_a_num = cache.FromUnresolvedMerge(unresolved_a, number, nullptr);
+  ASSERT_TRUE(unresolved_a_num.IsUnresolvedMergedReference());
+  const RegType& unresolved_b_num = cache.FromUnresolvedMerge(unresolved_b, number, nullptr);
+  ASSERT_TRUE(unresolved_b_num.IsUnresolvedMergedReference());
+  const RegType& unresolved_ab_num = cache.FromUnresolvedMerge(unresolved_ab, number, nullptr);
+  ASSERT_TRUE(unresolved_ab_num.IsUnresolvedMergedReference());
+
+  const RegType& unresolved_a_int = cache.FromUnresolvedMerge(unresolved_a, integer, nullptr);
+  ASSERT_TRUE(unresolved_a_int.IsUnresolvedMergedReference());
+  const RegType& unresolved_b_int = cache.FromUnresolvedMerge(unresolved_b, integer, nullptr);
+  ASSERT_TRUE(unresolved_b_int.IsUnresolvedMergedReference());
+  const RegType& unresolved_ab_int = cache.FromUnresolvedMerge(unresolved_ab, integer, nullptr);
+  ASSERT_TRUE(unresolved_ab_int.IsUnresolvedMergedReference());
+  std::vector<const RegType*> uninitialized_types = {
+      &uninit_this, &uninit_obj_0, &uninit_obj_1, &uninit_number_0, &uninit_integer_0
+  };
+  std::vector<const RegType*> unresolved_types = {
+      &unresolved_a,
+      &unresolved_b,
+      &unresolved_ab,
+      &unresolved_a_num,
+      &unresolved_b_num,
+      &unresolved_ab_num,
+      &unresolved_a_int,
+      &unresolved_b_int,
+      &unresolved_ab_int
+  };
+  std::vector<const RegType*> uninit_unresolved_types = {
+      &uninit_unres_this, &uninit_unres_a_0, &uninit_unres_b_0
+  };
+  std::vector<const RegType*> plain_nonobj_classes = { &number, &integer };
+  std::vector<const RegType*> plain_nonobj_arr_classes = {
+      &number_arr,
+      &number_arr_arr,
+      &integer_arr,
+      &char_arr,
+  };
+  // std::vector<const RegType*> others = { &conflict, &zero, &null, &obj, &int_type };
+
+  std::vector<const RegType*> all_minus_uninit_conflict;
+  all_minus_uninit_conflict.insert(all_minus_uninit_conflict.end(),
+                                   unresolved_types.begin(),
+                                   unresolved_types.end());
+  all_minus_uninit_conflict.insert(all_minus_uninit_conflict.end(),
+                                   plain_nonobj_classes.begin(),
+                                   plain_nonobj_classes.end());
+  all_minus_uninit_conflict.insert(all_minus_uninit_conflict.end(),
+                                   plain_nonobj_arr_classes.begin(),
+                                   plain_nonobj_arr_classes.end());
+  all_minus_uninit_conflict.push_back(&zero);
+  all_minus_uninit_conflict.push_back(&obj);
+
+  std::vector<const RegType*> all_minus_uninit;
+  all_minus_uninit.insert(all_minus_uninit.end(),
+                          all_minus_uninit_conflict.begin(),
+                          all_minus_uninit_conflict.end());
+  all_minus_uninit.push_back(&conflict);
+
+
+  std::vector<const RegType*> all;
+  all.insert(all.end(), uninitialized_types.begin(), uninitialized_types.end());
+  all.insert(all.end(), uninit_unresolved_types.begin(), uninit_unresolved_types.end());
+  all.insert(all.end(), all_minus_uninit.begin(), all_minus_uninit.end());
+  all.push_back(&int_type);
+
+  auto check = [&](const RegType& in1, const RegType& in2, const RegType& expected_out)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    const RegType& merge_result = in1.SafeMerge(in2, &cache, nullptr);
+    EXPECT_EQ(&expected_out, &merge_result)
+        << in1.Dump() << " x " << in2.Dump() << " = " << merge_result.Dump()
+        << " != " << expected_out.Dump();
+  };
+
+  // Identity.
+  {
+    for (auto r : all) {
+      check(*r, *r, *r);
+    }
+  }
+
+  // Define a covering relation through a list of Edges. We'll then derive LUBs from this and
+  // create checks for every pair of types.
+
+  struct Edge {
+    const RegType& from;
+    const RegType& to;
+
+    Edge(const RegType& from_, const RegType& to_) : from(from_), to(to_) {}
+  };
+  std::vector<Edge> edges;
+#define ADD_EDGE(from, to) edges.emplace_back((from), (to))
+
+  // To Conflict.
+  {
+    for (auto r : uninitialized_types) {
+      ADD_EDGE(*r, conflict);
+    }
+    for (auto r : uninit_unresolved_types) {
+      ADD_EDGE(*r, conflict);
+    }
+    ADD_EDGE(obj, conflict);
+    ADD_EDGE(int_type, conflict);
+  }
+
+  // Unresolved.
+  {
+    ADD_EDGE(zero, unresolved_a);
+    ADD_EDGE(zero, unresolved_b);
+    ADD_EDGE(unresolved_a, unresolved_ab);
+    ADD_EDGE(unresolved_b, unresolved_ab);
+
+    ADD_EDGE(number, unresolved_a_num);
+    ADD_EDGE(unresolved_a, unresolved_a_num);
+    ADD_EDGE(number, unresolved_b_num);
+    ADD_EDGE(unresolved_b, unresolved_b_num);
+    ADD_EDGE(number, unresolved_ab_num);
+    ADD_EDGE(unresolved_a_num, unresolved_ab_num);
+    ADD_EDGE(unresolved_b_num, unresolved_ab_num);
+    ADD_EDGE(unresolved_ab, unresolved_ab_num);
+
+    ADD_EDGE(integer, unresolved_a_int);
+    ADD_EDGE(unresolved_a, unresolved_a_int);
+    ADD_EDGE(integer, unresolved_b_int);
+    ADD_EDGE(unresolved_b, unresolved_b_int);
+    ADD_EDGE(integer, unresolved_ab_int);
+    ADD_EDGE(unresolved_a_int, unresolved_ab_int);
+    ADD_EDGE(unresolved_b_int, unresolved_ab_int);
+    ADD_EDGE(unresolved_ab, unresolved_ab_int);
+
+    ADD_EDGE(unresolved_a_int, unresolved_a_num);
+    ADD_EDGE(unresolved_b_int, unresolved_b_num);
+    ADD_EDGE(unresolved_ab_int, unresolved_ab_num);
+
+    ADD_EDGE(unresolved_ab_num, obj);
+  }
+
+  // Classes.
+  {
+    ADD_EDGE(zero, integer);
+    ADD_EDGE(integer, number);
+    ADD_EDGE(number, obj);
+  }
+
+  // Arrays.
+  {
+    ADD_EDGE(integer_arr, number_arr);
+    ADD_EDGE(number_arr, obj_arr);
+    ADD_EDGE(obj_arr, obj);
+    ADD_EDGE(number_arr_arr, obj_arr);
+
+    ADD_EDGE(char_arr, obj);
+    ADD_EDGE(byte_arr, obj);
+
+    ADD_EDGE(zero, integer_arr);
+    ADD_EDGE(zero, number_arr_arr);
+    ADD_EDGE(zero, char_arr);
+    ADD_EDGE(zero, byte_arr);
+  }
+
+  // Primitive.
+  {
+    ADD_EDGE(zero, int_type);
+  }
+#undef ADD_EDGE
+
+  // Create merge triples by using the covering relation established by edges to derive the
+  // expected merge for any pair of types.
+
+  // Expect merge(in1, in2) == out.
+  struct MergeExpectation {
+    const RegType& in1;
+    const RegType& in2;
+    const RegType& out;
+
+    MergeExpectation(const RegType& in1_, const RegType& in2_, const RegType& out_)
+        : in1(in1_), in2(in2_), out(out_) {}
+  };
+  std::vector<MergeExpectation> expectations;
+
+  for (auto r1 : all) {
+    for (auto r2 : all) {
+      if (r1 == r2) {
+        continue;
+      }
+
+      // Very simple algorithm here that is usually used with adjacency lists. Our graph is
+      // small, so it didn't make sense to build lists per node. Thus, the regular guarantees
+      // of O(n + |e|) don't apply, but that is acceptable.
+      //
+      // To compute r1 lub r2 = merge(r1, r2):
+      //   1) Generate the reachable set of r1, name it grey.
+      //   2) Mark all grey reachable nodes of r2 as black.
+      //   3) Find black nodes with no in-edges from other black nodes.
+      //   4) If |3)| == 1, that's the lub.
+
+      // Generic BFS of the graph induced by edges, starting at start. new_node will be called
+      // with any discovered node, in order.
+      auto bfs = [&](auto new_node, const RegType* start) {
+        std::unordered_set<const RegType*> seen;
+        std::queue<const RegType*> work_list;
+        work_list.push(start);
+        while (!work_list.empty()) {
+          const RegType* cur = work_list.front();
+          work_list.pop();
+          auto it = seen.find(cur);
+          if (it != seen.end()) {
+            continue;
+          }
+          seen.insert(cur);
+          new_node(cur);
+
+          for (const Edge& edge : edges) {
+            if (&edge.from == cur) {
+              work_list.push(&edge.to);
+            }
+          }
+        }
+      };
+
+      std::unordered_set<const RegType*> grey;
+      auto compute_grey = [&](const RegType* cur) {
+        grey.insert(cur);  // Mark discovered node as grey.
+      };
+      bfs(compute_grey, r1);
+
+      std::set<const RegType*> black;
+      auto compute_black = [&](const RegType* cur) {
+        // Mark discovered grey node as black.
+        if (grey.find(cur) != grey.end()) {
+          black.insert(cur);
+        }
+      };
+      bfs(compute_black, r2);
+
+      std::set<const RegType*> no_in_edge(black);  // Copy of black, remove nodes with in-edges.
+      for (auto r : black) {
+        for (Edge& e : edges) {
+          if (&e.from == r) {
+            no_in_edge.erase(&e.to);  // It doesn't matter whether "to" is black or not, just
+                                      // attempt to remove it.
+          }
+        }
+      }
+
+      // Helper to print sets when something went wrong.
+      auto print_set = [](auto& container) REQUIRES_SHARED(Locks::mutator_lock_) {
+        std::string result;
+        for (auto r : container) {
+          result.append(" + ");
+          result.append(r->Dump());
+        }
+        return result;
+      };
+      ASSERT_EQ(no_in_edge.size(), 1u) << r1->Dump() << " u " << r2->Dump()
+                                       << " grey=" << print_set(grey)
+                                       << " black=" << print_set(black)
+                                       << " no-in-edge=" << print_set(no_in_edge);
+      expectations.emplace_back(*r1, *r2, **no_in_edge.begin());
+    }
+  }
+
+  // Evaluate merge expectations. The merge is expected to be commutative.
+
+  for (auto& triple : expectations) {
+    check(triple.in1, triple.in2, triple.out);
+    check(triple.in2, triple.in1, triple.out);
+  }
+
+  Runtime::Current()->GetHeap()->DecrementDisableMovingGC(soa.Self());
+}
+
 TEST_F(RegTypeTest, ConstPrecision) {
  // Tests creating primitive types.
   ArenaStack stack(Runtime::Current()->GetArenaPool());
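The MergeSemiLatticeRef test above derives the expected merge result for every pair of types from a covering relation: take the set reachable from r1 (grey), intersect it with the set reachable from r2 (black), and the least upper bound is the unique black node with no in-edges from other black nodes. A standalone sketch of that derivation over a toy lattice, using made-up type names rather than real RegTypes, looks roughly like this.

// Sketch of the grey/black LUB derivation used by the test; node names are illustrative only.
#include <iostream>
#include <queue>
#include <set>
#include <string>
#include <utility>
#include <vector>

using Edge = std::pair<std::string, std::string>;  // from -> to, i.e. "from" is assignable to "to".

std::set<std::string> Reachable(const std::vector<Edge>& edges, const std::string& start) {
  std::set<std::string> seen;
  std::queue<std::string> work;
  work.push(start);
  while (!work.empty()) {
    std::string cur = work.front();
    work.pop();
    if (!seen.insert(cur).second) {
      continue;  // Already visited.
    }
    for (const Edge& e : edges) {
      if (e.first == cur) work.push(e.second);
    }
  }
  return seen;
}

std::string Lub(const std::vector<Edge>& edges, const std::string& r1, const std::string& r2) {
  std::set<std::string> grey = Reachable(edges, r1);      // 1) Everything reachable from r1.
  std::set<std::string> black;                            // 2) Reachable from r2 and also grey.
  for (const std::string& n : Reachable(edges, r2)) {
    if (grey.count(n) != 0) black.insert(n);
  }
  std::set<std::string> no_in_edge(black);                // 3) Drop black nodes with black in-edges.
  for (const std::string& n : black) {
    for (const Edge& e : edges) {
      if (e.first == n) no_in_edge.erase(e.second);
    }
  }
  return no_in_edge.size() == 1 ? *no_in_edge.begin() : "<not unique>";  // 4) Unique node is the LUB.
}

int main() {
  std::vector<Edge> edges = {
      {"zero", "Integer"}, {"Integer", "Number"}, {"Number", "Object"},
      {"zero", "CharArray"}, {"CharArray", "Object"}, {"Object", "Conflict"},
  };
  std::cout << Lub(edges, "Integer", "CharArray") << "\n";  // Object
  std::cout << Lub(edges, "zero", "Number") << "\n";        // Number
  return 0;
}
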
diff --git a/tools/libjdwp_art_failures.txt b/tools/libjdwp_art_failures.txt
index abcc728..bf1c937 100644
--- a/tools/libjdwp_art_failures.txt
+++ b/tools/libjdwp_art_failures.txt
@@ -64,6 +64,11 @@
            "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExit",
            "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExitWithReturnValue" ]
 },
+{
+  description: "Tests for VMDebug functionality not implemented in the upstream libjdwp",
+  result: EXEC_FAILED,
+  name: "org.apache.harmony.jpda.tests.jdwp.VMDebug.VMDebugTest#testVMDebug"
+},
 /* TODO Categorize these failures more. */
 {
   description: "Tests that fail on both ART and RI. These tests are likely incorrect",