DO NOT MERGE

Merge ART from AOSP to lmp-preview-dev.

Among other changes, this merge adds sigchainlib to the build, splits
dump-oat-boot into per-architecture targets, replaces the Pass/PassDriver
framework with PassME/PassDriverMEOpts, adds new optimizing-compiler sources
(locations, parallel move resolver, register allocator), rewrites local
value numbering, enables additional arm64 opcodes in the Quick frontend,
hoists the libcxx.mk includes so host builds also link libc++, and adds a
SignalTest.

Change-Id: I0f578733a4b8756fd780d4a052ad69b746f687a9
diff --git a/Android.mk b/Android.mk
index cc37599..a30c090 100644
--- a/Android.mk
+++ b/Android.mk
@@ -90,6 +90,8 @@
 include $(art_path)/dalvikvm/Android.mk
 include $(art_path)/tools/Android.mk
 include $(art_build_path)/Android.oat.mk
+include $(art_path)/sigchainlib/Android.mk
+
 
 
 
@@ -401,15 +403,27 @@
 .PHONY: dump-oat-core-target
 ifeq ($(ART_BUILD_TARGET),true)
 dump-oat-core-target: $(TARGET_CORE_IMG_OUT) $(OATDUMP)
-	$(OATDUMP) --image=$(TARGET_CORE_IMG_OUT) --output=$(ART_DUMP_OAT_PATH)/core.target.oatdump.txt
+	$(OATDUMP) --image=$(TARGET_CORE_IMG_LOCATION) --output=$(ART_DUMP_OAT_PATH)/core.target.oatdump.txt --instruction-set=$(TARGET_ARCH)
 	@echo Output in $(ART_DUMP_OAT_PATH)/core.target.oatdump.txt
 endif
 
-.PHONY: dump-oat-boot
+.PHONY: dump-oat-boot-$(TARGET_ARCH)
 ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
-dump-oat-boot: $(DEFAULT_DEX_PREOPT_BUILT_IMAGE) $(OATDUMP)
-	$(OATDUMP) --image=$(DEFAULT_DEX_PREOPT_BUILT_IMAGE) --output=$(ART_DUMP_OAT_PATH)/boot.oatdump.txt
-	@echo Output in $(ART_DUMP_OAT_PATH)/boot.oatdump.txt
+dump-oat-boot-$(TARGET_ARCH): $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) $(OATDUMP)
+	$(OATDUMP) --image=$(DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION) --output=$(ART_DUMP_OAT_PATH)/boot.$(TARGET_ARCH).oatdump.txt --instruction-set=$(TARGET_ARCH)
+	@echo Output in $(ART_DUMP_OAT_PATH)/boot.$(TARGET_ARCH).oatdump.txt
+endif
+
+ifdef TARGET_2ND_ARCH
+dump-oat-boot-$(TARGET_2ND_ARCH): $(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) $(OATDUMP)
+	$(OATDUMP) --image=$(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION) --output=$(ART_DUMP_OAT_PATH)/boot.$(TARGET_2ND_ARCH).oatdump.txt --instruction-set=$(TARGET_2ND_ARCH)
+	@echo Output in $(ART_DUMP_OAT_PATH)/boot.$(TARGET_2ND_ARCH).oatdump.txt
+endif
+
+.PHONY: dump-oat-boot
+dump-oat-boot: dump-oat-boot-$(TARGET_ARCH)
+ifdef TARGET_2ND_ARCH
+dump-oat-boot: dump-oat-boot-$(TARGET_2ND_ARCH)
 endif
 
 .PHONY: dump-oat-Calculator
diff --git a/build/Android.common.mk b/build/Android.common.mk
index ae54efb..83c536f 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -402,5 +402,6 @@
 endif
 
 HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art
+TARGET_CORE_IMG_LOCATION := $(ART_TEST_OUT)/core.art
 
 endif # ANDROID_COMMON_MK
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index 3c33975..a186e85 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -99,8 +99,8 @@
     LOCAL_MULTILIB := $$(art_multilib)
   endif
 
+  include external/libcxx/libcxx.mk
   ifeq ($$(art_target_or_host),target)
-    include external/libcxx/libcxx.mk
     include $(BUILD_EXECUTABLE)
     ART_TARGET_EXECUTABLES := $(ART_TARGET_EXECUTABLES) $(TARGET_OUT_EXECUTABLES)/$$(LOCAL_MODULE)
   else # host
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 952f79a..9f1d0f1 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -81,8 +81,11 @@
 	compiler/optimizing/find_loops_test.cc \
 	compiler/optimizing/linearize_test.cc \
 	compiler/optimizing/liveness_test.cc \
+	compiler/optimizing/live_interval_test.cc \
 	compiler/optimizing/live_ranges_test.cc \
+	compiler/optimizing/parallel_move_test.cc \
 	compiler/optimizing/pretty_printer_test.cc \
+	compiler/optimizing/register_allocator_test.cc \
 	compiler/optimizing/ssa_test.cc \
 	compiler/output_stream_test.cc \
 	compiler/utils/arena_allocator_test.cc \
@@ -182,6 +185,7 @@
   endif
 
   LOCAL_CFLAGS := $(ART_TEST_CFLAGS)
+  include external/libcxx/libcxx.mk
   ifeq ($$(art_target_or_host),target)
     LOCAL_CLANG := $(ART_TARGET_CLANG)
     LOCAL_CFLAGS += $(ART_TARGET_CFLAGS) $(ART_TARGET_DEBUG_CFLAGS)
@@ -191,7 +195,6 @@
     LOCAL_MODULE_PATH_32 := $(ART_NATIVETEST_OUT)/$(ART_TARGET_ARCH_32)
     LOCAL_MODULE_PATH_64 := $(ART_NATIVETEST_OUT)/$(ART_TARGET_ARCH_64)
     LOCAL_MULTILIB := both
-    include external/libcxx/libcxx.mk
     include $(BUILD_EXECUTABLE)
     
     ART_TARGET_GTEST_EXECUTABLES$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_NATIVETEST_OUT)/$(TARGET_ARCH)/$$(LOCAL_MODULE)
@@ -216,7 +219,7 @@
     LOCAL_STATIC_LIBRARIES += libcutils libvixl
     ifneq ($(WITHOUT_HOST_CLANG),true)
         # GCC-built host tests fail when this is linked in, presumably due to destructors that run.
-        LOCAL_STATIC_LIBRARIES += libgtest_host
+        LOCAL_STATIC_LIBRARIES += libgtest_libc++_host
     endif
     LOCAL_LDLIBS += -lpthread -ldl
     LOCAL_IS_HOST_MODULE := true
diff --git a/build/Android.libarttest.mk b/build/Android.libarttest.mk
index 6965326..c080928 100644
--- a/build/Android.libarttest.mk
+++ b/build/Android.libarttest.mk
@@ -16,6 +16,7 @@
 
 LIBARTTEST_COMMON_SRC_FILES := \
 	test/JniTest/jni_test.cc \
+	test/SignalTest/signaltest.cc \
 	test/ReferenceMap/stack_walk_refmap_jni.cc \
 	test/StackWalk/stack_walk_jni.cc \
 	test/UnsafeTest/unsafe_test.cc
@@ -46,6 +47,7 @@
   LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
   LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/build/Android.common.mk
   LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/build/Android.libarttest.mk
+  include external/libcxx/libcxx.mk
   ifeq ($$(art_target_or_host),target)
     LOCAL_CLANG := $(ART_TARGET_CLANG)
     LOCAL_CFLAGS := $(ART_TARGET_CFLAGS) $(ART_TARGET_DEBUG_CFLAGS)
@@ -56,13 +58,12 @@
     LOCAL_MODULE_PATH_32 := $(ART_TEST_OUT)/$(ART_TARGET_ARCH_32)
     LOCAL_MODULE_PATH_64 := $(ART_TEST_OUT)/$(ART_TARGET_ARCH_64)
     LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH)
-    include external/libcxx/libcxx.mk
     include $(BUILD_SHARED_LIBRARY)
   else # host
     LOCAL_CLANG := $(ART_HOST_CLANG)
     LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
     LOCAL_STATIC_LIBRARIES := libcutils
-    LOCAL_LDLIBS := -ldl -lpthread
+    LOCAL_LDLIBS += -ldl -lpthread
     ifeq ($(HOST_OS),linux)
       LOCAL_LDLIBS += -lrt
     endif
diff --git a/compiler/Android.mk b/compiler/Android.mk
index cb900ea..3bed01d 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -59,9 +59,10 @@
 	dex/mir_field_info.cc \
 	dex/mir_method_info.cc \
 	dex/mir_optimization.cc \
-	dex/pass_driver.cc \
 	dex/bb_optimizations.cc \
-	dex/bit_vector_block_iterator.cc \
+	dex/post_opt_passes.cc \
+	dex/pass_driver_me_opts.cc \
+	dex/pass_driver_me_post_opt.cc \
 	dex/frontend.cc \
 	dex/mir_graph.cc \
 	dex/mir_analysis.cc \
@@ -83,8 +84,11 @@
 	optimizing/code_generator_arm.cc \
 	optimizing/code_generator_x86.cc \
 	optimizing/graph_visualizer.cc \
+	optimizing/locations.cc \
 	optimizing/nodes.cc \
 	optimizing/optimizing_compiler.cc \
+	optimizing/parallel_move_resolver.cc \
+	optimizing/register_allocator.cc \
 	optimizing/ssa_builder.cc \
 	optimizing/ssa_liveness_analysis.cc \
 	trampolines/trampoline_compiler.cc \
@@ -194,8 +198,8 @@
   LOCAL_GENERATED_SOURCES += $$(ENUM_OPERATOR_OUT_GEN)
 
   LOCAL_CFLAGS := $$(LIBART_COMPILER_CFLAGS)
+  include external/libcxx/libcxx.mk
   ifeq ($$(art_target_or_host),target)
-    include external/libcxx/libcxx.mk
     LOCAL_CLANG := $(ART_TARGET_CLANG)
     LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
   else # host
@@ -247,7 +251,7 @@
   LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
 
   ifeq ($$(art_target_or_host),host)
-    LOCAL_LDLIBS := -ldl -lpthread
+    LOCAL_LDLIBS += -ldl -lpthread
   endif
   LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
   LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index fb6c625..5050d4e 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -377,7 +377,7 @@
     timings.EndSplit();
   }
 
-  void CompileDirectMethod(Handle<mirror::ClassLoader>& class_loader, const char* class_name,
+  void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
                            const char* method_name, const char* signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     std::string class_descriptor(DotToDescriptor(class_name));
@@ -390,7 +390,7 @@
     CompileMethod(method);
   }
 
-  void CompileVirtualMethod(Handle<mirror::ClassLoader>& class_loader, const char* class_name,
+  void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
                             const char* method_name, const char* signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     std::string class_descriptor(DotToDescriptor(class_name));
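
The two signature changes above stop taking Handle<mirror::ClassLoader> by
non-const reference and take it by value instead. A Handle is a small wrapper
around a slot that the GC keeps up to date, so copying it is cheap and the
copy still observes the live object. A minimal sketch of that idea, using
simplified stand-in types (illustrative only, not the real Handle from the
runtime):

    // Simplified model, not the real ART type: a Handle is one slot pointer,
    // so passing it by value copies a pointer and still sees GC updates.
    #include <cassert>

    template <typename T>
    class Handle {
     public:
      explicit Handle(T** slot) : slot_(slot) {}
      T* Get() const { return *slot_; }  // Reads the current object, even if it moved.
     private:
      T** slot_;  // Points into a scope that the GC rewrites.
    };

    struct ClassLoader { int id; };

    // Mirrors the new signatures: by-value Handle, no non-const reference.
    int DescribeLoader(Handle<ClassLoader> class_loader) {
      return class_loader.Get()->id;
    }

    int main() {
      ClassLoader loader{42};
      ClassLoader* slot = &loader;           // Stands in for a HandleScope slot.
      Handle<ClassLoader> handle(&slot);
      assert(DescribeLoader(handle) == 42);  // The copy sees the same slot.
      return 0;
    }

By-value passing also lets callers bind temporaries, which a non-const
reference parameter forbids.
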
diff --git a/compiler/dex/bb_optimizations.cc b/compiler/dex/bb_optimizations.cc
index abfa7a7..06e259a 100644
--- a/compiler/dex/bb_optimizations.cc
+++ b/compiler/dex/bb_optimizations.cc
@@ -23,52 +23,14 @@
 /*
  * Code Layout pass implementation start.
  */
-bool CodeLayout::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
-  cUnit->mir_graph->LayoutBlocks(bb);
-  // No need of repeating, so just return false.
-  return false;
-}
-
-/*
- * SSATransformation pass implementation start.
- */
-bool SSATransformation::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
-  cUnit->mir_graph->InsertPhiNodeOperands(bb);
-  // No need of repeating, so just return false.
-  return false;
-}
-
-void SSATransformation::End(CompilationUnit* cUnit) const {
-  // Verify the dataflow information after the pass.
-  if (cUnit->enable_debug & (1 << kDebugVerifyDataflow)) {
-    cUnit->mir_graph->VerifyDataflow();
-  }
-}
-
-/*
- * ConstantPropagation pass implementation start
- */
-bool ConstantPropagation::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
-  cUnit->mir_graph->DoConstantPropagation(bb);
-  // No need of repeating, so just return false.
-  return false;
-}
-
-/*
- * MethodUseCount pass implementation start.
- */
-bool MethodUseCount::Gate(const CompilationUnit* cUnit) const {
-  // First initialize the data.
-  cUnit->mir_graph->InitializeMethodUses();
-
-  // Now check if the pass is to be ignored.
-  bool res = ((cUnit->disable_opt & (1 << kPromoteRegs)) == 0);
-
-  return res;
-}
-
-bool MethodUseCount::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
-  cUnit->mir_graph->CountUses(bb);
+bool CodeLayout::Worker(const PassDataHolder* data) const {
+  DCHECK(data != nullptr);
+  const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+  DCHECK(c_unit != nullptr);
+  BasicBlock* bb = pass_me_data_holder->bb;
+  DCHECK(bb != nullptr);
+  c_unit->mir_graph->LayoutBlocks(bb);
   // No need to repeat, so just return false.
   return false;
 }
@@ -76,8 +38,14 @@
 /*
  * BasicBlock Combine pass implementation start.
  */
-bool BBCombine::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
-  cUnit->mir_graph->CombineBlocks(bb);
+bool BBCombine::Worker(const PassDataHolder* data) const {
+  DCHECK(data != nullptr);
+  const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+  DCHECK(c_unit != nullptr);
+  BasicBlock* bb = pass_me_data_holder->bb;
+  DCHECK(bb != nullptr);
+  c_unit->mir_graph->CombineBlocks(bb);
 
   // No need to repeat, so just return false.
   return false;
@@ -86,14 +54,17 @@
 /*
  * BasicBlock Optimization pass implementation start.
  */
-void BBOptimizations::Start(CompilationUnit* cUnit) const {
+void BBOptimizations::Start(const PassDataHolder* data) const {
+  DCHECK(data != nullptr);
+  CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+  DCHECK(c_unit != nullptr);
   /*
    * This pass has a different ordering depending on the suppress-exception setting,
    * so do the pass here for now:
    *   - Later, the Start should just change the ordering and we can move the extended
    *     creation into the pass driver's main job with a new iterator
    */
-  cUnit->mir_graph->BasicBlockOptimization();
+  c_unit->mir_graph->BasicBlockOptimization();
 }
 
 }  // namespace art
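
Throughout this file the hooks now take an opaque const PassDataHolder* and
down_cast it to PassMEDataHolder to recover the CompilationUnit and the
BasicBlock, instead of receiving them directly; one driver interface can then
serve differently shaped pass data. A minimal sketch of the pattern with
simplified stand-ins (the real types live in pass.h and pass_me.h):

    // Simplified stand-ins for the pass framework; illustrative only.
    #include <cassert>

    struct CompilationUnit { int blocks_laid_out = 0; };
    struct BasicBlock { int id; };

    struct PassDataHolder { virtual ~PassDataHolder() {} };

    // The middle-end holder carries what every PassME hook expects.
    struct PassMEDataHolder : PassDataHolder {
      CompilationUnit* c_unit;
      BasicBlock* bb;
    };

    // Stand-in for art::down_cast: a static_cast to a derived pointer.
    template <typename To, typename From>
    To down_cast(From* f) { return static_cast<To>(f); }

    struct Pass {
      virtual ~Pass() {}
      virtual bool Worker(const PassDataHolder* data) const = 0;
    };

    // Same shape as CodeLayout::Worker above: check, unwrap, operate.
    struct CodeLayoutSketch : Pass {
      bool Worker(const PassDataHolder* data) const override {
        assert(data != nullptr);
        const PassMEDataHolder* holder = down_cast<const PassMEDataHolder*>(data);
        assert(holder->c_unit != nullptr && holder->bb != nullptr);
        holder->c_unit->blocks_laid_out++;  // Stands in for LayoutBlocks(bb).
        return false;                       // No repetition needed.
      }
    };

    int main() {
      CompilationUnit c_unit;
      BasicBlock bb{0};
      PassMEDataHolder holder;
      holder.c_unit = &c_unit;
      holder.bb = &bb;
      CodeLayoutSketch pass;
      pass.Worker(&holder);  // The driver invokes this once per basic block.
      assert(c_unit.blocks_laid_out == 1);
      return 0;
    }
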
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 6d500a5..0094790 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -18,7 +18,7 @@
 #define ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
 
 #include "compiler_internals.h"
-#include "pass.h"
+#include "pass_me.h"
 
 namespace art {
 
@@ -26,16 +26,22 @@
  * @class CacheFieldLoweringInfo
  * @brief Cache the lowering info for fields used by IGET/IPUT/SGET/SPUT insns.
  */
-class CacheFieldLoweringInfo : public Pass {
+class CacheFieldLoweringInfo : public PassME {
  public:
-  CacheFieldLoweringInfo() : Pass("CacheFieldLoweringInfo", kNoNodes) {
+  CacheFieldLoweringInfo() : PassME("CacheFieldLoweringInfo", kNoNodes) {
   }
 
-  void Start(CompilationUnit* cUnit) const {
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     cUnit->mir_graph->DoCacheFieldLoweringInfo();
   }
 
-  bool Gate(const CompilationUnit *cUnit) const {
+  bool Gate(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     return cUnit->mir_graph->HasFieldAccess();
   }
 };
@@ -44,16 +50,22 @@
  * @class CacheMethodLoweringInfo
  * @brief Cache the lowering info for methods called by INVOKEs.
  */
-class CacheMethodLoweringInfo : public Pass {
+class CacheMethodLoweringInfo : public PassME {
  public:
-  CacheMethodLoweringInfo() : Pass("CacheMethodLoweringInfo", kNoNodes) {
+  CacheMethodLoweringInfo() : PassME("CacheMethodLoweringInfo", kNoNodes) {
   }
 
-  void Start(CompilationUnit* cUnit) const {
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     cUnit->mir_graph->DoCacheMethodLoweringInfo();
   }
 
-  bool Gate(const CompilationUnit *cUnit) const {
+  bool Gate(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     return cUnit->mir_graph->HasInvokes();
   }
 };
@@ -62,26 +74,41 @@
  * @class CallInlining
  * @brief Perform method inlining pass.
  */
-class CallInlining : public Pass {
+class CallInlining : public PassME {
  public:
-  CallInlining() : Pass("CallInlining") {
+  CallInlining() : PassME("CallInlining") {
   }
 
-  bool Gate(const CompilationUnit* cUnit) const {
+  bool Gate(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     return cUnit->mir_graph->InlineCallsGate();
   }
 
-  void Start(CompilationUnit* cUnit) const {
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     cUnit->mir_graph->InlineCallsStart();
   }
 
-  bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+  bool Worker(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+    CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+    DCHECK(cUnit != nullptr);
+    BasicBlock* bb = pass_me_data_holder->bb;
+    DCHECK(bb != nullptr);
     cUnit->mir_graph->InlineCalls(bb);
     // No need to repeat, so just return false.
     return false;
   }
 
-  void End(CompilationUnit* cUnit) const {
+  void End(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     cUnit->mir_graph->InlineCallsEnd();
   }
 };
@@ -90,117 +117,82 @@
  * @class CodeLayout
  * @brief Perform the code layout pass.
  */
-class CodeLayout : public Pass {
+class CodeLayout : public PassME {
  public:
-  CodeLayout() : Pass("CodeLayout", "2_post_layout_cfg") {
+  CodeLayout() : PassME("CodeLayout", kAllNodes, kOptimizationBasicBlockChange, "2_post_layout_cfg") {
   }
 
-  void Start(CompilationUnit* cUnit) const {
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     cUnit->mir_graph->VerifyDataflow();
   }
 
-  bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
-};
-
-/**
- * @class SSATransformation
- * @brief Perform an SSA representation pass on the CompilationUnit.
- */
-class SSATransformation : public Pass {
- public:
-  SSATransformation() : Pass("SSATransformation", kPreOrderDFSTraversal, "3_post_ssa_cfg") {
-  }
-
-  bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
-
-  void Start(CompilationUnit* cUnit) const {
-    cUnit->mir_graph->InitializeSSATransformation();
-  }
-
-  void End(CompilationUnit* cUnit) const;
-};
-
-/**
- * @class ConstantPropagation
- * @brief Perform a constant propagation pass.
- */
-class ConstantPropagation : public Pass {
- public:
-  ConstantPropagation() : Pass("ConstantPropagation") {
-  }
-
-  bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
-
-  void Start(CompilationUnit* cUnit) const {
-    cUnit->mir_graph->InitializeConstantPropagation();
-  }
-};
-
-/**
- * @class InitRegLocations
- * @brief Initialize Register Locations.
- */
-class InitRegLocations : public Pass {
- public:
-  InitRegLocations() : Pass("InitRegLocation", kNoNodes) {
-  }
-
-  void Start(CompilationUnit* cUnit) const {
-    cUnit->mir_graph->InitRegLocations();
-  }
-};
-
-/**
- * @class MethodUseCount
- * @brief Count the register uses of the method
- */
-class MethodUseCount : public Pass {
- public:
-  MethodUseCount() : Pass("UseCount") {
-  }
-
-  bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
-
-  bool Gate(const CompilationUnit* cUnit) const;
+  bool Worker(const PassDataHolder* data) const;
 };
 
 /**
  * @class NullCheckEliminationAndTypeInference
  * @brief Null check elimination and type inference.
  */
-class NullCheckEliminationAndTypeInference : public Pass {
+class NullCheckEliminationAndTypeInference : public PassME {
  public:
   NullCheckEliminationAndTypeInference()
-    : Pass("NCE_TypeInference", kRepeatingPreOrderDFSTraversal, "4_post_nce_cfg") {
+    : PassME("NCE_TypeInference", kRepeatingPreOrderDFSTraversal, "4_post_nce_cfg") {
   }
 
-  void Start(CompilationUnit* cUnit) const {
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     cUnit->mir_graph->EliminateNullChecksAndInferTypesStart();
   }
 
-  bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+  bool Worker(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+    CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+    DCHECK(cUnit != nullptr);
+    BasicBlock* bb = pass_me_data_holder->bb;
+    DCHECK(bb != nullptr);
     return cUnit->mir_graph->EliminateNullChecksAndInferTypes(bb);
   }
 
-  void End(CompilationUnit* cUnit) const {
+  void End(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     cUnit->mir_graph->EliminateNullChecksAndInferTypesEnd();
   }
 };
 
-class ClassInitCheckElimination : public Pass {
+class ClassInitCheckElimination : public PassME {
  public:
-  ClassInitCheckElimination() : Pass("ClInitCheckElimination", kRepeatingPreOrderDFSTraversal) {
+  ClassInitCheckElimination() : PassME("ClInitCheckElimination", kRepeatingPreOrderDFSTraversal) {
   }
 
-  bool Gate(const CompilationUnit* cUnit) const {
+  bool Gate(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     return cUnit->mir_graph->EliminateClassInitChecksGate();
   }
 
-  bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+  bool Worker(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+    CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+    DCHECK(cUnit != nullptr);
+    BasicBlock* bb = pass_me_data_holder->bb;
+    DCHECK(bb != nullptr);
     return cUnit->mir_graph->EliminateClassInitChecks(bb);
   }
 
-  void End(CompilationUnit* cUnit) const {
+  void End(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     cUnit->mir_graph->EliminateClassInitChecksEnd();
   }
 };
@@ -209,32 +201,38 @@
  * @class BBCombine
  * @brief Perform the basic block combine pass.
  */
-class BBCombine : public Pass {
+class BBCombine : public PassME {
  public:
-  BBCombine() : Pass("BBCombine", kPreOrderDFSTraversal, "5_post_bbcombine_cfg") {
+  BBCombine() : PassME("BBCombine", kPreOrderDFSTraversal, "5_post_bbcombine_cfg") {
   }
 
-  bool Gate(const CompilationUnit* cUnit) const {
+  bool Gate(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     return ((cUnit->disable_opt & (1 << kSuppressExceptionEdges)) != 0);
   }
 
-  bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
+  bool Worker(const PassDataHolder* data) const;
 };
 
 /**
  * @class BBOptimizations
  * @brief Any simple BasicBlock optimization can be put here.
  */
-class BBOptimizations : public Pass {
+class BBOptimizations : public PassME {
  public:
-  BBOptimizations() : Pass("BBOptimizations", kNoNodes, "5_post_bbo_cfg") {
+  BBOptimizations() : PassME("BBOptimizations", kNoNodes, "5_post_bbo_cfg") {
   }
 
-  bool Gate(const CompilationUnit* cUnit) const {
+  bool Gate(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(cUnit != nullptr);
     return ((cUnit->disable_opt & (1 << kBBOpt)) == 0);
   }
 
-  void Start(CompilationUnit* cUnit) const;
+  void Start(const PassDataHolder* data) const;
 };
 
 }  // namespace art
diff --git a/compiler/dex/bit_vector_block_iterator.h b/compiler/dex/bit_vector_block_iterator.h
deleted file mode 100644
index 0f1c2b6..0000000
--- a/compiler/dex/bit_vector_block_iterator.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_BIT_VECTOR_BLOCK_ITERATOR_H_
-#define ART_COMPILER_DEX_BIT_VECTOR_BLOCK_ITERATOR_H_
-
-#include "base/bit_vector.h"
-#include "compiler_enums.h"
-#include "utils/arena_bit_vector.h"
-#include "utils/arena_allocator.h"
-#include "compiler_ir.h"
-
-namespace art {
-
-class MIRGraph;
-
-/**
- * @class BasicBlockIterator
- * @brief Helper class to get the BasicBlocks when iterating through the ArenaBitVector.
- */
-class BitVectorBlockIterator {
-  public:
-    explicit BitVectorBlockIterator(BitVector* bv, MIRGraph* mir_graph)
-      : mir_graph_(mir_graph),
-        internal_iterator_(bv) {}
-
-    explicit BitVectorBlockIterator(BitVector* bv, CompilationUnit* c_unit)
-      : mir_graph_(c_unit->mir_graph.get()),
-        internal_iterator_(bv) {}
-
-    BasicBlock* Next();
-
-    void* operator new(size_t size, ArenaAllocator* arena) {
-      return arena->Alloc(size, kArenaAllocGrowableArray);
-    };
-    void operator delete(void* p) {}  // Nop.
-
-  private:
-    MIRGraph* const mir_graph_;
-    BitVector::Iterator internal_iterator_;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_BIT_VECTOR_BLOCK_ITERATOR_H_
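
The deleted header was a thin adapter: it walked a BitVector's set bits and
returned the corresponding BasicBlock for each bit index, so callers can
presumably pair plain bit-vector iteration with block lookup themselves. A toy
model of the pattern it wrapped (simplified; the real BitVector::Iterator has
a different API):

    // Toy model of the deleted adapter: walk set bits, map index -> block.
    // A single 64-bit mask stands in for the real BitVector; __builtin_ctzll
    // (GCC/Clang) finds the lowest set bit.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct BasicBlock { int id; };

    class BlockBitIterator {
     public:
      BlockBitIterator(uint64_t bits, const std::vector<BasicBlock*>& blocks)
          : bits_(bits), blocks_(blocks) {}

      // Like BitVectorBlockIterator::Next(): next block, or nullptr when done.
      BasicBlock* Next() {
        if (bits_ == 0) return nullptr;
        int idx = __builtin_ctzll(bits_);  // Index of the lowest set bit.
        bits_ &= bits_ - 1;                // Clear that bit.
        return blocks_[idx];
      }

     private:
      uint64_t bits_;
      const std::vector<BasicBlock*>& blocks_;
    };

    int main() {
      BasicBlock b0{0}, b2{2};
      std::vector<BasicBlock*> blocks = {&b0, nullptr, &b2};
      BlockBitIterator it(0x5, blocks);  // Bits 0 and 2 set.
      assert(it.Next() == &b0);
      assert(it.Next() == &b2);
      assert(it.Next() == nullptr);
      return 0;
    }
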
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 5b4492f..767ffbf 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -22,6 +22,7 @@
 namespace art {
 
 enum RegisterClass {
+  kInvalidRegClass,
   kCoreReg,
   kFPReg,
   kAnyReg,
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 35d777e..66fb608 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -88,6 +88,7 @@
   std::unique_ptr<MIRGraph> mir_graph;   // MIR container.
   std::unique_ptr<Backend> cg;           // Target-specific codegen.
   TimingLogger timings;
+  bool print_pass;                 // Should the pass driver print pass information?
 };
 
 }  // namespace art
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index b45d6a4..62973af 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -326,6 +326,81 @@
       GrowableArray<BasicBlock*>::Iterator all_nodes_iterator_;    /**< @brief The list of all the nodes */
   };
 
+  /**
+   * @class TopologicalSortIterator
+   * @brief Used to perform a Topological Sort Iteration of a MIRGraph.
+   */
+  class TopologicalSortIterator : public DataflowIterator {
+    public:
+      /**
+       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
+       * @param mir_graph The MIRGraph considered.
+       */
+      explicit TopologicalSortIterator(MIRGraph* mir_graph)
+          : DataflowIterator(mir_graph, 0,
+                             (mir_graph->GetTopologicalSortOrder() == nullptr
+                                  ? mir_graph->ComputeTopologicalSortOrder() : (void)0,
+                              mir_graph->GetTopologicalSortOrder()->Size())) {
+        // The topological order is computed above (if still missing) before
+        // its Size() is taken, so the cached block list below is never null.
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph->GetTopologicalSortOrder();
+      }
+
+      /**
+       * @brief Get the next BasicBlock depending on iteration order.
+       * @param had_change did the user of the iteration change the previous BasicBlock.
+       * @return the next BasicBlock following the iteration order, 0 if finished.
+       */
+      virtual BasicBlock* Next(bool had_change = false) {
+        // Update changed: if had_changed is true, we remember it for the whole iteration.
+        changed_ |= had_change;
+
+        return ForwardSingleNext();
+      }
+  };
+
+  /**
+   * @class RepeatingTopologicalSortIterator
+   * @brief Used to perform a Topological Sort Iteration of a MIRGraph.
+   * @details If anything changed during an iteration, another full iteration is
+   *          performed once the current one finishes.
+   */
+  class RepeatingTopologicalSortIterator : public DataflowIterator {
+    public:
+     /**
+      * @brief The constructor, using all of the reachable blocks of the MIRGraph.
+      * @param mir_graph The MIRGraph considered.
+      */
+     explicit RepeatingTopologicalSortIterator(MIRGraph* mir_graph)
+         : DataflowIterator(mir_graph, 0,
+                            (mir_graph->GetTopologicalSortOrder() == nullptr
+                                 ? mir_graph->ComputeTopologicalSortOrder() : (void)0,
+                             mir_graph->GetTopologicalSortOrder()->Size())) {
+       // The topological order is computed above (if still missing) before
+       // its Size() is taken, so the cached block list below is never null.
+       idx_ = start_idx_;
+       block_id_list_ = mir_graph->GetTopologicalSortOrder();
+     }
+
+     /**
+      * @brief Get the next BasicBlock depending on iteration order.
+      * @param had_change did the user of the iteration change the previous BasicBlock.
+      * @return the next BasicBlock following the iteration order, 0 if finished.
+      */
+     virtual BasicBlock* Next(bool had_change = false) {
+       // Update changed: if had_changed is true, we remember it for the whole iteration.
+       changed_ |= had_change;
+
+       return ForwardRepeatNext();
+     }
+  };
+
 }  // namespace art
 
 #endif  // ART_COMPILER_DEX_DATAFLOW_ITERATOR_H_
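
The contract spelled out in the @param/@return comments above is that the
caller reports through Next(had_change) whether it changed the block it was
just given, and the repeating variant re-runs the whole order until a sweep
finishes with no changes. A self-contained model of that driver contract (toy
types only; the real iterators need a MIRGraph):

    // Minimal model of the repeating-iterator contract described above.
    #include <cassert>
    #include <vector>

    struct BasicBlock { int id; bool dirty; };

    class RepeatingIteratorSketch {
     public:
      explicit RepeatingIteratorSketch(std::vector<BasicBlock*>* order)
          : order_(order) {}

      BasicBlock* Next(bool had_change) {
        changed_ |= had_change;           // Remember any change this sweep.
        if (idx_ == order_->size()) {
          if (!changed_) return nullptr;  // Fixed point reached.
          idx_ = 0;                       // Otherwise start the sweep over.
          changed_ = false;
        }
        return (*order_)[idx_++];
      }

     private:
      std::vector<BasicBlock*>* order_;
      size_t idx_ = 0;
      bool changed_ = false;
    };

    int main() {
      BasicBlock a{0, true}, b{1, false};
      std::vector<BasicBlock*> order = {&a, &b};
      RepeatingIteratorSketch iter(&order);
      int visits = 0;
      // The usual driver shape in the compiler: feed back whether bb changed.
      for (BasicBlock* bb = iter.Next(false); bb != nullptr;) {
        bool change = bb->dirty;          // Stands in for the pass's Worker().
        bb->dirty = false;
        ++visits;
        bb = iter.Next(change);
      }
      assert(visits == 4);                // Two sweeps: one dirty, one clean.
      return 0;
    }
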
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index ba4c5e1..d315141 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -21,7 +21,7 @@
 #include "dataflow_iterator-inl.h"
 #include "leb128.h"
 #include "mirror/object.h"
-#include "pass_driver.h"
+#include "pass_driver_me_opts.h"
 #include "runtime.h"
 #include "base/logging.h"
 #include "base/timing_logger.h"
@@ -105,7 +105,8 @@
     arena_stack(pool),
     mir_graph(nullptr),
     cg(nullptr),
-    timings("QuickCompiler", true, false) {
+    timings("QuickCompiler", true, false),
+    print_pass(false) {
 }
 
 CompilationUnit::~CompilationUnit() {
@@ -136,22 +137,22 @@
 // TODO: Remove this when we are able to compile everything.
 int arm64_support_list[] = {
     Instruction::NOP,
-    // Instruction::MOVE,
-    // Instruction::MOVE_FROM16,
-    // Instruction::MOVE_16,
-    // Instruction::MOVE_WIDE,
-    // Instruction::MOVE_WIDE_FROM16,
-    // Instruction::MOVE_WIDE_16,
-    // Instruction::MOVE_OBJECT,
-    // Instruction::MOVE_OBJECT_FROM16,
-    // Instruction::MOVE_OBJECT_16,
+    Instruction::MOVE,
+    Instruction::MOVE_FROM16,
+    Instruction::MOVE_16,
+    Instruction::MOVE_WIDE,
+    Instruction::MOVE_WIDE_FROM16,
+    Instruction::MOVE_WIDE_16,
+    Instruction::MOVE_OBJECT,
+    Instruction::MOVE_OBJECT_FROM16,
+    Instruction::MOVE_OBJECT_16,
     // Instruction::MOVE_RESULT,
     // Instruction::MOVE_RESULT_WIDE,
     // Instruction::MOVE_RESULT_OBJECT,
     Instruction::MOVE_EXCEPTION,
     Instruction::RETURN_VOID,
-    // Instruction::RETURN,
-    // Instruction::RETURN_WIDE,
+    Instruction::RETURN,
+    Instruction::RETURN_WIDE,
     // Instruction::RETURN_OBJECT,
     // Instruction::CONST_4,
     // Instruction::CONST_16,
@@ -180,11 +181,11 @@
     // Instruction::GOTO_32,
     // Instruction::PACKED_SWITCH,
     // Instruction::SPARSE_SWITCH,
-    // Instruction::CMPL_FLOAT,
-    // Instruction::CMPG_FLOAT,
-    // Instruction::CMPL_DOUBLE,
-    // Instruction::CMPG_DOUBLE,
-    // Instruction::CMP_LONG,
+    Instruction::CMPL_FLOAT,
+    Instruction::CMPG_FLOAT,
+    Instruction::CMPL_DOUBLE,
+    Instruction::CMPG_DOUBLE,
+    Instruction::CMP_LONG,
     // Instruction::IF_EQ,
     // Instruction::IF_NE,
     // Instruction::IF_LT,
@@ -258,110 +259,110 @@
     // Instruction::INVOKE_INTERFACE_RANGE,
     // Instruction::UNUSED_79,
     // Instruction::UNUSED_7A,
-    // Instruction::NEG_INT,
-    // Instruction::NOT_INT,
-    // Instruction::NEG_LONG,
-    // Instruction::NOT_LONG,
-    // Instruction::NEG_FLOAT,
-    // Instruction::NEG_DOUBLE,
-    // Instruction::INT_TO_LONG,
-    // Instruction::INT_TO_FLOAT,
-    // Instruction::INT_TO_DOUBLE,
-    // Instruction::LONG_TO_INT,
-    // Instruction::LONG_TO_FLOAT,
-    // Instruction::LONG_TO_DOUBLE,
-    // Instruction::FLOAT_TO_INT,
-    // Instruction::FLOAT_TO_LONG,
-    // Instruction::FLOAT_TO_DOUBLE,
-    // Instruction::DOUBLE_TO_INT,
-    // Instruction::DOUBLE_TO_LONG,
-    // Instruction::DOUBLE_TO_FLOAT,
-    // Instruction::INT_TO_BYTE,
-    // Instruction::INT_TO_CHAR,
-    // Instruction::INT_TO_SHORT,
-    // Instruction::ADD_INT,
-    // Instruction::SUB_INT,
-    // Instruction::MUL_INT,
-    // Instruction::DIV_INT,
-    // Instruction::REM_INT,
-    // Instruction::AND_INT,
-    // Instruction::OR_INT,
-    // Instruction::XOR_INT,
-    // Instruction::SHL_INT,
-    // Instruction::SHR_INT,
-    // Instruction::USHR_INT,
-    // Instruction::ADD_LONG,
-    // Instruction::SUB_LONG,
-    // Instruction::MUL_LONG,
-    // Instruction::DIV_LONG,
-    // Instruction::REM_LONG,
-    // Instruction::AND_LONG,
-    // Instruction::OR_LONG,
-    // Instruction::XOR_LONG,
-    // Instruction::SHL_LONG,
-    // Instruction::SHR_LONG,
-    // Instruction::USHR_LONG,
-    // Instruction::ADD_FLOAT,
-    // Instruction::SUB_FLOAT,
-    // Instruction::MUL_FLOAT,
-    // Instruction::DIV_FLOAT,
+    Instruction::NEG_INT,
+    Instruction::NOT_INT,
+    Instruction::NEG_LONG,
+    Instruction::NOT_LONG,
+    Instruction::NEG_FLOAT,
+    Instruction::NEG_DOUBLE,
+    Instruction::INT_TO_LONG,
+    Instruction::INT_TO_FLOAT,
+    Instruction::INT_TO_DOUBLE,
+    Instruction::LONG_TO_INT,
+    Instruction::LONG_TO_FLOAT,
+    Instruction::LONG_TO_DOUBLE,
+    Instruction::FLOAT_TO_INT,
+    Instruction::FLOAT_TO_LONG,
+    Instruction::FLOAT_TO_DOUBLE,
+    Instruction::DOUBLE_TO_INT,
+    Instruction::DOUBLE_TO_LONG,
+    Instruction::DOUBLE_TO_FLOAT,
+    Instruction::INT_TO_BYTE,
+    Instruction::INT_TO_CHAR,
+    Instruction::INT_TO_SHORT,
+    Instruction::ADD_INT,
+    Instruction::SUB_INT,
+    Instruction::MUL_INT,
+    Instruction::DIV_INT,
+    Instruction::REM_INT,
+    Instruction::AND_INT,
+    Instruction::OR_INT,
+    Instruction::XOR_INT,
+    Instruction::SHL_INT,
+    Instruction::SHR_INT,
+    Instruction::USHR_INT,
+    Instruction::ADD_LONG,
+    Instruction::SUB_LONG,
+    Instruction::MUL_LONG,
+    Instruction::DIV_LONG,
+    Instruction::REM_LONG,
+    Instruction::AND_LONG,
+    Instruction::OR_LONG,
+    Instruction::XOR_LONG,
+    Instruction::SHL_LONG,
+    Instruction::SHR_LONG,
+    Instruction::USHR_LONG,
+    Instruction::ADD_FLOAT,
+    Instruction::SUB_FLOAT,
+    Instruction::MUL_FLOAT,
+    Instruction::DIV_FLOAT,
     // Instruction::REM_FLOAT,
-    // Instruction::ADD_DOUBLE,
-    // Instruction::SUB_DOUBLE,
-    // Instruction::MUL_DOUBLE,
-    // Instruction::DIV_DOUBLE,
+    Instruction::ADD_DOUBLE,
+    Instruction::SUB_DOUBLE,
+    Instruction::MUL_DOUBLE,
+    Instruction::DIV_DOUBLE,
     // Instruction::REM_DOUBLE,
-    // Instruction::ADD_INT_2ADDR,
-    // Instruction::SUB_INT_2ADDR,
-    // Instruction::MUL_INT_2ADDR,
-    // Instruction::DIV_INT_2ADDR,
-    // Instruction::REM_INT_2ADDR,
-    // Instruction::AND_INT_2ADDR,
-    // Instruction::OR_INT_2ADDR,
-    // Instruction::XOR_INT_2ADDR,
-    // Instruction::SHL_INT_2ADDR,
-    // Instruction::SHR_INT_2ADDR,
-    // Instruction::USHR_INT_2ADDR,
-    // Instruction::ADD_LONG_2ADDR,
-    // Instruction::SUB_LONG_2ADDR,
-    // Instruction::MUL_LONG_2ADDR,
-    // Instruction::DIV_LONG_2ADDR,
-    // Instruction::REM_LONG_2ADDR,
-    // Instruction::AND_LONG_2ADDR,
-    // Instruction::OR_LONG_2ADDR,
-    // Instruction::XOR_LONG_2ADDR,
-    // Instruction::SHL_LONG_2ADDR,
-    // Instruction::SHR_LONG_2ADDR,
-    // Instruction::USHR_LONG_2ADDR,
-    // Instruction::ADD_FLOAT_2ADDR,
-    // Instruction::SUB_FLOAT_2ADDR,
-    // Instruction::MUL_FLOAT_2ADDR,
-    // Instruction::DIV_FLOAT_2ADDR,
+    Instruction::ADD_INT_2ADDR,
+    Instruction::SUB_INT_2ADDR,
+    Instruction::MUL_INT_2ADDR,
+    Instruction::DIV_INT_2ADDR,
+    Instruction::REM_INT_2ADDR,
+    Instruction::AND_INT_2ADDR,
+    Instruction::OR_INT_2ADDR,
+    Instruction::XOR_INT_2ADDR,
+    Instruction::SHL_INT_2ADDR,
+    Instruction::SHR_INT_2ADDR,
+    Instruction::USHR_INT_2ADDR,
+    Instruction::ADD_LONG_2ADDR,
+    Instruction::SUB_LONG_2ADDR,
+    Instruction::MUL_LONG_2ADDR,
+    Instruction::DIV_LONG_2ADDR,
+    Instruction::REM_LONG_2ADDR,
+    Instruction::AND_LONG_2ADDR,
+    Instruction::OR_LONG_2ADDR,
+    Instruction::XOR_LONG_2ADDR,
+    Instruction::SHL_LONG_2ADDR,
+    Instruction::SHR_LONG_2ADDR,
+    Instruction::USHR_LONG_2ADDR,
+    Instruction::ADD_FLOAT_2ADDR,
+    Instruction::SUB_FLOAT_2ADDR,
+    Instruction::MUL_FLOAT_2ADDR,
+    Instruction::DIV_FLOAT_2ADDR,
     // Instruction::REM_FLOAT_2ADDR,
-    // Instruction::ADD_DOUBLE_2ADDR,
-    // Instruction::SUB_DOUBLE_2ADDR,
-    // Instruction::MUL_DOUBLE_2ADDR,
-    // Instruction::DIV_DOUBLE_2ADDR,
+    Instruction::ADD_DOUBLE_2ADDR,
+    Instruction::SUB_DOUBLE_2ADDR,
+    Instruction::MUL_DOUBLE_2ADDR,
+    Instruction::DIV_DOUBLE_2ADDR,
     // Instruction::REM_DOUBLE_2ADDR,
-    // Instruction::ADD_INT_LIT16,
-    // Instruction::RSUB_INT,
-    // Instruction::MUL_INT_LIT16,
-    // Instruction::DIV_INT_LIT16,
-    // Instruction::REM_INT_LIT16,
-    // Instruction::AND_INT_LIT16,
-    // Instruction::OR_INT_LIT16,
-    // Instruction::XOR_INT_LIT16,
+    Instruction::ADD_INT_LIT16,
+    Instruction::RSUB_INT,
+    Instruction::MUL_INT_LIT16,
+    Instruction::DIV_INT_LIT16,
+    Instruction::REM_INT_LIT16,
+    Instruction::AND_INT_LIT16,
+    Instruction::OR_INT_LIT16,
+    Instruction::XOR_INT_LIT16,
     Instruction::ADD_INT_LIT8,
-    // Instruction::RSUB_INT_LIT8,
-    // Instruction::MUL_INT_LIT8,
-    // Instruction::DIV_INT_LIT8,
-    // Instruction::REM_INT_LIT8,
-    // Instruction::AND_INT_LIT8,
-    // Instruction::OR_INT_LIT8,
-    // Instruction::XOR_INT_LIT8,
-    // Instruction::SHL_INT_LIT8,
-    // Instruction::SHR_INT_LIT8,
-    // Instruction::USHR_INT_LIT8,
+    Instruction::RSUB_INT_LIT8,
+    Instruction::MUL_INT_LIT8,
+    Instruction::DIV_INT_LIT8,
+    Instruction::REM_INT_LIT8,
+    Instruction::AND_INT_LIT8,
+    Instruction::OR_INT_LIT8,
+    Instruction::XOR_INT_LIT8,
+    Instruction::SHL_INT_LIT8,
+    Instruction::SHR_INT_LIT8,
+    Instruction::USHR_INT_LIT8,
     // Instruction::IGET_QUICK,
     // Instruction::IGET_WIDE_QUICK,
     // Instruction::IGET_OBJECT_QUICK,
@@ -392,21 +393,22 @@
     // Instruction::UNUSED_FE,
     // Instruction::UNUSED_FF,
 
+    // TODO(Arm64): Enable compiler pass
     // ----- ExtendedMIROpcode -----
-    // kMirOpPhi,
-    // kMirOpCopy,
-    // kMirOpFusedCmplFloat,
-    // kMirOpFusedCmpgFloat,
-    // kMirOpFusedCmplDouble,
-    // kMirOpFusedCmpgDouble,
-    // kMirOpFusedCmpLong,
-    // kMirOpNop,
-    // kMirOpNullCheck,
-    // kMirOpRangeCheck,
-    // kMirOpDivZeroCheck,
+    kMirOpPhi,
+    kMirOpCopy,
+    kMirOpFusedCmplFloat,
+    kMirOpFusedCmpgFloat,
+    kMirOpFusedCmplDouble,
+    kMirOpFusedCmpgDouble,
+    kMirOpFusedCmpLong,
+    kMirOpNop,
+    kMirOpNullCheck,
+    kMirOpRangeCheck,
+    kMirOpDivZeroCheck,
     kMirOpCheck,
-    // kMirOpCheckPart2,
-    // kMirOpSelect,
+    kMirOpCheckPart2,
+    kMirOpSelect,
     // kMirOpLast,
 };
 
@@ -692,14 +694,14 @@
 // S : short
 // C : char
 // I : int
-// L : long
+// J : long
 // F : float
 // D : double
 // L : reference(object, array)
 // V : void
 // (ARM64) The current calling convention only supports 32-bit softfp,
 //         which has problems with long, float and double.
-constexpr char arm64_supported_types[] = "ZBSCILV";
+constexpr char arm64_supported_types[] = "ZBSCILVJFD";
 // (x86_64) We still have trouble compiling longs, doubles and floats.
 constexpr char x86_64_supported_types[] = "ZBSCILV";
 
@@ -749,7 +751,7 @@
     }
 
     for (int idx = 0; idx < cu.mir_graph->GetNumBlocks(); idx++) {
-      BasicBlock *bb = cu.mir_graph->GetBasicBlock(idx);
+      BasicBlock* bb = cu.mir_graph->GetBasicBlock(idx);
       if (bb == NULL) continue;
       if (bb->block_type == kDead) continue;
       for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
@@ -757,7 +759,7 @@
         // Check if we support the byte code.
         if (std::find(support_list, support_list + support_list_size,
             opcode) == support_list + support_list_size) {
-          if (opcode < kMirOpFirst) {
+          if (!cu.mir_graph->IsPseudoMirOp(opcode)) {
             VLOG(compiler) << "Unsupported dalvik byte code : "
                            << mir->dalvikInsn.opcode;
           } else {
@@ -879,8 +881,9 @@
         (1 << kPromoteCompilerTemps));
   }
 
-  if (cu.instruction_set == kArm64) {
+  if (cu.instruction_set == kArm64 || cu.instruction_set == kX86_64) {
     // TODO(Arm64): enable optimizations once backend is mature enough.
+    // TODO(X86_64): enable optimizations once backend is mature enough.
     cu.disable_opt = ~(uint32_t)0;
     cu.enable_debug |= (1 << kDebugCodegenDump);
   }
@@ -924,7 +927,7 @@
   }
 
   /* Create the pass driver and launch it */
-  PassDriver pass_driver(&cu);
+  PassDriverMEOpts pass_driver(&cu);
   pass_driver.Launch();
 
   if (cu.enable_debug & (1 << kDebugDumpCheckStats)) {
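
The hunks above enable a large batch of opcodes for arm64, and the frontend
gates compilation by scanning every MIR opcode against the support list with
std::find (see the hunk around the VLOG above). A compact, self-contained
sketch of that gate with toy opcodes (the real list here is
arm64_support_list):

    // Sketch of the support-list gate: a method is accepted for the
    // experimental backend only if every opcode is in the support list.
    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    enum Opcode { NOP, MOVE, RETURN_VOID, PACKED_SWITCH };

    static const Opcode kSupportList[] = { NOP, MOVE, RETURN_VOID };
    static const Opcode* kSupportEnd =
        kSupportList + sizeof(kSupportList) / sizeof(kSupportList[0]);

    bool CanCompile(const Opcode* insns, size_t count) {
      for (size_t i = 0; i != count; ++i) {
        // Same shape as the std::find() check above: any opcode missing
        // from the support list rejects the whole method.
        if (std::find(kSupportList, kSupportEnd, insns[i]) == kSupportEnd) {
          return false;
        }
      }
      return true;
    }

    int main() {
      const Opcode ok[] = { NOP, MOVE, RETURN_VOID };
      const Opcode bad[] = { MOVE, PACKED_SWITCH };
      assert(CanCompile(ok, 3));
      assert(!CanCompile(bad, 2));
      return 0;
    }
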
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index c0068b2..6259496 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -21,8 +21,48 @@
 
 namespace art {
 
-uint16_t LocalValueNumbering::GetFieldId(const DexFile* dex_file, uint16_t field_idx) {
-  FieldReference key = { dex_file, field_idx };
+namespace {  // anonymous namespace
+
+// Operations used for value map keys instead of actual opcode.
+static constexpr uint16_t kInvokeMemoryVersionBumpOp = Instruction::INVOKE_DIRECT;
+static constexpr uint16_t kUnresolvedSFieldOp = Instruction::SPUT;
+static constexpr uint16_t kResolvedSFieldOp = Instruction::SGET;
+static constexpr uint16_t kUnresolvedIFieldOp = Instruction::IPUT;
+static constexpr uint16_t kNonAliasingIFieldOp = Instruction::IGET;
+static constexpr uint16_t kAliasingIFieldOp = Instruction::IGET_WIDE;
+static constexpr uint16_t kAliasingIFieldStartVersionOp = Instruction::IGET_WIDE;
+static constexpr uint16_t kAliasingIFieldBumpVersionOp = Instruction::IGET_OBJECT;
+static constexpr uint16_t kArrayAccessLocOp = Instruction::APUT;
+static constexpr uint16_t kNonAliasingArrayOp = Instruction::AGET;
+static constexpr uint16_t kNonAliasingArrayStartVersionOp = Instruction::AGET_WIDE;
+static constexpr uint16_t kAliasingArrayOp = Instruction::AGET_OBJECT;
+static constexpr uint16_t kAliasingArrayMemoryVersionOp = Instruction::AGET_BOOLEAN;
+static constexpr uint16_t kAliasingArrayBumpVersionOp = Instruction::AGET_BYTE;
+
+}  // anonymous namespace
+
+LocalValueNumbering::LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
+    : cu_(cu),
+      last_value_(0u),
+      sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+      sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+      value_map_(std::less<uint64_t>(), allocator->Adapter()),
+      global_memory_version_(0u),
+      aliasing_ifield_version_map_(std::less<uint16_t>(), allocator->Adapter()),
+      non_aliasing_array_version_map_(std::less<uint16_t>(), allocator->Adapter()),
+      field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
+      non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
+      non_aliasing_ifields_(NonAliasingIFieldKeyComparator(), allocator->Adapter()),
+      escaped_array_refs_(EscapedArrayKeyComparator(), allocator->Adapter()),
+      range_checked_(RangeCheckKeyComparator(), allocator->Adapter()),
+      null_checked_(std::less<uint16_t>(), allocator->Adapter()) {
+  std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
+  std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
+  std::fill_n(aliasing_array_version_, kFieldTypeCount, 0u);
+}
+
+uint16_t LocalValueNumbering::GetFieldId(const MirFieldInfo& field_info) {
+  FieldReference key = { field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex() };
   auto it = field_index_map_.find(key);
   if (it != field_index_map_.end()) {
     return it->second;
@@ -32,62 +72,6 @@
   return id;
 }
 
-void LocalValueNumbering::AdvanceGlobalMemory() {
-  // See AdvanceMemoryVersion() for explanation.
-  global_memory_version_ = next_memory_version_;
-  ++next_memory_version_;
-}
-
-uint16_t LocalValueNumbering::GetMemoryVersion(uint16_t base, uint16_t field, uint16_t type) {
-  // See AdvanceMemoryVersion() for explanation.
-  MemoryVersionKey key = { base, field, type };
-  MemoryVersionMap::iterator it = memory_version_map_.find(key);
-  uint16_t memory_version = (it != memory_version_map_.end()) ? it->second : 0u;
-  if (base != NO_VALUE && non_aliasing_refs_.find(base) == non_aliasing_refs_.end()) {
-    // Check modifications by potentially aliased access.
-    MemoryVersionKey aliased_access_key = { NO_VALUE, field, type };
-    auto aa_it = memory_version_map_.find(aliased_access_key);
-    if (aa_it != memory_version_map_.end() && aa_it->second > memory_version) {
-      memory_version = aa_it->second;
-    }
-    memory_version = std::max(memory_version, global_memory_version_);
-  } else if (base != NO_VALUE) {
-    // Ignore global_memory_version_ for access via unique references.
-  } else {
-    memory_version = std::max(memory_version, global_memory_version_);
-  }
-  return memory_version;
-};
-
-uint16_t LocalValueNumbering::AdvanceMemoryVersion(uint16_t base, uint16_t field, uint16_t type) {
-  // When we read the same value from memory, we want to assign the same value name to it.
-  // However, we need to be careful not to assign the same value name if the memory location
-  // may have been written to between the reads. To avoid that we do "memory versioning".
-  //
-  // For each write to a memory location (instance field, static field, array element) we assign
-  // a new memory version number to the location identified by the value name of the base register,
-  // the field id and type, or "{ base, field, type }". For static fields the "base" is NO_VALUE
-  // since they are not accessed via a reference. For arrays the "field" is NO_VALUE since they
-  // don't have a field id.
-  //
-  // To account for the possibility of aliased access to the same memory location via different
-  // "base", we also store the memory version number with the key "{ NO_VALUE, field, type }"
-  // if "base" is an aliasing reference and check it in GetMemoryVersion() on reads via
-  // aliasing references. A global memory version is set for method calls as a method can
-  // potentially write to any memory location accessed via an aliasing reference.
-
-  uint16_t result = next_memory_version_;
-  ++next_memory_version_;
-  MemoryVersionKey key = { base, field, type };
-  memory_version_map_.Overwrite(key, result);
-  if (base != NO_VALUE && non_aliasing_refs_.find(base) == non_aliasing_refs_.end()) {
-    // Advance memory version for aliased access.
-    MemoryVersionKey aliased_access_key = { NO_VALUE, field, type };
-    memory_version_map_.Overwrite(aliased_access_key, result);
-  }
-  return result;
-};
-
 uint16_t LocalValueNumbering::MarkNonAliasingNonNull(MIR* mir) {
   uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
   SetOperandValue(mir->ssa_rep->defs[0], res);
@@ -97,43 +81,332 @@
   return res;
 }
 
-void LocalValueNumbering::MakeArgsAliasing(MIR* mir) {
-  for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
-    uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
-    non_aliasing_refs_.erase(reg);
-  }
+bool LocalValueNumbering::IsNonAliasing(uint16_t reg) {
+  return non_aliasing_refs_.find(reg) != non_aliasing_refs_.end();
 }
 
+bool LocalValueNumbering::IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type) {
+  if (IsNonAliasing(reg)) {
+    return true;
+  }
+  NonAliasingIFieldKey key = { reg, field_id, type };
+  return non_aliasing_ifields_.count(key) != 0u;
+}
+
+bool LocalValueNumbering::IsNonAliasingArray(uint16_t reg, uint16_t type) {
+  if (IsNonAliasing(reg)) {
+    return true;
+  }
+  EscapedArrayKey key = { reg, type };
+  return escaped_array_refs_.count(key) != 0u;
+}
+
 void LocalValueNumbering::HandleNullCheck(MIR* mir, uint16_t reg) {
-  if (null_checked_.find(reg) != null_checked_.end()) {
-    if (cu_->verbose) {
-      LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+  auto lb = null_checked_.lower_bound(reg);
+  if (lb != null_checked_.end() && *lb == reg) {
+    if (LIKELY(Good())) {
+      if (cu_->verbose) {
+        LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+      }
+      mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
     }
-    mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
   } else {
-    null_checked_.insert(reg);
+    null_checked_.insert(lb, reg);
   }
 }
 
 void LocalValueNumbering::HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index) {
-  if (ValueExists(ARRAY_REF, array, index, NO_VALUE)) {
-    if (cu_->verbose) {
-      LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+  RangeCheckKey key = { array, index };
+  auto lb = range_checked_.lower_bound(key);
+  if (lb != range_checked_.end() && !RangeCheckKeyComparator()(key, *lb)) {
+    if (LIKELY(Good())) {
+      if (cu_->verbose) {
+        LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+      }
+      mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
     }
-    mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
+  } else {
+    // Mark range check completed.
+    range_checked_.insert(lb, key);
   }
-  // Use side effect to note range check completed.
-  (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
 }
 
 void LocalValueNumbering::HandlePutObject(MIR* mir) {
   // If we're storing a non-aliasing reference, stop tracking it as non-aliasing now.
   uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
-  non_aliasing_refs_.erase(base);
+  HandleEscapingRef(base);
+}
+
+void LocalValueNumbering::HandleEscapingRef(uint16_t base) {
+  auto it = non_aliasing_refs_.find(base);
+  if (it != non_aliasing_refs_.end()) {
+    uint64_t iget_key = BuildKey(Instruction::IGET, base, 0u, 0u);
+    for (auto iget_it = value_map_.lower_bound(iget_key), iget_end = value_map_.end();
+        iget_it != iget_end && EqualOpAndOperand1(iget_it->first, iget_key); ++iget_it) {
+      uint16_t field_id = ExtractOperand2(iget_it->first);
+      uint16_t type = ExtractModifier(iget_it->first);
+      NonAliasingIFieldKey key = { base, field_id, type };
+      non_aliasing_ifields_.insert(key);
+    }
+    uint64_t aget_key = BuildKey(kNonAliasingArrayStartVersionOp, base, 0u, 0u);
+    auto aget_it = value_map_.lower_bound(aget_key);
+    if (aget_it != value_map_.end() && EqualOpAndOperand1(aget_key, aget_it->first)) {
+      DCHECK_EQ(ExtractOperand2(aget_it->first), kNoValue);
+      uint16_t type = ExtractModifier(aget_it->first);
+      EscapedArrayKey key = { base, type };
+      escaped_array_refs_.insert(key);
+    }
+    non_aliasing_refs_.erase(it);
+  }
+}
+
+uint16_t LocalValueNumbering::HandleAGet(MIR* mir, uint16_t opcode) {
+  uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
+  HandleNullCheck(mir, array);
+  uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
+  HandleRangeCheck(mir, array, index);
+  uint16_t type = opcode - Instruction::AGET;
+  // Establish value number for loaded register.
+  uint16_t res;
+  if (IsNonAliasingArray(array, type)) {
+    // Get the start version that accounts for aliasing within the array (different index names).
+    uint16_t start_version = LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, type);
+    // Find the current version from the non_aliasing_array_version_map_.
+    uint16_t memory_version = start_version;
+    auto it = non_aliasing_array_version_map_.find(start_version);
+    if (it != non_aliasing_array_version_map_.end()) {
+      memory_version = it->second;
+    } else {
+      // Just use the start_version.
+    }
+    res = LookupValue(kNonAliasingArrayOp, array, index, memory_version);
+  } else {
+    // Get the memory version of aliased array accesses of this type.
+    uint16_t memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
+                                          aliasing_array_version_[type], kNoValue);
+    res = LookupValue(kAliasingArrayOp, array, index, memory_version);
+  }
+  if (opcode == Instruction::AGET_WIDE) {
+    SetOperandValueWide(mir->ssa_rep->defs[0], res);
+  } else {
+    SetOperandValue(mir->ssa_rep->defs[0], res);
+  }
+  return res;
+}
+
+void LocalValueNumbering::HandleAPut(MIR* mir, uint16_t opcode) {
+  int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
+  int index_idx = array_idx + 1;
+  uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
+  HandleNullCheck(mir, array);
+  uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
+  HandleRangeCheck(mir, array, index);
+
+  uint16_t type = opcode - Instruction::APUT;
+  uint16_t value = (opcode == Instruction::APUT_WIDE)
+                   ? GetOperandValueWide(mir->ssa_rep->uses[0])
+                   : GetOperandValue(mir->ssa_rep->uses[0]);
+  if (IsNonAliasing(array)) {
+    // Get the start version that accounts for aliasing within the array (different index values).
+    uint16_t start_version = LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, type);
+    auto it = non_aliasing_array_version_map_.find(start_version);
+    uint16_t memory_version = start_version;
+    if (it != non_aliasing_array_version_map_.end()) {
+      memory_version = it->second;
+    }
+    // We need to take 4 values (array, index, memory_version, value) into account for bumping
+    // the memory version but the key can take only 3. Merge array and index into a location.
+    uint16_t array_access_location = LookupValue(kArrayAccessLocOp, array, index, kNoValue);
+    // Bump the version, adding to the chain.
+    memory_version = LookupValue(kAliasingArrayBumpVersionOp, memory_version,
+                                 array_access_location, value);
+    non_aliasing_array_version_map_.Overwrite(start_version, memory_version);
+    StoreValue(kNonAliasingArrayOp, array, index, memory_version, value);
+  } else {
+    // Get the memory version based on global_memory_version_ and aliasing_array_version_[type].
+    uint16_t memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
+                                          aliasing_array_version_[type], kNoValue);
+    if (HasValue(kAliasingArrayOp, array, index, memory_version, value)) {
+      // This APUT can be eliminated, it stores the same value that's already in the field.
+      // TODO: Eliminate the APUT.
+      return;
+    }
+    // We need to take 4 values (array, index, memory_version, value) into account for bumping
+    // the memory version but the key can take only 3. Merge array and index into a location.
+    uint16_t array_access_location = LookupValue(kArrayAccessLocOp, array, index, kNoValue);
+    // Bump the version, adding to the chain.
+    uint16_t bumped_version = LookupValue(kAliasingArrayBumpVersionOp, memory_version,
+                                          array_access_location, value);
+    aliasing_array_version_[type] = bumped_version;
+    memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
+                                 bumped_version, kNoValue);
+    StoreValue(kAliasingArrayOp, array, index, memory_version, value);
+
+    // Clear escaped array refs for this type.
+    EscapedArrayKey array_key = { type, 0u };
+    auto it = escaped_array_refs_.lower_bound(array_key), end = escaped_array_refs_.end();
+    while (it != end && it->type == type) {
+      it = escaped_array_refs_.erase(it);
+    }
+  }
+}
+
+uint16_t LocalValueNumbering::HandleIGet(MIR* mir, uint16_t opcode) {
+  uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
+  HandleNullCheck(mir, base);
+  const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+  uint16_t res;
+  if (!field_info.IsResolved() || field_info.IsVolatile()) {
+    // Volatile fields always get a new memory version; field id is irrelevant.
+    // Unresolved fields may be volatile, so handle them as such to be safe.
+    // Use result s_reg - will be unique.
+    res = LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
+  } else {
+    uint16_t type = opcode - Instruction::IGET;
+    uint16_t field_id = GetFieldId(field_info);
+    if (IsNonAliasingIField(base, field_id, type)) {
+      res = LookupValue(kNonAliasingIFieldOp, base, field_id, type);
+    } else {
+      // Get the start version that accounts for aliasing with unresolved fields of the same type
+      // and make it unique for the field by including the field_id.
+      uint16_t start_version = LookupValue(kAliasingIFieldStartVersionOp, global_memory_version_,
+                                           unresolved_ifield_version_[type], field_id);
+      // Find the current version from the aliasing_ifield_version_map_.
+      uint16_t memory_version = start_version;
+      auto version_it = aliasing_ifield_version_map_.find(start_version);
+      if (version_it != aliasing_ifield_version_map_.end()) {
+        memory_version = version_it->second;
+      } else {
+        // Just use the start_version.
+      }
+      res = LookupValue(kAliasingIFieldOp, base, field_id, memory_version);
+    }
+  }
+  if (opcode == Instruction::IGET_WIDE) {
+    SetOperandValueWide(mir->ssa_rep->defs[0], res);
+  } else {
+    SetOperandValue(mir->ssa_rep->defs[0], res);
+  }
+  return res;
+}
+
+void LocalValueNumbering::HandleIPut(MIR* mir, uint16_t opcode) {
+  uint16_t type = opcode - Instruction::IPUT;
+  int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
+  uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
+  HandleNullCheck(mir, base);
+  const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+  if (!field_info.IsResolved()) {
+    // Unresolved fields always alias with everything of the same type.
+    // Use mir->offset as modifier; without elaborate inlining, it will be unique.
+    unresolved_ifield_version_[type] =
+        LookupValue(kUnresolvedIFieldOp, kNoValue, kNoValue, mir->offset);
+
+    // Treat fields of escaped references of the same type as potentially modified.
+    NonAliasingIFieldKey key = { 0u, 0u, type };  // lowest possible key of this type.
+    auto it = non_aliasing_ifields_.lower_bound(key), end = non_aliasing_ifields_.end();
+    while (it != end && it->type == type) {
+      it = non_aliasing_ifields_.erase(it);
+    }
+  } else if (field_info.IsVolatile()) {
+    // Nothing to do, resolved volatile fields always get a new memory version anyway and
+    // can't alias with resolved non-volatile fields.
+  } else {
+    uint16_t field_id = GetFieldId(field_info);
+    uint16_t value = (opcode == Instruction::IPUT_WIDE)
+                     ? GetOperandValueWide(mir->ssa_rep->uses[0])
+                     : GetOperandValue(mir->ssa_rep->uses[0]);
+    if (IsNonAliasing(base)) {
+      StoreValue(kNonAliasingIFieldOp, base, field_id, type, value);
+    } else {
+      // Get the start version that accounts for aliasing with unresolved fields of the same type
+      // and make it unique for the field by including the field_id.
+      uint16_t start_version = LookupValue(kAliasingIFieldStartVersionOp, global_memory_version_,
+                                           unresolved_ifield_version_[type], field_id);
+      // Find the old version from the aliasing_ifield_version_map_.
+      uint16_t old_version = start_version;
+      auto version_it = aliasing_ifield_version_map_.find(start_version);
+      if (version_it != aliasing_ifield_version_map_.end()) {
+        old_version = version_it->second;
+      }
+      // Check if the field currently contains the value, making this a NOP.
+      if (HasValue(kAliasingIFieldOp, base, field_id, old_version, value)) {
+        // This IPUT can be eliminated, it stores the same value that's already in the field.
+        // TODO: Eliminate the IPUT.
+        return;
+      }
+      // Bump the version, adding to the chain started by start_version.
+      uint16_t memory_version = LookupValue(kAliasingIFieldBumpVersionOp, old_version, base, value);
+      // Update the aliasing_ifield_version_map_ so that HandleIGet() can get the memory_version
+      // without knowing the values used to build the chain.
+      aliasing_ifield_version_map_.Overwrite(start_version, memory_version);
+      StoreValue(kAliasingIFieldOp, base, field_id, memory_version, value);
+
+      // Clear non-aliasing fields for this field_id.
+      NonAliasingIFieldKey field_key = { 0u, field_id, type };  // lowest key for this field_id.
+      auto it = non_aliasing_ifields_.lower_bound(field_key), end = non_aliasing_ifields_.end();
+      while (it != end && it->field_id == field_id) {
+        DCHECK_EQ(type, it->type);
+        it = non_aliasing_ifields_.erase(it);
+      }
+    }
+  }
+}
+
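
The start-version protocol used by HandleAPut and HandleIPut can be summarized in a small sketch (an assumed simplification; `version_map`, `CurrentVersion` and `BumpVersion` are illustrative names): the start version is a pure function of recomputable inputs, so both stores and later loads can derive it and use it as a stable map key whose mapped value is the current head of the bump chain.

```cpp
#include <cassert>
#include <cstdint>
#include <map>

// start_version -> current version at the head of the bump chain.
std::map<uint16_t, uint16_t> version_map;

uint16_t CurrentVersion(uint16_t start_version) {
  auto it = version_map.find(start_version);
  return it != version_map.end() ? it->second : start_version;
}

void BumpVersion(uint16_t start_version, uint16_t new_version) {
  version_map[start_version] = new_version;  // Overwrite; the key stays stable.
}

int main() {
  uint16_t start = 100;  // Recomputable from (global, unresolved, field_id).
  assert(CurrentVersion(start) == start);  // No store yet: head is the start.
  BumpVersion(start, 101);                 // An aliasing IPUT extends the chain.
  assert(CurrentVersion(start) == 101);    // A later IGET sees the new version.
  return 0;
}
```
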
+uint16_t LocalValueNumbering::HandleSGet(MIR* mir, uint16_t opcode) {
+  const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+  uint16_t res;
+  if (!field_info.IsResolved() || field_info.IsVolatile()) {
+    // Volatile fields always get a new memory version; field id is irrelevant.
+    // Unresolved fields may be volatile, so handle them as such to be safe.
+    // Use result s_reg - will be unique.
+    res = LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
+  } else {
+    uint16_t field_id = GetFieldId(field_info);
+    // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
+    // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
+    // to determine the version of the field.
+    uint16_t type = opcode - Instruction::SGET;
+    res = LookupValue(kResolvedSFieldOp, field_id,
+                      unresolved_sfield_version_[type], global_memory_version_);
+  }
+  if (opcode == Instruction::SGET_WIDE) {
+    SetOperandValueWide(mir->ssa_rep->defs[0], res);
+  } else {
+    SetOperandValue(mir->ssa_rep->defs[0], res);
+  }
+  return res;
+}
+
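
A compact illustration of why both `unresolved_sfield_version_[type]` and `global_memory_version_` belong in the resolved-SGET key (hypothetical names; a plain `std::map` stands in for the arena containers): bumping either component forces a fresh value name for the same field, killing redundant-load elimination exactly when it must be killed.

```cpp
#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>

int main() {
  std::map<std::tuple<uint16_t, uint16_t, uint16_t>, uint16_t> names;
  uint16_t next = 0;
  auto name = [&](uint16_t field, uint16_t unres, uint16_t global) -> uint16_t {
    auto key = std::make_tuple(field, unres, global);
    auto it = names.find(key);
    return it != names.end() ? it->second : (names[key] = ++next);
  };
  uint16_t v1 = name(1, 0, 0);  // SGET before any clobber.
  uint16_t v2 = name(1, 0, 0);  // Repeated SGET: same name, load is redundant.
  uint16_t v3 = name(1, 7, 0);  // After an unresolved SPUT of the same type.
  uint16_t v4 = name(1, 7, 9);  // After an invoke bumped the global version.
  assert(v1 == v2 && v1 != v3 && v3 != v4 && v1 != v4);
  return 0;
}
```
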
+void LocalValueNumbering::HandleSPut(MIR* mir, uint16_t opcode) {
+  uint16_t type = opcode - Instruction::SPUT;
+  const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+  if (!field_info.IsResolved()) {
+    // Unresolved fields always alias with everything of the same type.
+    // Use mir->offset as modifier; without elaborate inlining, it will be unique.
+    unresolved_sfield_version_[type] =
+        LookupValue(kUnresolvedSFieldOp, kNoValue, kNoValue, mir->offset);
+  } else if (field_info.IsVolatile()) {
+    // Nothing to do, resolved volatile fields always get a new memory version anyway and
+    // can't alias with resolved non-volatile fields.
+  } else {
+    uint16_t field_id = GetFieldId(field_info);
+    uint16_t value = (opcode == Instruction::SPUT_WIDE)
+                     ? GetOperandValueWide(mir->ssa_rep->uses[0])
+                     : GetOperandValue(mir->ssa_rep->uses[0]);
+    // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
+    // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
+    // to determine the version of the field.
+    StoreValue(kResolvedSFieldOp, field_id,
+               unresolved_sfield_version_[type], global_memory_version_, value);
+  }
 }
 
 uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
-  uint16_t res = NO_VALUE;
+  uint16_t res = kNoValue;
   uint16_t opcode = mir->dalvikInsn.opcode;
   switch (opcode) {
     case Instruction::NOP:
@@ -176,9 +449,14 @@
       // Nothing defined but the result will be unique and non-null.
       if (mir->next != nullptr && mir->next->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
         MarkNonAliasingNonNull(mir->next);
+        // TUNING: We could track value names stored in the array.
         // The MOVE_RESULT_OBJECT will be processed next and we'll return the value name then.
       }
-      MakeArgsAliasing(mir);
+      // All args escaped (if references).
+      for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
+        uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
+        HandleEscapingRef(reg);
+      }
       break;
 
     case Instruction::INVOKE_DIRECT:
@@ -197,8 +475,17 @@
     case Instruction::INVOKE_STATIC:
     case Instruction::INVOKE_STATIC_RANGE:
       if ((mir->optimization_flags & MIR_INLINED) == 0) {
-        AdvanceGlobalMemory();
-        MakeArgsAliasing(mir);
+        // Use mir->offset as modifier; without elaborate inlining, it will be unique.
+        global_memory_version_ = LookupValue(kInvokeMemoryVersionBumpOp, 0u, 0u, mir->offset);
+        // Make ref args aliasing.
+        for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
+          uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
+          non_aliasing_refs_.erase(reg);
+        }
+        // All fields of escaped references need to be treated as potentially modified.
+        non_aliasing_ifields_.clear();
+        // Array elements may also have been modified via escaped array refs.
+        escaped_array_refs_.clear();
       }
       break;
 
@@ -211,13 +498,24 @@
       break;
     case Instruction::MOVE_EXCEPTION:
     case Instruction::NEW_INSTANCE:
-    case Instruction::CONST_STRING:
-    case Instruction::CONST_STRING_JUMBO:
     case Instruction::CONST_CLASS:
     case Instruction::NEW_ARRAY:
       // 1 result, treat as unique each time, use result s_reg - will be unique.
       res = MarkNonAliasingNonNull(mir);
       break;
+    case Instruction::CONST_STRING:
+    case Instruction::CONST_STRING_JUMBO:
+      // These strings are interned, so assign the value based on the string pool index.
+      res = LookupValue(Instruction::CONST_STRING, Low16Bits(mir->dalvikInsn.vB),
+                        High16Bits(mir->dalvikInsn.vB), 0);
+      SetOperandValue(mir->ssa_rep->defs[0], res);
+      null_checked_.insert(res);  // May already be there.
+      // NOTE: Hacking the contents of an interned string via reflection is possible
+      // but the behavior is undefined. Therefore, we consider the string constant and
+      // the reference non-aliasing.
+      // TUNING: We could keep this property even if the reference "escapes".
+      non_aliasing_refs_.insert(res);  // May already be there.
+      break;
     case Instruction::MOVE_RESULT_WIDE:
       // 1 wide result, treat as unique each time, use result s_reg - will be unique.
       res = GetOperandValueWide(mir->ssa_rep->defs[0]);
@@ -255,7 +553,7 @@
     case Instruction::CONST_4:
     case Instruction::CONST_16:
       res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
-                        High16Bits(mir->dalvikInsn.vB >> 16), 0);
+                        High16Bits(mir->dalvikInsn.vB), 0);
       SetOperandValue(mir->ssa_rep->defs[0], res);
       break;
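
The CONST hunk above also fixes a collision bug: with a 32-bit `vB`, `High16Bits(vB >> 16)` discards the high half entirely and always yields zero, so constants differing only in their upper 16 bits received the same value name. A standalone check (the two helpers are re-modeled locally under their assumed semantics):

```cpp
#include <cassert>
#include <cstdint>

// Local models of the helpers (assumed semantics: halves of a 32-bit word).
static uint16_t Low16Bits(uint32_t v) { return static_cast<uint16_t>(v); }
static uint16_t High16Bits(uint32_t v) { return static_cast<uint16_t>(v >> 16); }

int main() {
  uint32_t vB = 0x12345678u;
  assert(Low16Bits(vB) == 0x5678u);
  assert(High16Bits(vB) == 0x1234u);   // What the fixed code passes.
  assert(High16Bits(vB >> 16) == 0u);  // What the old code passed: always 0,
                                       // so 0x12345678 and 0x5678 collided.
  return 0;
}
```
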
 
@@ -310,7 +608,7 @@
     case Instruction::FLOAT_TO_INT: {
         // res = op + 1 operand
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -320,8 +618,8 @@
     case Instruction::DOUBLE_TO_FLOAT:
     case Instruction::DOUBLE_TO_INT: {
         // res = op + 1 wide operand
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -334,7 +632,7 @@
     case Instruction::NEG_DOUBLE: {
         // wide res = op + 1 wide operand
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -344,8 +642,8 @@
     case Instruction::INT_TO_DOUBLE:
     case Instruction::INT_TO_LONG: {
         // wide res = op + 1 operand
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -356,7 +654,7 @@
         // res = op + 2 wide operands
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -388,7 +686,7 @@
         // res = op + 2 operands
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -422,7 +720,7 @@
         // wide res = op + 2 wide operands
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -435,8 +733,8 @@
     case Instruction::USHR_LONG_2ADDR: {
         // wide res = op + 1 wide operand + 1 operand
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[2]);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -454,7 +752,7 @@
         // res = op + 2 operands
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -481,7 +779,7 @@
         // Same as res = op + 2 operands, except use vC as operand 2
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = LookupValue(Instruction::CONST, mir->dalvikInsn.vC, 0, 0);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -492,21 +790,8 @@
     case Instruction::AGET_BOOLEAN:
     case Instruction::AGET_BYTE:
     case Instruction::AGET_CHAR:
-    case Instruction::AGET_SHORT: {
-        uint16_t type = opcode - Instruction::AGET;
-        uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
-        HandleNullCheck(mir, array);
-        uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
-        HandleRangeCheck(mir, array, index);
-        // Establish value number for loaded register. Note use of memory version.
-        uint16_t memory_version = GetMemoryVersion(array, NO_VALUE, type);
-        uint16_t res = LookupValue(ARRAY_REF, array, index, memory_version);
-        if (opcode == Instruction::AGET_WIDE) {
-          SetOperandValueWide(mir->ssa_rep->defs[0], res);
-        } else {
-          SetOperandValue(mir->ssa_rep->defs[0], res);
-        }
-      }
+    case Instruction::AGET_SHORT:
+      res = HandleAGet(mir, opcode);
       break;
 
     case Instruction::APUT_OBJECT:
@@ -517,17 +802,8 @@
     case Instruction::APUT_BYTE:
     case Instruction::APUT_BOOLEAN:
     case Instruction::APUT_SHORT:
-    case Instruction::APUT_CHAR: {
-        uint16_t type = opcode - Instruction::APUT;
-        int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
-        int index_idx = array_idx + 1;
-        uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
-        HandleNullCheck(mir, array);
-        uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
-        HandleRangeCheck(mir, array, index);
-        // Rev the memory version
-        AdvanceMemoryVersion(array, NO_VALUE, type);
-      }
+    case Instruction::APUT_CHAR:
+      HandleAPut(mir, opcode);
       break;
 
     case Instruction::IGET_OBJECT:
@@ -536,33 +812,8 @@
     case Instruction::IGET_BOOLEAN:
     case Instruction::IGET_BYTE:
     case Instruction::IGET_CHAR:
-    case Instruction::IGET_SHORT: {
-        uint16_t type = opcode - Instruction::IGET;
-        uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
-        HandleNullCheck(mir, base);
-        const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
-        uint16_t memory_version;
-        uint16_t field_id;
-        if (!field_info.IsResolved() || field_info.IsVolatile()) {
-          // Volatile fields always get a new memory version; field id is irrelevant.
-          // Unresolved fields may be volatile, so handle them as such to be safe.
-          field_id = 0u;
-          memory_version = next_memory_version_;
-          ++next_memory_version_;
-        } else {
-          DCHECK(field_info.IsResolved());
-          field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
-          memory_version = std::max(unresolved_ifield_version_[type],
-                                    GetMemoryVersion(base, field_id, type));
-        }
-        if (opcode == Instruction::IGET_WIDE) {
-          res = LookupValue(Instruction::IGET_WIDE, base, field_id, memory_version);
-          SetOperandValueWide(mir->ssa_rep->defs[0], res);
-        } else {
-          res = LookupValue(Instruction::IGET, base, field_id, memory_version);
-          SetOperandValue(mir->ssa_rep->defs[0], res);
-        }
-      }
+    case Instruction::IGET_SHORT:
+      res = HandleIGet(mir, opcode);
       break;
 
     case Instruction::IPUT_OBJECT:
@@ -573,24 +824,8 @@
     case Instruction::IPUT_BOOLEAN:
     case Instruction::IPUT_BYTE:
     case Instruction::IPUT_CHAR:
-    case Instruction::IPUT_SHORT: {
-        uint16_t type = opcode - Instruction::IPUT;
-        int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
-        uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
-        HandleNullCheck(mir, base);
-        const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
-        if (!field_info.IsResolved()) {
-          // Unresolved fields always alias with everything of the same type.
-          unresolved_ifield_version_[type] = next_memory_version_;
-          ++next_memory_version_;
-        } else if (field_info.IsVolatile()) {
-          // Nothing to do, resolved volatile fields always get a new memory version anyway and
-          // can't alias with resolved non-volatile fields.
-        } else {
-          AdvanceMemoryVersion(base, GetFieldId(field_info.DeclaringDexFile(),
-                                                field_info.DeclaringFieldIndex()), type);
-        }
-      }
+    case Instruction::IPUT_SHORT:
+      HandleIPut(mir, opcode);
       break;
 
     case Instruction::SGET_OBJECT:
@@ -599,31 +834,8 @@
     case Instruction::SGET_BOOLEAN:
     case Instruction::SGET_BYTE:
     case Instruction::SGET_CHAR:
-    case Instruction::SGET_SHORT: {
-        uint16_t type = opcode - Instruction::SGET;
-        const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
-        uint16_t memory_version;
-        uint16_t field_id;
-        if (!field_info.IsResolved() || field_info.IsVolatile()) {
-          // Volatile fields always get a new memory version; field id is irrelevant.
-          // Unresolved fields may be volatile, so handle them as such to be safe.
-          field_id = 0u;
-          memory_version = next_memory_version_;
-          ++next_memory_version_;
-        } else {
-          DCHECK(field_info.IsResolved());
-          field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
-          memory_version = std::max(unresolved_sfield_version_[type],
-                                    GetMemoryVersion(NO_VALUE, field_id, type));
-        }
-        if (opcode == Instruction::SGET_WIDE) {
-          res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_id, memory_version);
-          SetOperandValueWide(mir->ssa_rep->defs[0], res);
-        } else {
-          res = LookupValue(Instruction::SGET, NO_VALUE, field_id, memory_version);
-          SetOperandValue(mir->ssa_rep->defs[0], res);
-        }
-      }
+    case Instruction::SGET_SHORT:
+      res = HandleSGet(mir, opcode);
       break;
 
     case Instruction::SPUT_OBJECT:
@@ -634,21 +846,8 @@
     case Instruction::SPUT_BOOLEAN:
     case Instruction::SPUT_BYTE:
     case Instruction::SPUT_CHAR:
-    case Instruction::SPUT_SHORT: {
-        uint16_t type = opcode - Instruction::SPUT;
-        const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
-        if (!field_info.IsResolved()) {
-          // Unresolved fields always alias with everything of the same type.
-          unresolved_sfield_version_[type] = next_memory_version_;
-          ++next_memory_version_;
-        } else if (field_info.IsVolatile()) {
-          // Nothing to do, resolved volatile fields always get a new memory version anyway and
-          // can't alias with resolved non-volatile fields.
-        } else {
-          AdvanceMemoryVersion(NO_VALUE, GetFieldId(field_info.DeclaringDexFile(),
-                                                    field_info.DeclaringFieldIndex()), type);
-        }
-      }
+    case Instruction::SPUT_SHORT:
+      HandleSPut(mir, opcode);
       break;
   }
   return res;
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 0c2b6a7..2a815be 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -23,15 +23,33 @@
 #include "utils/scoped_arena_allocator.h"
 #include "utils/scoped_arena_containers.h"
 
-#define NO_VALUE 0xffff
-#define ARRAY_REF 0xfffe
-
 namespace art {
 
 class DexFile;
+class MirFieldInfo;
 
 class LocalValueNumbering {
+ public:
+  LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator);
+
+  uint16_t GetValueNumber(MIR* mir);
+
+  // LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
+  static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
+    return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMIR);
+  }
+
+  // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
+  static void operator delete(void* ptr) { UNUSED(ptr); }
+
+  // Checks that the value names didn't overflow.
+  bool Good() const {
+    return last_value_ < kNoValue;
+  }
+
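
A quick illustration of the guard (standalone sketch; the counter and constant are copied locally): names are handed out from a 32-bit counter but must fit in 16 bits, and `kNoValue` itself is reserved, so overflow is detectable after the fact.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  constexpr uint16_t kNoValue = 0xffffu;
  uint32_t last_value = 0u;
  for (uint32_t i = 0u; i != 0xfffeu; ++i) {
    ++last_value;  // Hand out names 1..0xfffe.
  }
  assert(last_value < kNoValue);     // Still good: all names fit below kNoValue.
  ++last_value;                      // One more name...
  assert(!(last_value < kNoValue));  // ...and Good() would now report failure.
  return 0;
}
```
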
  private:
+  static constexpr uint16_t kNoValue = 0xffffu;
+
   // Field types correspond to the ordering of GET/PUT instructions; this order is the same
   // for IGET, IPUT, SGET, SPUT, AGET and APUT:
   // op         0
@@ -43,7 +61,7 @@
   // op_SHORT   6
   static constexpr size_t kFieldTypeCount = 7;
 
-  // FieldReference represents either a unique resolved field or all unresolved fields together.
+  // FieldReference represents a unique resolved field.
   struct FieldReference {
     const DexFile* dex_file;
     uint16_t field_idx;
@@ -58,48 +76,107 @@
     }
   };
 
-  struct MemoryVersionKey {
+  // Maps field key to field id for resolved fields.
+  typedef ScopedArenaSafeMap<FieldReference, uint32_t, FieldReferenceComparator> FieldIndexMap;
+
+  struct RangeCheckKey {
+    uint16_t array;
+    uint16_t index;
+  };
+
+  struct RangeCheckKeyComparator {
+    bool operator()(const RangeCheckKey& lhs, const RangeCheckKey& rhs) const {
+      if (lhs.array != rhs.array) {
+        return lhs.array < rhs.array;
+      }
+      return lhs.index < rhs.index;
+    }
+  };
+
+  typedef ScopedArenaSet<RangeCheckKey, RangeCheckKeyComparator> RangeCheckSet;
+
+  typedef ScopedArenaSafeMap<uint16_t, uint16_t> AliasingIFieldVersionMap;
+  typedef ScopedArenaSafeMap<uint16_t, uint16_t> NonAliasingArrayVersionMap;
+
+  struct NonAliasingIFieldKey {
     uint16_t base;
     uint16_t field_id;
     uint16_t type;
   };
 
-  struct MemoryVersionKeyComparator {
-    bool operator()(const MemoryVersionKey& lhs, const MemoryVersionKey& rhs) const {
-      if (lhs.base != rhs.base) {
-        return lhs.base < rhs.base;
+  struct NonAliasingIFieldKeyComparator {
+    bool operator()(const NonAliasingIFieldKey& lhs, const NonAliasingIFieldKey& rhs) const {
+      // Compare the type first. This allows iterating across all the entries for a certain type
+      // as needed when we need to purge them for an unresolved field IPUT.
+      if (lhs.type != rhs.type) {
+        return lhs.type < rhs.type;
       }
+      // Compare the field second. This allows iterating across all the entries for a certain
+      // field as needed when we need to purge them for an aliasing field IPUT.
       if (lhs.field_id != rhs.field_id) {
         return lhs.field_id < rhs.field_id;
       }
-      return lhs.type < rhs.type;
+      // Compare the base last.
+      return lhs.base < rhs.base;
     }
   };
 
+  // Set of instance fields still holding non-aliased values after the base has been stored.
+  typedef ScopedArenaSet<NonAliasingIFieldKey, NonAliasingIFieldKeyComparator> NonAliasingFieldSet;
+
+  struct EscapedArrayKey {
+    uint16_t base;
+    uint16_t type;
+  };
+
+  struct EscapedArrayKeyComparator {
+    bool operator()(const EscapedArrayKey& lhs, const EscapedArrayKey& rhs) const {
+      // Compare the type first. This allows iterating across all the entries for a certain type
+      // as needed when we need to purge them for an aliasing APUT of that type.
+      if (lhs.type != rhs.type) {
+        return lhs.type < rhs.type;
+      }
+      // Compare the base last.
+      return lhs.base < rhs.base;
+    }
+  };
+
+  // Set of previously non-aliasing array refs that escaped.
+  typedef ScopedArenaSet<EscapedArrayKey, EscapedArrayKeyComparator> EscapedArraySet;
+
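
The type-first ordering that this comparator and NonAliasingIFieldKeyComparator document enables a cheap range purge; here is a standalone sketch (an assumed simplification, with `std::set` standing in for `ScopedArenaSet`) of the lower_bound-plus-erase walk used in HandleAPut and HandleIPut:

```cpp
#include <cassert>
#include <cstdint>
#include <set>

struct Key {
  uint16_t base;
  uint16_t type;
};

struct Cmp {
  bool operator()(const Key& lhs, const Key& rhs) const {
    if (lhs.type != rhs.type) {
      return lhs.type < rhs.type;  // Type first: entries of one type are contiguous.
    }
    return lhs.base < rhs.base;
  }
};

int main() {
  std::set<Key, Cmp> refs = {{5, 0}, {7, 1}, {9, 1}, {3, 2}};
  uint16_t type = 1;
  auto it = refs.lower_bound(Key{0u, type});  // Lowest possible key of this type.
  while (it != refs.end() && it->type == type) {
    it = refs.erase(it);  // Erases exactly the type-1 range.
  }
  assert(refs.size() == 2u);  // The type-0 and type-2 entries survive.
  return 0;
}
```
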
   // Key is s_reg, value is value name.
   typedef ScopedArenaSafeMap<uint16_t, uint16_t> SregValueMap;
   // Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
   typedef ScopedArenaSafeMap<uint64_t, uint16_t> ValueMap;
-  // Key represents a memory address, value is generation.
-  typedef ScopedArenaSafeMap<MemoryVersionKey, uint16_t, MemoryVersionKeyComparator
-      > MemoryVersionMap;
-  // Maps field key to field id for resolved fields.
-  typedef ScopedArenaSafeMap<FieldReference, uint32_t, FieldReferenceComparator> FieldIndexMap;
   // A set of value names.
   typedef ScopedArenaSet<uint16_t> ValueNameSet;
 
- public:
-  static LocalValueNumbering* Create(CompilationUnit* cu) {
-    std::unique_ptr<ScopedArenaAllocator> allocator(ScopedArenaAllocator::Create(&cu->arena_stack));
-    void* addr = allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
-    return new(addr) LocalValueNumbering(cu, allocator.release());
-  }
-
   static uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
     return (static_cast<uint64_t>(op) << 48 | static_cast<uint64_t>(operand1) << 32 |
             static_cast<uint64_t>(operand2) << 16 | static_cast<uint64_t>(modifier));
   };
 
+  static uint16_t ExtractOp(uint64_t key) {
+    return static_cast<uint16_t>(key >> 48);
+  }
+
+  static uint16_t ExtractOperand1(uint64_t key) {
+    return static_cast<uint16_t>(key >> 32);
+  }
+
+  static uint16_t ExtractOperand2(uint64_t key) {
+    return static_cast<uint16_t>(key >> 16);
+  }
+
+  static uint16_t ExtractModifier(uint64_t key) {
+    return static_cast<uint16_t>(key);
+  }
+
+  static bool EqualOpAndOperand1(uint64_t key1, uint64_t key2) {
+    return static_cast<uint32_t>(key1 >> 32) == static_cast<uint32_t>(key2 >> 32);
+  }
+
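
A round-trip sketch of the key packing (a standalone copy of `BuildKey`; the shift positions match the `Extract*` helpers above):

```cpp
#include <cassert>
#include <cstdint>

static uint64_t BuildKey(uint16_t op, uint16_t op1, uint16_t op2, uint16_t mod) {
  return (static_cast<uint64_t>(op) << 48) | (static_cast<uint64_t>(op1) << 32) |
         (static_cast<uint64_t>(op2) << 16) | mod;
}

int main() {
  uint64_t key = BuildKey(0xAAAAu, 0xBBBBu, 0xCCCCu, 0xDDDDu);
  assert(static_cast<uint16_t>(key >> 48) == 0xAAAAu);  // ExtractOp
  assert(static_cast<uint16_t>(key >> 32) == 0xBBBBu);  // ExtractOperand1
  assert(static_cast<uint16_t>(key >> 16) == 0xCCCCu);  // ExtractOperand2
  assert(static_cast<uint16_t>(key) == 0xDDDDu);        // ExtractModifier
  // EqualOpAndOperand1 compares the top 32 bits in a single operation.
  uint64_t key2 = BuildKey(0xAAAAu, 0xBBBBu, 0x1111u, 0x2222u);
  assert(static_cast<uint32_t>(key >> 32) == static_cast<uint32_t>(key2 >> 32));
  return 0;
}
```
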
   uint16_t LookupValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
     uint16_t res;
     uint64_t key = BuildKey(op, operand1, operand2, modifier);
@@ -107,12 +184,26 @@
     if (it != value_map_.end()) {
       res = it->second;
     } else {
-      res = value_map_.size() + 1;
+      ++last_value_;
+      res = last_value_;
       value_map_.Put(key, res);
     }
     return res;
   };
 
+  void StoreValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier,
+                  uint16_t value) {
+    uint64_t key = BuildKey(op, operand1, operand2, modifier);
+    value_map_.Overwrite(key, value);
+  }
+
+  bool HasValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier,
+                uint16_t value) const {
+    uint64_t key = BuildKey(op, operand1, operand2, modifier);
+    ValueMap::const_iterator it = value_map_.find(key);
+    return (it != value_map_.end() && it->second == value);
+  };
+
   bool ValueExists(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) const {
     uint64_t key = BuildKey(op, operand1, operand2, modifier);
     ValueMap::const_iterator it = value_map_.find(key);
@@ -129,13 +220,13 @@
   };
 
   uint16_t GetOperandValue(int s_reg) {
-    uint16_t res = NO_VALUE;
+    uint16_t res = kNoValue;
     SregValueMap::iterator it = sreg_value_map_.find(s_reg);
     if (it != sreg_value_map_.end()) {
       res = it->second;
     } else {
       // First use
-      res = LookupValue(NO_VALUE, s_reg, NO_VALUE, NO_VALUE);
+      res = LookupValue(kNoValue, s_reg, kNoValue, kNoValue);
       sreg_value_map_.Put(s_reg, res);
     }
     return res;
@@ -151,63 +242,61 @@
   };
 
   uint16_t GetOperandValueWide(int s_reg) {
-    uint16_t res = NO_VALUE;
+    uint16_t res = kNoValue;
     SregValueMap::iterator it = sreg_wide_value_map_.find(s_reg);
     if (it != sreg_wide_value_map_.end()) {
       res = it->second;
     } else {
       // First use
-      res = LookupValue(NO_VALUE, s_reg, NO_VALUE, NO_VALUE);
+      res = LookupValue(kNoValue, s_reg, kNoValue, kNoValue);
       sreg_wide_value_map_.Put(s_reg, res);
     }
     return res;
   };
 
-  uint16_t GetValueNumber(MIR* mir);
-
-  // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
-  static void operator delete(void* ptr) { UNUSED(ptr); }
-
- private:
-  LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
-      : cu_(cu),
-        allocator_(allocator),
-        sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-        sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-        value_map_(std::less<uint64_t>(), allocator->Adapter()),
-        next_memory_version_(1u),
-        global_memory_version_(0u),
-        memory_version_map_(MemoryVersionKeyComparator(), allocator->Adapter()),
-        field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
-        non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
-        null_checked_(std::less<uint16_t>(), allocator->Adapter()) {
-    std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
-    std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
-  }
-
-  uint16_t GetFieldId(const DexFile* dex_file, uint16_t field_idx);
-  void AdvanceGlobalMemory();
-  uint16_t GetMemoryVersion(uint16_t base, uint16_t field, uint16_t type);
-  uint16_t AdvanceMemoryVersion(uint16_t base, uint16_t field, uint16_t type);
+  uint16_t GetFieldId(const MirFieldInfo& field_info);
   uint16_t MarkNonAliasingNonNull(MIR* mir);
-  void MakeArgsAliasing(MIR* mir);
+  bool IsNonAliasing(uint16_t reg);
+  bool IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type);
+  bool IsNonAliasingArray(uint16_t reg, uint16_t type);
   void HandleNullCheck(MIR* mir, uint16_t reg);
   void HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index);
   void HandlePutObject(MIR* mir);
+  void HandleEscapingRef(uint16_t base);
+  uint16_t HandleAGet(MIR* mir, uint16_t opcode);
+  void HandleAPut(MIR* mir, uint16_t opcode);
+  uint16_t HandleIGet(MIR* mir, uint16_t opcode);
+  void HandleIPut(MIR* mir, uint16_t opcode);
+  uint16_t HandleSGet(MIR* mir, uint16_t opcode);
+  void HandleSPut(MIR* mir, uint16_t opcode);
 
   CompilationUnit* const cu_;
-  std::unique_ptr<ScopedArenaAllocator> allocator_;
+
+  // We have 32-bit last_value_ so that we can detect when we run out of value names, see Good().
+  // We usually don't check Good() until the end of LVN unless we're about to modify code.
+  uint32_t last_value_;
+
   SregValueMap sreg_value_map_;
   SregValueMap sreg_wide_value_map_;
   ValueMap value_map_;
-  uint16_t next_memory_version_;
+
+  // Data for dealing with memory clobbering and store/load aliasing.
   uint16_t global_memory_version_;
   uint16_t unresolved_sfield_version_[kFieldTypeCount];
   uint16_t unresolved_ifield_version_[kFieldTypeCount];
-  MemoryVersionMap memory_version_map_;
+  uint16_t aliasing_array_version_[kFieldTypeCount];
+  AliasingIFieldVersionMap aliasing_ifield_version_map_;
+  NonAliasingArrayVersionMap non_aliasing_array_version_map_;
   FieldIndexMap field_index_map_;
   // Value names of references to objects that cannot be reached through a different value name.
   ValueNameSet non_aliasing_refs_;
+  // Instance fields still holding non-aliased values after the base has escaped.
+  NonAliasingFieldSet non_aliasing_ifields_;
+  // Previously non-aliasing array refs that escaped but can still be used for non-aliasing AGET.
+  EscapedArraySet escaped_array_refs_;
+
+  // Range check and null check elimination.
+  RangeCheckSet range_checked_;
   ValueNameSet null_checked_;
 
   DISALLOW_COPY_AND_ASSIGN(LocalValueNumbering);
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 2b1c420..efc4fc8 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -40,7 +40,7 @@
 
   struct MIRDef {
     static constexpr size_t kMaxSsaDefs = 2;
-    static constexpr size_t kMaxSsaUses = 3;
+    static constexpr size_t kMaxSsaUses = 4;
 
     Instruction::Code opcode;
     int64_t value;
@@ -55,6 +55,8 @@
     { opcode, value, 0u, 0, { }, 1, { reg } }
 #define DEF_CONST_WIDE(opcode, reg, value) \
     { opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_CONST_STRING(opcode, reg, index) \
+    { opcode, index, 0u, 0, { }, 1, { reg } }
 #define DEF_IGET(opcode, reg, obj, field_info) \
     { opcode, 0u, field_info, 1, { obj }, 1, { reg } }
 #define DEF_IGET_WIDE(opcode, reg, obj, field_info) \
@@ -71,6 +73,14 @@
     { opcode, 0u, field_info, 1, { reg }, 0, { } }
 #define DEF_SPUT_WIDE(opcode, reg, field_info) \
     { opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_AGET(opcode, reg, obj, idx) \
+    { opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
+#define DEF_AGET_WIDE(opcode, reg, obj, idx) \
+    { opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
+#define DEF_APUT(opcode, reg, obj, idx) \
+    { opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
+#define DEF_APUT_WIDE(opcode, reg, obj, idx) \
+    { opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
 #define DEF_INVOKE1(opcode, reg) \
     { opcode, 0u, 0u, 1, { reg }, 0, { } }
 #define DEF_UNIQUE_REF(opcode, reg) \
@@ -144,7 +154,6 @@
       mir->ssa_rep->fp_def = nullptr;  // Not used by LVN.
       mir->dalvikInsn.opcode = def->opcode;
-      mir->offset = i;  // LVN uses offset only for debug output
+      mir->offset = i;  // LVN uses offset as a uniqueness modifier for unresolved fields/invokes.
-      mir->width = 1u;  // Not used by LVN.
       mir->optimization_flags = 0u;
 
       if (i != 0u) {
@@ -164,6 +173,7 @@
     for (size_t i = 0; i != mir_count_; ++i) {
       value_names_[i] =  lvn_->GetValueNumber(&mirs_[i]);
     }
+    EXPECT_TRUE(lvn_->Good());
   }
 
   LocalValueNumberingTest()
@@ -171,8 +181,11 @@
         cu_(&pool_),
         mir_count_(0u),
         mirs_(nullptr),
-        lvn_(LocalValueNumbering::Create(&cu_)) {
+        allocator_(),
+        lvn_() {
     cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+    lvn_.reset(new (allocator_.get()) LocalValueNumbering(&cu_, allocator_.get()));
   }
 
   ArenaPool pool_;
@@ -181,12 +194,13 @@
   MIR* mirs_;
   std::vector<SSARepresentation> ssa_reps_;
   std::vector<uint16_t> value_names_;
+  std::unique_ptr<ScopedArenaAllocator> allocator_;
   std::unique_ptr<LocalValueNumbering> lvn_;
 };
 
-TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
+TEST_F(LocalValueNumberingTest, IGetIGetInvokeIGet) {
   static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false }
+      { 1u, 1u, 1u, false },
   };
   static const MIRDef mirs[] = {
       DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
@@ -207,15 +221,15 @@
   EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
 
-TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
+TEST_F(LocalValueNumberingTest, IGetIPutIGetIGetIGet) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
       { 2u, 1u, 2u, false },
   };
   static const MIRDef mirs[] = {
-      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
-      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),  // May alias.
-      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
+      DEF_IGET(Instruction::IGET_OBJECT, 0u, 10u, 0u),
+      DEF_IPUT(Instruction::IPUT_OBJECT, 1u, 11u, 0u),  // May alias.
+      DEF_IGET(Instruction::IGET_OBJECT, 2u, 10u, 0u),
       DEF_IGET(Instruction::IGET, 3u,  0u, 1u),
       DEF_IGET(Instruction::IGET, 4u,  2u, 1u),
   };
@@ -233,7 +247,7 @@
   EXPECT_EQ(mirs_[4].optimization_flags, 0u);
 }
 
-TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
+TEST_F(LocalValueNumberingTest, UniquePreserve1) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
   };
@@ -254,7 +268,7 @@
   EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
 
-TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
+TEST_F(LocalValueNumberingTest, UniquePreserve2) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
   };
@@ -275,7 +289,7 @@
   EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
 
-TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
+TEST_F(LocalValueNumberingTest, UniquePreserveAndEscape) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
   };
@@ -299,7 +313,7 @@
   EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
 
-TEST_F(LocalValueNumberingTest, TestVolatile) {
+TEST_F(LocalValueNumberingTest, Volatile) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
       { 2u, 1u, 2u, true },
@@ -323,4 +337,264 @@
   EXPECT_EQ(mirs_[3].optimization_flags, 0u);
 }
 
+TEST_F(LocalValueNumberingTest, UnresolvedIField) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },  // Resolved field #1.
+      { 2u, 1u, 2u, false },  // Resolved field #2.
+      { 3u, 0u, 0u, false },  // Unresolved field.
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
+      DEF_IGET(Instruction::IGET, 1u, 20u, 0u),             // Resolved field #1, unique object.
+      DEF_IGET(Instruction::IGET, 2u, 21u, 0u),             // Resolved field #1.
+      DEF_IGET_WIDE(Instruction::IGET_WIDE, 3u, 21u, 1u),   // Resolved field #2.
+      DEF_IGET(Instruction::IGET, 4u, 22u, 2u),             // IGET doesn't clobber anything.
+      DEF_IGET(Instruction::IGET, 5u, 20u, 0u),             // Resolved field #1, unique object.
+      DEF_IGET(Instruction::IGET, 6u, 21u, 0u),             // Resolved field #1.
+      DEF_IGET_WIDE(Instruction::IGET_WIDE, 7u, 21u, 1u),   // Resolved field #2.
+      DEF_IPUT(Instruction::IPUT, 8u, 22u, 2u),             // IPUT clobbers field #1 (#2 is wide).
+      DEF_IGET(Instruction::IGET, 9u, 20u, 0u),             // Resolved field #1, unique object.
+      DEF_IGET(Instruction::IGET, 10u, 21u, 0u),            // Resolved field #1, new value name.
+      DEF_IGET_WIDE(Instruction::IGET_WIDE, 11u, 21u, 1u),  // Resolved field #2.
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 12u);
+  EXPECT_EQ(value_names_[1], value_names_[5]);
+  EXPECT_EQ(value_names_[2], value_names_[6]);
+  EXPECT_EQ(value_names_[3], value_names_[7]);
+  EXPECT_EQ(value_names_[1], value_names_[9]);
+  EXPECT_NE(value_names_[2], value_names_[10]);  // This aliased with unresolved IPUT.
+  EXPECT_EQ(value_names_[3], value_names_[11]);
+  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[2].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[4].optimization_flags, 0u);
+  for (size_t i = 5u; i != mir_count_; ++i) {
+    EXPECT_EQ(mirs_[i].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  }
+}
+
+TEST_F(LocalValueNumberingTest, UnresolvedSField) {
+  static const SFieldDef sfields[] = {
+      { 1u, 1u, 1u, false },  // Resolved field #1.
+      { 2u, 1u, 2u, false },  // Resolved field #2.
+      { 3u, 0u, 0u, false },  // Unresolved field.
+  };
+  static const MIRDef mirs[] = {
+      DEF_SGET(Instruction::SGET, 0u, 0u),            // Resolved field #1.
+      DEF_SGET_WIDE(Instruction::SGET_WIDE, 1u, 1u),  // Resolved field #2.
+      DEF_SGET(Instruction::SGET, 2u, 2u),            // SGET doesn't clobber anything.
+      DEF_SGET(Instruction::SGET, 3u, 0u),            // Resolved field #1.
+      DEF_SGET_WIDE(Instruction::SGET_WIDE, 4u, 1u),  // Resolved field #2.
+      DEF_SPUT(Instruction::SPUT, 5u, 2u),            // SPUT clobbers field #1 (#2 is wide).
+      DEF_SGET(Instruction::SGET, 6u, 0u),            // Resolved field #1.
+      DEF_SGET_WIDE(Instruction::SGET_WIDE, 7u, 1u),  // Resolved field #2.
+  };
+
+  PrepareSFields(sfields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 8u);
+  EXPECT_EQ(value_names_[0], value_names_[3]);
+  EXPECT_EQ(value_names_[1], value_names_[4]);
+  EXPECT_NE(value_names_[0], value_names_[6]);  // This aliased with unresolved SPUT.
+  EXPECT_EQ(value_names_[1], value_names_[7]);
+  for (size_t i = 0u; i != mir_count_; ++i) {
+    EXPECT_EQ(mirs_[i].optimization_flags, 0u) << i;
+  }
+}
+
+TEST_F(LocalValueNumberingTest, ConstString) {
+  static const MIRDef mirs[] = {
+      DEF_CONST_STRING(Instruction::CONST_STRING, 0u, 0u),
+      DEF_CONST_STRING(Instruction::CONST_STRING, 1u, 0u),
+      DEF_CONST_STRING(Instruction::CONST_STRING, 2u, 2u),
+      DEF_CONST_STRING(Instruction::CONST_STRING, 3u, 0u),
+      DEF_INVOKE1(Instruction::INVOKE_DIRECT, 2u),
+      DEF_CONST_STRING(Instruction::CONST_STRING, 4u, 2u),
+  };
+
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 6u);
+  EXPECT_EQ(value_names_[1], value_names_[0]);
+  EXPECT_NE(value_names_[2], value_names_[0]);
+  EXPECT_EQ(value_names_[3], value_names_[0]);
+  EXPECT_EQ(value_names_[5], value_names_[2]);
+}
+
+TEST_F(LocalValueNumberingTest, SameValueInDifferentMemoryLocations) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },
+      { 2u, 1u, 2u, false },
+  };
+  static const SFieldDef sfields[] = {
+      { 3u, 1u, 3u, false },
+  };
+  static const MIRDef mirs[] = {
+      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+      DEF_IPUT(Instruction::IPUT, 0u, 10u, 1u),
+      DEF_SPUT(Instruction::SPUT, 0u, 0u),
+      DEF_APUT(Instruction::APUT, 0u, 11u, 12u),
+      DEF_IGET(Instruction::IGET, 1u, 10u, 0u),
+      DEF_IGET(Instruction::IGET, 2u, 10u, 1u),
+      DEF_AGET(Instruction::AGET, 3u, 11u, 12u),
+      DEF_SGET(Instruction::SGET, 4u, 0u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareSFields(sfields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 8u);
+  EXPECT_EQ(value_names_[4], value_names_[0]);
+  EXPECT_EQ(value_names_[5], value_names_[0]);
+  EXPECT_EQ(value_names_[6], value_names_[0]);
+  EXPECT_EQ(value_names_[7], value_names_[0]);
+  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[2].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[3].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[4].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[6].optimization_flags, MIR_IGNORE_NULL_CHECK | MIR_IGNORE_RANGE_CHECK);
+  EXPECT_EQ(mirs_[7].optimization_flags, 0u);
+}
+
+TEST_F(LocalValueNumberingTest, UniqueArrayAliasing) {
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 20u),
+      DEF_AGET(Instruction::AGET, 1u, 20u, 40u),
+      DEF_APUT(Instruction::APUT, 2u, 20u, 41u),  // May alias with index for sreg 40u.
+      DEF_AGET(Instruction::AGET, 3u, 20u, 40u),
+  };
+
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 4u);
+  EXPECT_NE(value_names_[1], value_names_[3]);
+  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[2].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK | MIR_IGNORE_RANGE_CHECK);
+}
+
+TEST_F(LocalValueNumberingTest, EscapingRefs) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },  // Field #1.
+      { 2u, 1u, 2u, false },  // Field #2.
+      { 3u, 1u, 3u, false },  // Reference field for storing escaping refs.
+      { 4u, 1u, 4u, false },  // Wide.
+      { 5u, 0u, 0u, false },  // Unresolved field, int.
+      { 6u, 0u, 0u, false },  // Unresolved field, wide.
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
+      DEF_IGET(Instruction::IGET, 1u, 20u, 0u),
+      DEF_IGET(Instruction::IGET, 2u, 20u, 1u),
+      DEF_IPUT(Instruction::IPUT_OBJECT, 20u, 30u, 2u),      // Ref escapes.
+      DEF_IGET(Instruction::IGET, 4u, 20u, 0u),
+      DEF_IGET(Instruction::IGET, 5u, 20u, 1u),
+      DEF_IPUT(Instruction::IPUT, 6u, 31u, 0u),              // May alias with field #1.
+      DEF_IGET(Instruction::IGET, 7u, 20u, 0u),              // New value.
+      DEF_IGET(Instruction::IGET, 8u, 20u, 1u),              // Still the same.
+      DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 9u, 31u, 3u),    // No aliasing, different type.
+      DEF_IGET(Instruction::IGET, 10u, 20u, 0u),
+      DEF_IGET(Instruction::IGET, 11u, 20u, 1u),
+      DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 12u, 31u, 5u),   // No aliasing, different type.
+      DEF_IGET(Instruction::IGET, 13u, 20u, 0u),
+      DEF_IGET(Instruction::IGET, 14u, 20u, 1u),
+      DEF_IPUT(Instruction::IPUT, 15u, 31u, 4u),             // Aliasing, same type.
+      DEF_IGET(Instruction::IGET, 16u, 20u, 0u),
+      DEF_IGET(Instruction::IGET, 17u, 20u, 1u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 18u);
+  EXPECT_EQ(value_names_[1], value_names_[4]);
+  EXPECT_EQ(value_names_[2], value_names_[5]);
+  EXPECT_NE(value_names_[4], value_names_[7]);  // New value.
+  EXPECT_EQ(value_names_[5], value_names_[8]);
+  EXPECT_EQ(value_names_[7], value_names_[10]);
+  EXPECT_EQ(value_names_[8], value_names_[11]);
+  EXPECT_EQ(value_names_[10], value_names_[13]);
+  EXPECT_EQ(value_names_[11], value_names_[14]);
+  EXPECT_NE(value_names_[13], value_names_[16]);  // New value.
+  EXPECT_NE(value_names_[14], value_names_[17]);  // New value.
+  for (size_t i = 0u; i != mir_count_; ++i) {
+    int expected = (i != 0u && i != 3u && i != 6u) ? MIR_IGNORE_NULL_CHECK : 0u;
+    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+  }
+}
+
+TEST_F(LocalValueNumberingTest, EscapingArrayRefs) {
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 20u),
+      DEF_AGET(Instruction::AGET, 1u, 20u, 40u),
+      DEF_AGET(Instruction::AGET, 2u, 20u, 41u),
+      DEF_APUT(Instruction::APUT_OBJECT, 20u, 30u, 42u),    // Array ref escapes.
+      DEF_AGET(Instruction::AGET, 4u, 20u, 40u),
+      DEF_AGET(Instruction::AGET, 5u, 20u, 41u),
+      DEF_APUT_WIDE(Instruction::APUT_WIDE, 6u, 31u, 43u),  // No aliasing, different type.
+      DEF_AGET(Instruction::AGET, 7u, 20u, 40u),
+      DEF_AGET(Instruction::AGET, 8u, 20u, 41u),
+      DEF_APUT(Instruction::APUT, 9u, 32u, 40u),            // May alias with all elements.
+      DEF_AGET(Instruction::AGET, 10u, 20u, 40u),           // New value (same index name).
+      DEF_AGET(Instruction::AGET, 11u, 20u, 41u),           // New value (different index name).
+  };
+
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 12u);
+  EXPECT_EQ(value_names_[1], value_names_[4]);
+  EXPECT_EQ(value_names_[2], value_names_[5]);
+  EXPECT_EQ(value_names_[4], value_names_[7]);
+  EXPECT_EQ(value_names_[5], value_names_[8]);
+  EXPECT_NE(value_names_[7], value_names_[10]);  // New value.
+  EXPECT_NE(value_names_[8], value_names_[11]);  // New value.
+  for (size_t i = 0u; i != mir_count_; ++i) {
+    int expected =
+        ((i != 0u && i != 3u && i != 6u && i != 9u) ? MIR_IGNORE_NULL_CHECK : 0u) |
+        ((i >= 4 && i != 6u && i != 9u) ? MIR_IGNORE_RANGE_CHECK : 0u);
+    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+  }
+}
+
+TEST_F(LocalValueNumberingTest, StoringSameValueKeepsMemoryVersion) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },
+  };
+  static const MIRDef mirs[] = {
+      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+      DEF_IGET(Instruction::IGET, 1u, 11u, 0u),
+      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),   // Store the same value.
+      DEF_IGET(Instruction::IGET, 3u, 10u, 0u),
+      DEF_AGET(Instruction::AGET, 4u, 12u, 40u),
+      DEF_AGET(Instruction::AGET, 5u, 13u, 40u),
+      DEF_APUT(Instruction::APUT, 5u, 13u, 40u),  // Store the same value.
+      DEF_AGET(Instruction::AGET, 7u, 12u, 40u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 8u);
+  EXPECT_NE(value_names_[0], value_names_[1]);
+  EXPECT_EQ(value_names_[0], value_names_[3]);
+  EXPECT_NE(value_names_[4], value_names_[5]);
+  EXPECT_EQ(value_names_[4], value_names_[7]);
+  for (size_t i = 0u; i != mir_count_; ++i) {
+    int expected =
+        ((i == 2u || i == 3u || i == 6u || i == 7u) ? MIR_IGNORE_NULL_CHECK : 0u) |
+        ((i == 6u || i == 7u) ? MIR_IGNORE_RANGE_CHECK : 0u);
+    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+  }
+}
+
 }  // namespace art
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 508f1c7..7129f8a 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -902,7 +902,7 @@
   while (!done) {
     tbb->visited = true;
     for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
-      if (static_cast<uint32_t>(mir->dalvikInsn.opcode) >= kMirOpFirst) {
+      if (IsPseudoMirOp(mir->dalvikInsn.opcode)) {
         // Skip any MIR pseudo-op.
         continue;
       }
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index ed7e1f5..9fea709 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -953,18 +953,34 @@
   defs[reg_index] = ssa_reg;
 }
 
+void MIRGraph::AllocateSSAUseData(MIR *mir, int num_uses) {
+  mir->ssa_rep->num_uses = num_uses;
+
+  if (mir->ssa_rep->num_uses_allocated < num_uses) {
+    mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, kArenaAllocDFInfo));
+    // NOTE: will be filled in during type & size inference pass
+    mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, kArenaAllocDFInfo));
+  }
+}
+
+void MIRGraph::AllocateSSADefData(MIR *mir, int num_defs) {
+  mir->ssa_rep->num_defs = num_defs;
+
+  if (mir->ssa_rep->num_defs_allocated < num_defs) {
+    mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
+          kArenaAllocDFInfo));
+    mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
+          kArenaAllocDFInfo));
+  }
+}
+
 /* Look up new SSA names for format_35c instructions */
 void MIRGraph::DataFlowSSAFormat35C(MIR* mir) {
   MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
   int num_uses = d_insn->vA;
   int i;
 
-  mir->ssa_rep->num_uses = num_uses;
-  mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
-                                                       kArenaAllocDFInfo));
-  // NOTE: will be filled in during type & size inference pass
-  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
-                                                          kArenaAllocDFInfo));
+  AllocateSSAUseData(mir, num_uses);
 
   for (i = 0; i < num_uses; i++) {
     HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
@@ -977,12 +993,7 @@
   int num_uses = d_insn->vA;
   int i;
 
-  mir->ssa_rep->num_uses = num_uses;
-  mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
-                                                       kArenaAllocDFInfo));
-  // NOTE: will be filled in during type & size inference pass
-  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
-                                                          kArenaAllocDFInfo));
+  AllocateSSAUseData(mir, num_uses);
 
   for (i = 0; i < num_uses; i++) {
     HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
@@ -999,12 +1010,12 @@
     mir->ssa_rep =
         static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
                                                               kArenaAllocDFInfo));
+    memset(mir->ssa_rep, 0, sizeof(*mir->ssa_rep));
 
     uint64_t df_attributes = GetDataFlowAttributes(mir);
 
       // If not a pseudo-op, note non-leaf or can throw
-    if (static_cast<int>(mir->dalvikInsn.opcode) <
-        static_cast<int>(kNumPackedOpcodes)) {
+    if (!IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
 
       if ((flags & Instruction::kInvoke) != 0 && (mir->optimization_flags & MIR_INLINED) == 0) {
@@ -1045,13 +1056,7 @@
       }
     }
 
-    if (num_uses) {
-      mir->ssa_rep->num_uses = num_uses;
-      mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
-                                                           kArenaAllocDFInfo));
-      mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
-                                                              kArenaAllocDFInfo));
-    }
+    AllocateSSAUseData(mir, num_uses);
 
     int num_defs = 0;
 
@@ -1062,13 +1067,7 @@
       }
     }
 
-    if (num_defs) {
-      mir->ssa_rep->num_defs = num_defs;
-      mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
-                                                           kArenaAllocDFInfo));
-      mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
-                                                              kArenaAllocDFInfo));
-    }
+    AllocateSSADefData(mir, num_defs);
 
     MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
 
@@ -1114,11 +1113,11 @@
    * input to PHI nodes can be derived from the snapshot of all
    * predecessor blocks.
    */
-  bb->data_flow_info->vreg_to_ssa_map =
+  bb->data_flow_info->vreg_to_ssa_map_exit =
       static_cast<int*>(arena_->Alloc(sizeof(int) * cu_->num_dalvik_registers,
                                       kArenaAllocDFInfo));
 
-  memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
+  memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, vreg_to_ssa_map_,
          sizeof(int) * cu_->num_dalvik_registers);
   return true;
 }
@@ -1282,7 +1281,7 @@
   GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
 
   while (true) {
-    BasicBlock *pred_bb = GetBasicBlock(iter.Next());
+    BasicBlock* pred_bb = GetBasicBlock(iter.Next());
     if (!pred_bb) break;
     bool found = false;
     if (pred_bb->taken == bb->id) {
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index ba4224e..3ef1dbf 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -17,6 +17,7 @@
 #include "mir_graph.h"
 
 #include <inttypes.h>
+#include <queue>
 
 #include "base/stl_util.h"
 #include "compiler_internals.h"
@@ -25,6 +26,7 @@
 #include "dex/quick/dex_file_to_method_inliner_map.h"
 #include "dex/quick/dex_file_method_inliner.h"
 #include "leb128.h"
+#include "pass_driver_me_post_opt.h"
 
 namespace art {
 
@@ -73,12 +75,13 @@
       use_counts_(arena, 256, kGrowableArrayMisc),
       raw_use_counts_(arena, 256, kGrowableArrayMisc),
       num_reachable_blocks_(0),
+      max_num_reachable_blocks_(0),
       dfs_order_(NULL),
       dfs_post_order_(NULL),
       dom_post_order_traversal_(NULL),
+      topological_order_(nullptr),
       i_dom_list_(NULL),
       def_block_matrix_(NULL),
-      temp_dalvik_register_v_(NULL),
       temp_scoped_alloc_(),
       temp_insn_data_(nullptr),
       temp_bit_vector_size_(0u),
@@ -149,7 +152,7 @@
   if (insn == NULL) {
     LOG(FATAL) << "Break split failed";
   }
-  BasicBlock *bottom_block = NewMemBB(kDalvikByteCode, num_blocks_++);
+  BasicBlock* bottom_block = NewMemBB(kDalvikByteCode, num_blocks_++);
   block_list_.Insert(bottom_block);
 
   bottom_block->start_offset = code_offset;
@@ -187,16 +190,16 @@
     orig_block->successor_blocks = NULL;
     GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_blocks);
     while (true) {
-      SuccessorBlockInfo *successor_block_info = iterator.Next();
+      SuccessorBlockInfo* successor_block_info = iterator.Next();
       if (successor_block_info == NULL) break;
-      BasicBlock *bb = GetBasicBlock(successor_block_info->block);
+      BasicBlock* bb = GetBasicBlock(successor_block_info->block);
       bb->predecessors->Delete(orig_block->id);
       bb->predecessors->Insert(bottom_block->id);
     }
   }
 
   orig_block->last_mir_insn = prev;
-  prev->next = NULL;
+  prev->next = nullptr;
 
   /*
    * Update the immediate predecessor block pointer so that outgoing edges
@@ -220,6 +223,7 @@
   while (p != bottom_block->last_mir_insn) {
     p = p->next;
     DCHECK(p != nullptr);
+    p->bb = bottom_block->id;
     int opcode = p->dalvikInsn.opcode;
     /*
      * Some messiness here to ensure that we only enter real opcodes and only the
@@ -296,7 +300,7 @@
     }
   }
 
-  // Iterate over each of the handlers to enqueue the empty Catch blocks
+  // Iterate over each of the handlers to enqueue the empty Catch blocks.
   const byte* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
   uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
   for (uint32_t idx = 0; idx < handlers_size; idx++) {
@@ -343,14 +347,14 @@
       LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
   }
   CountBranch(target);
-  BasicBlock *taken_block = FindBlock(target, /* split */ true, /* create */ true,
+  BasicBlock* taken_block = FindBlock(target, /* split */ true, /* create */ true,
                                       /* immed_pred_block_p */ &cur_block);
   cur_block->taken = taken_block->id;
   taken_block->predecessors->Insert(cur_block->id);
 
   /* Always terminate the current block for conditional branches */
   if (flags & Instruction::kContinue) {
-    BasicBlock *fallthrough_block = FindBlock(cur_offset +  width,
+    BasicBlock* fallthrough_block = FindBlock(cur_offset +  width,
                                              /*
                                               * If the method is processed
                                               * in sequential order from the
@@ -403,7 +407,7 @@
     size = switch_data[1];
     first_key = switch_data[2] | (switch_data[3] << 16);
     target_table = reinterpret_cast<const int*>(&switch_data[4]);
-    keyTable = NULL;        // Make the compiler happy
+    keyTable = NULL;        // Make the compiler happy.
   /*
    * Sparse switch data format:
    *  ushort ident = 0x0200   magic value
@@ -419,7 +423,7 @@
     size = switch_data[1];
     keyTable = reinterpret_cast<const int*>(&switch_data[2]);
     target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
-    first_key = 0;   // To make the compiler happy
+    first_key = 0;   // To make the compiler happy.
   }
 
   if (cur_block->successor_block_list_type != kNotUsed) {
@@ -432,9 +436,9 @@
       new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
 
   for (i = 0; i < size; i++) {
-    BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
+    BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
                                       /* create */ true, /* immed_pred_block_p */ &cur_block);
-    SuccessorBlockInfo *successor_block_info =
+    SuccessorBlockInfo* successor_block_info =
         static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
                                                        kArenaAllocSuccessor));
     successor_block_info->block = case_block->id;
@@ -477,13 +481,13 @@
         new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, 2, kGrowableArraySuccessorBlocks);
 
     for (; iterator.HasNext(); iterator.Next()) {
-      BasicBlock *catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split*/,
+      BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split */,
                                         false /* create */, NULL  /* immed_pred_block_p */);
       catch_block->catch_entry = true;
       if (kIsDebugBuild) {
         catches_.insert(catch_block->start_offset);
       }
-      SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
+      SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
           (arena_->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
       successor_block_info->block = catch_block->id;
       successor_block_info->key = iterator.GetHandlerTypeIndex();
@@ -491,7 +495,7 @@
       catch_block->predecessors->Insert(cur_block->id);
     }
   } else if (build_all_edges) {
-    BasicBlock *eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
+    BasicBlock* eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
     cur_block->taken = eh_block->id;
     block_list_.Insert(eh_block);
     eh_block->start_offset = cur_offset;
@@ -501,7 +505,7 @@
   if (is_throw) {
     cur_block->explicit_throw = true;
     if (code_ptr < code_end) {
-      // Force creation of new block following THROW via side-effect
+      // Force creation of new block following THROW via side-effect.
       FindBlock(cur_offset + width, /* split */ false, /* create */ true,
                 /* immed_pred_block_p */ NULL);
     }
@@ -538,16 +542,15 @@
    * Note also that the dex_pc_to_block_map_ entry for the potentially
    * throwing instruction will refer to the original basic block.
    */
-  BasicBlock *new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
+  BasicBlock* new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
   block_list_.Insert(new_block);
   new_block->start_offset = insn->offset;
   cur_block->fall_through = new_block->id;
   new_block->predecessors->Insert(cur_block->id);
-  MIR* new_insn = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
+  MIR* new_insn = NewMIR();
   *new_insn = *insn;
-  insn->dalvikInsn.opcode =
-      static_cast<Instruction::Code>(kMirOpCheck);
-  // Associate the two halves
+  insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck);
+  // Associate the two halves.
   insn->meta.throw_insn = new_insn;
   new_block->AppendMIR(new_insn);
   return new_block;
@@ -614,7 +617,7 @@
   }
 
   /* Current block to record parsed instructions */
-  BasicBlock *cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
+  BasicBlock* cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
   DCHECK_EQ(current_offset_, 0U);
   cur_block->start_offset = current_offset_;
   block_list_.Insert(cur_block);
@@ -629,11 +632,10 @@
 
   /* Parse all instructions and put them into containing basic blocks */
   while (code_ptr < code_end) {
-    MIR *insn = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
+    MIR* insn = NewMIR();
     insn->offset = current_offset_;
     insn->m_unit_index = current_method_;
     int width = ParseInsn(code_ptr, &insn->dalvikInsn);
-    insn->width = width;
     Instruction::Code opcode = insn->dalvikInsn.opcode;
     if (opcode_count_ != NULL) {
       opcode_count_[static_cast<int>(opcode)]++;
@@ -653,7 +655,7 @@
       cur_block->use_lvn = true;  // Run local value numbering on this basic block.
     }
 
-    // Check for inline data block signatures
+    // Check for inline data block signatures.
     if (opcode == Instruction::NOP) {
       // A simple NOP will have a width of 1 at this point, embedded data NOP > 1.
       if ((width == 1) && ((current_offset_ & 0x1) == 0x1) && ((code_end - code_ptr) > 1)) {
@@ -722,7 +724,7 @@
       }
     }
     current_offset_ += width;
-    BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */
+    BasicBlock* next_block = FindBlock(current_offset_, /* split */ false, /* create */
                                       false, /* immed_pred_block_p */ NULL);
     if (next_block) {
       /*
@@ -797,7 +799,7 @@
 
   for (idx = 0; idx < num_blocks; idx++) {
     int block_idx = all_blocks ? idx : dfs_order_->Get(idx);
-    BasicBlock *bb = GetBasicBlock(block_idx);
+    BasicBlock* bb = GetBasicBlock(block_idx);
     if (bb == NULL) continue;
     if (bb->block_type == kDead) continue;
     if (bb->block_type == kEntryBlock) {
@@ -807,7 +809,7 @@
     } else if (bb->block_type == kDalvikByteCode) {
       fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
               bb->start_offset, bb->id);
-      const MIR *mir;
+      const MIR* mir;
         fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
                 bb->first_mir_insn ? " | " : " ");
         for (mir = bb->first_mir_insn; mir; mir = mir->next) {
@@ -834,8 +836,7 @@
             } else {
               fprintf(file, "    {%04x %s %s %s\\l}%s\\\n", mir->offset,
                       mir->ssa_rep ? GetDalvikDisassembly(mir) :
-                      (opcode < kMirOpFirst) ?
-                        Instruction::Name(mir->dalvikInsn.opcode) :
+                      !IsPseudoMirOp(opcode) ? Instruction::Name(mir->dalvikInsn.opcode) :
                         extended_mir_op_names_[opcode - kMirOpFirst],
                       (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
                       (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
@@ -869,13 +870,13 @@
               bb->start_offset, bb->id,
               (bb->successor_block_list_type == kCatch) ?  "Mrecord" : "record");
       GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
-      SuccessorBlockInfo *successor_block_info = iterator.Next();
+      SuccessorBlockInfo* successor_block_info = iterator.Next();
 
       int succ_id = 0;
       while (true) {
         if (successor_block_info == NULL) break;
 
-        BasicBlock *dest_block = GetBasicBlock(successor_block_info->block);
+        BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
         SuccessorBlockInfo *next_successor_block_info = iterator.Next();
 
         fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
@@ -897,7 +898,7 @@
 
       succ_id = 0;
       while (true) {
-        SuccessorBlockInfo *successor_block_info = iter.Next();
+        SuccessorBlockInfo* successor_block_info = iter.Next();
         if (successor_block_info == NULL) break;
 
         BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
@@ -924,40 +925,173 @@
   fclose(file);
 }
 
-/* Insert an MIR instruction to the end of a basic block */
+/* Insert an MIR instruction to the end of a basic block. */
 void BasicBlock::AppendMIR(MIR* mir) {
-  if (first_mir_insn == nullptr) {
-    DCHECK(last_mir_insn == nullptr);
-    last_mir_insn = first_mir_insn = mir;
-    mir->next = nullptr;
-  } else {
-    last_mir_insn->next = mir;
-    mir->next = nullptr;
-    last_mir_insn = mir;
+  // Insert it after the last MIR.
+  InsertMIRListAfter(last_mir_insn, mir, mir);
+}
+
+void BasicBlock::AppendMIRList(MIR* first_list_mir, MIR* last_list_mir) {
+  // Insert it after the last MIR.
+  InsertMIRListAfter(last_mir_insn, first_list_mir, last_list_mir);
+}
+
+void BasicBlock::AppendMIRList(const std::vector<MIR*>& insns) {
+  for (std::vector<MIR*>::const_iterator it = insns.begin(); it != insns.end(); it++) {
+    MIR* new_mir = *it;
+
+    // Append each MIR; no copy is made.
+    InsertMIRListAfter(last_mir_insn, new_mir, new_mir);
   }
 }
 
-/* Insert an MIR instruction to the head of a basic block */
-void BasicBlock::PrependMIR(MIR* mir) {
-  if (first_mir_insn == nullptr) {
-    DCHECK(last_mir_insn == nullptr);
-    last_mir_insn = first_mir_insn = mir;
-    mir->next = nullptr;
-  } else {
-    mir->next = first_mir_insn;
-    first_mir_insn = mir;
-  }
-}
-
-/* Insert a MIR instruction after the specified MIR */
+/* Insert a MIR instruction after the specified MIR. */
 void BasicBlock::InsertMIRAfter(MIR* current_mir, MIR* new_mir) {
-  new_mir->next = current_mir->next;
-  current_mir->next = new_mir;
+  InsertMIRListAfter(current_mir, new_mir, new_mir);
+}
 
-  if (last_mir_insn == current_mir) {
-    /* Is the last MIR in the block */
-    last_mir_insn = new_mir;
+void BasicBlock::InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir) {
+  // If no MIR, we are done.
+  if (first_list_mir == nullptr || last_list_mir == nullptr) {
+    return;
   }
+
+  // If insert_after is null, assume BB is empty.
+  if (insert_after == nullptr) {
+    first_mir_insn = first_list_mir;
+    last_mir_insn = last_list_mir;
+    last_list_mir->next = nullptr;
+  } else {
+    MIR* after_list = insert_after->next;
+    insert_after->next = first_list_mir;
+    last_list_mir->next = after_list;
+    if (after_list == nullptr) {
+      last_mir_insn = last_list_mir;
+    }
+  }
+
+  // Set this BB to be the basic block of the MIRs.
+  MIR* last = last_list_mir->next;
+  for (MIR* mir = first_list_mir; mir != last; mir = mir->next) {
+    mir->bb = id;
+  }
+}
+
+/* Insert an MIR instruction to the head of a basic block. */
+void BasicBlock::PrependMIR(MIR* mir) {
+  InsertMIRListBefore(first_mir_insn, mir, mir);
+}
+
+void BasicBlock::PrependMIRList(MIR* first_list_mir, MIR* last_list_mir) {
+  // Insert it before the first MIR.
+  InsertMIRListBefore(first_mir_insn, first_list_mir, last_list_mir);
+}
+
+void BasicBlock::PrependMIRList(const std::vector<MIR*>& to_add) {
+  for (std::vector<MIR*>::const_iterator it = to_add.begin(); it != to_add.end(); it++) {
+    MIR* mir = *it;
+
+    InsertMIRListBefore(first_mir_insn, mir, mir);
+  }
+}
+
+/* Insert a MIR instruction before the specified MIR. */
+void BasicBlock::InsertMIRBefore(MIR* current_mir, MIR* new_mir) {
+  // Insert as a single element list.
+  return InsertMIRListBefore(current_mir, new_mir, new_mir);
+}
+
+MIR* BasicBlock::FindPreviousMIR(MIR* mir) {
+  MIR* current = first_mir_insn;
+
+  while (current != nullptr) {
+    MIR* next = current->next;
+
+    if (next == mir) {
+      return current;
+    }
+
+    current = next;
+  }
+
+  return nullptr;
+}
+
+void BasicBlock::InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir) {
+  // If no MIR, we are done.
+  if (first_list_mir == nullptr || last_list_mir == nullptr) {
+    return;
+  }
+
+  // If insert_before is null, assume BB is empty.
+  if (insert_before == nullptr) {
+    first_mir_insn = first_list_mir;
+    last_mir_insn = last_list_mir;
+    last_list_mir->next = nullptr;
+  } else {
+    if (first_mir_insn == insert_before) {
+      last_list_mir->next = first_mir_insn;
+      first_mir_insn = first_list_mir;
+    } else {
+      // Find the preceding MIR.
+      MIR* before_list = FindPreviousMIR(insert_before);
+      DCHECK(before_list != nullptr);
+      before_list->next = first_list_mir;
+      last_list_mir->next = insert_before;
+    }
+  }
+
+  // Set this BB to be the basic block of the MIRs.
+  for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) {
+    mir->bb = id;
+  }
+}
+
+bool BasicBlock::RemoveMIR(MIR* mir) {
+  // Remove as a single element list.
+  return RemoveMIRList(mir, mir);
+}
+
+bool BasicBlock::RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir) {
+  if (first_list_mir == nullptr) {
+    return false;
+  }
+
+  // Try to find the MIR.
+  MIR* before_list = nullptr;
+  MIR* after_list = nullptr;
+
+  // If we are removing from the beginning of the MIR list.
+  if (first_mir_insn == first_list_mir) {
+    before_list = nullptr;
+  } else {
+    before_list = FindPreviousMIR(first_list_mir);
+    if (before_list == nullptr) {
+      // We did not find the mir.
+      return false;
+    }
+  }
+
+  // Remove the BB information and also find the after_list.
+  for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) {
+    mir->bb = NullBasicBlockId;
+  }
+
+  after_list = last_list_mir->next;
+
+  // If there is nothing before the list, after_list is the first_mir.
+  if (before_list == nullptr) {
+    first_mir_insn = after_list;
+  } else {
+    before_list->next = after_list;
+  }
+
+  // If there is nothing after the list, before_list is last_mir.
+  if (after_list == nullptr) {
+    last_mir_insn = before_list;
+  }
+
+  return true;
 }
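
The splice logic in InsertMIRListAfter/InsertMIRListBefore and RemoveMIRList is
ordinary intrusive singly-linked-list surgery. A minimal self-contained sketch of
the insert-after case, with a hypothetical Node/List pair standing in for
MIR/BasicBlock:

    #include <cstdio>

    struct Node {
      int id;
      Node* next;
    };

    struct List {
      Node* first = nullptr;
      Node* last = nullptr;

      // Splice the pre-linked chain [first_n .. last_n] in after pos;
      // pos == nullptr is treated as "the list is empty", as above.
      void InsertListAfter(Node* pos, Node* first_n, Node* last_n) {
        if (first_n == nullptr || last_n == nullptr) {
          return;
        }
        if (pos == nullptr) {
          first = first_n;
          last = last_n;
          last_n->next = nullptr;
        } else {
          Node* after = pos->next;
          pos->next = first_n;
          last_n->next = after;
          if (after == nullptr) {
            last = last_n;  // The chain is now the tail of the list.
          }
        }
      }
    };

    int main() {
      Node a{1, nullptr}, b{2, nullptr}, c{3, nullptr};
      b.next = &c;                         // Pre-linked chain [2, 3].
      List l;
      l.InsertListAfter(nullptr, &a, &a);  // List: [1].
      l.InsertListAfter(&a, &b, &c);       // List: [1, 2, 3].
      for (Node* n = l.first; n != nullptr; n = n->next) {
        printf("%d ", n->id);              // Prints: 1 2 3
      }
      printf("\n");
      return 0;
    }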
 
 MIR* BasicBlock::GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current) {
@@ -985,7 +1119,7 @@
   char* ret;
   bool nop = false;
   SSARepresentation* ssa_rep = mir->ssa_rep;
-  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format
+  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format.
   int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
   int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
 
@@ -993,7 +1127,7 @@
   if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
     str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
     str.append(": ");
-    // Recover the original Dex instruction
+    // Recover the original Dex instruction.
     insn = mir->meta.throw_insn->dalvikInsn;
     ssa_rep = mir->meta.throw_insn->ssa_rep;
     defs = ssa_rep->num_defs;
@@ -1007,7 +1141,7 @@
     nop = true;
   }
 
-  if (opcode >= kMirOpFirst) {
+  if (IsPseudoMirOp(opcode)) {
     str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
   } else {
     dalvik_format = Instruction::FormatOf(insn.opcode);
@@ -1052,7 +1186,7 @@
     str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
                             offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
   } else {
-    // For invokes-style formats, treat wide regs as a pair of singles
+    // For invokes-style formats, treat wide regs as a pair of singles.
     bool show_singles = ((dalvik_format == Instruction::k35c) ||
                          (dalvik_format == Instruction::k3rc));
     if (defs != 0) {
@@ -1073,28 +1207,28 @@
       }
     }
     switch (dalvik_format) {
-      case Instruction::k11n:  // Add one immediate from vB
+      case Instruction::k11n:  // Add one immediate from vB.
       case Instruction::k21s:
       case Instruction::k31i:
       case Instruction::k21h:
         str.append(StringPrintf(", #%d", insn.vB));
         break;
-      case Instruction::k51l:  // Add one wide immediate
+      case Instruction::k51l:  // Add one wide immediate.
         str.append(StringPrintf(", #%" PRId64, insn.vB_wide));
         break;
-      case Instruction::k21c:  // One register, one string/type/method index
+      case Instruction::k21c:  // One register, one string/type/method index.
       case Instruction::k31c:
         str.append(StringPrintf(", index #%d", insn.vB));
         break;
-      case Instruction::k22c:  // Two registers, one string/type/method index
+      case Instruction::k22c:  // Two registers, one string/type/method index.
         str.append(StringPrintf(", index #%d", insn.vC));
         break;
-      case Instruction::k22s:  // Add one immediate from vC
+      case Instruction::k22s:  // Add one immediate from vC.
       case Instruction::k22b:
         str.append(StringPrintf(", #%d", insn.vC));
         break;
       default: {
-        // Nothing left to print
+        // Nothing left to print.
       }
     }
   }
@@ -1128,7 +1262,7 @@
 // Similar to GetSSAName, but if ssa name represents an immediate show that as well.
 std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
   if (reg_location_ == NULL) {
-    // Pre-SSA - just use the standard name
+    // Pre-SSA - just use the standard name.
     return GetSSAName(ssa_reg);
   }
   if (IsConst(reg_location_[ssa_reg])) {
@@ -1240,10 +1374,16 @@
   return info;
 }
 
+// Allocate a new MIR.
+MIR* MIRGraph::NewMIR() {
+  MIR* mir = new (arena_) MIR();
+  return mir;
+}
+
 // Allocate a new basic block.
 BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
-  BasicBlock* bb = static_cast<BasicBlock*>(arena_->Alloc(sizeof(BasicBlock),
-                                                          kArenaAllocBB));
+  BasicBlock* bb = new (arena_) BasicBlock();
+
   bb->block_type = block_type;
   bb->id = block_id;
   // TUNING: better estimate of the exit block predecessors?
@@ -1261,36 +1401,136 @@
 }
 
 void MIRGraph::InitializeMethodUses() {
-  // The gate starts by initializing the use counts
+  // The gate starts by initializing the use counts.
   int num_ssa_regs = GetNumSSARegs();
   use_counts_.Resize(num_ssa_regs + 32);
   raw_use_counts_.Resize(num_ssa_regs + 32);
-  // Initialize list
+  // Initialize list.
   for (int i = 0; i < num_ssa_regs; i++) {
     use_counts_.Insert(0);
     raw_use_counts_.Insert(0);
   }
 }
 
-void MIRGraph::InitializeSSATransformation() {
-  /* Compute the DFS order */
-  ComputeDFSOrders();
+void MIRGraph::SSATransformationStart() {
+  DCHECK(temp_scoped_alloc_.get() == nullptr);
+  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+  temp_bit_vector_size_ = cu_->num_dalvik_registers;
+  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
+      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapRegisterV);
 
-  /* Compute the dominator info */
-  ComputeDominators();
+  // Update the maximum number of reachable blocks.
+  max_num_reachable_blocks_ = num_reachable_blocks_;
+}
 
-  /* Allocate data structures in preparation for SSA conversion */
-  CompilerInitializeSSAConversion();
+void MIRGraph::SSATransformationEnd() {
+  // Verify the dataflow information after the pass.
+  if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
+    VerifyDataflow();
+  }
 
-  /* Find out the "Dalvik reg def x block" relation */
-  ComputeDefBlockMatrix();
+  temp_bit_vector_size_ = 0u;
+  temp_bit_vector_ = nullptr;
+  DCHECK(temp_scoped_alloc_.get() != nullptr);
+  temp_scoped_alloc_.reset();
+}
 
-  /* Insert phi nodes to dominance frontiers for all variables */
-  InsertPhiNodes();
+void MIRGraph::ComputeTopologicalSortOrder() {
+  std::queue<BasicBlock*> q;
+  std::map<int, int> visited_cnt_values;
 
-  /* Rename register names by local defs and phi nodes */
+  // Clear the nodes.
   ClearAllVisitedFlags();
-  DoDFSPreOrderSSARename(GetEntryBlock());
+
+  // Create the topological order if need be.
+  if (topological_order_ == nullptr) {
+    topological_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, 0);
+  }
+  topological_order_->Reset();
+
+  // Set up the visited_cnt_values map for all BBs. The default value for these
+  // counters in the map is zero. Also fill the initial queue.
+  GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
+
+  while (true) {
+    BasicBlock* bb = iterator.Next();
+
+    if (bb == nullptr) {
+      break;
+    }
+
+    if (bb->hidden == true) {
+      continue;
+    }
+
+    visited_cnt_values[bb->id] = bb->predecessors->Size();
+
+    GrowableArray<BasicBlockId>::Iterator pred_iterator(bb->predecessors);
+    // To process loops we should not wait for dominators.
+    while (true) {
+      BasicBlock* pred_bb = GetBasicBlock(pred_iterator.Next());
+
+      if (pred_bb == nullptr) {
+        break;
+      }
+
+      if (pred_bb->dominators == nullptr || pred_bb->hidden == true) {
+        continue;
+      }
+
+      // Skip the backward branch.
+      if (pred_bb->dominators->IsBitSet(bb->id) != 0) {
+        visited_cnt_values[bb->id]--;
+      }
+    }
+
+    // Add entry block to queue.
+    if (visited_cnt_values[bb->id] == 0) {
+      q.push(bb);
+    }
+  }
+
+  while (q.size() > 0) {
+    // Get top.
+    BasicBlock* bb = q.front();
+    q.pop();
+
+    DCHECK_EQ(bb->hidden, false);
+
+    if (bb->IsExceptionBlock() == true) {
+      continue;
+    }
+
+    // We've visited all the predecessors. So, we can visit bb.
+    if (bb->visited == false) {
+      bb->visited = true;
+
+      // Now add the basic block.
+      topological_order_->Insert(bb->id);
+
+      // Reduce the visited count for all the successors and add to the queue those
+      // whose visited count reaches zero.
+      ChildBlockIterator succIter(bb, this);
+      BasicBlock* successor = succIter.Next();
+      while (successor != nullptr) {
+        // One more predecessor was visited.
+        visited_cnt_values[successor->id]--;
+
+        if (visited_cnt_values[successor->id] <= 0 && successor->visited == false &&
+            successor->hidden == false) {
+          q.push(successor);
+        }
+
+        // Take next successor.
+        successor = succIter.Next();
+      }
+    }
+  }
+}
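
ComputeTopologicalSortOrder is essentially Kahn's algorithm, with the twist that a
predecessor dominated by bb is treated as a back edge and pre-subtracted from bb's
count so that loop headers can still enter the queue. A minimal self-contained
sketch of the core visited-count idea, on a hypothetical integer graph rather than
ART basic blocks:

    #include <cstdio>
    #include <map>
    #include <queue>
    #include <vector>

    int main() {
      // Hypothetical acyclic CFG: 0 -> {1, 2}, 1 -> {3}, 2 -> {3}.
      std::vector<std::vector<int>> succ = {{1, 2}, {3}, {3}, {}};
      std::map<int, int> pending;  // Unvisited-predecessor count per node.
      for (int u = 0; u < static_cast<int>(succ.size()); u++) {
        for (int v : succ[u]) {
          pending[v]++;
        }
      }
      std::queue<int> q;
      for (int u = 0; u < static_cast<int>(succ.size()); u++) {
        if (pending[u] == 0) {
          q.push(u);  // Entry nodes have no unvisited predecessors.
        }
      }
      while (!q.empty()) {
        int u = q.front();
        q.pop();
        printf("%d ", u);  // Emitted in topological order: 0 1 2 3.
        for (int v : succ[u]) {
          if (--pending[v] == 0) {
            q.push(v);
          }
        }
      }
      printf("\n");
      return 0;
    }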
+
+bool BasicBlock::IsExceptionBlock() const {
+  if (block_type == kExceptionHandling) {
+    return true;
+  }
+  return false;
 }
 
 ChildBlockIterator::ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph)
@@ -1344,4 +1584,325 @@
   return nullptr;
 }
 
+BasicBlock* BasicBlock::Copy(CompilationUnit* c_unit) {
+  MIRGraph* mir_graph = c_unit->mir_graph.get();
+  return Copy(mir_graph);
+}
+
+BasicBlock* BasicBlock::Copy(MIRGraph* mir_graph) {
+  BasicBlock* result_bb = mir_graph->CreateNewBB(block_type);
+
+  // We don't do a memcpy-style copy here because it would copy state that we would
+  // then have to clean up. Do it by hand instead.
+  // Copy in taken and fallthrough.
+  result_bb->fall_through = fall_through;
+  result_bb->taken = taken;
+
+  // Copy successor links if needed.
+  ArenaAllocator* arena = mir_graph->GetArena();
+
+  result_bb->successor_block_list_type = successor_block_list_type;
+  if (result_bb->successor_block_list_type != kNotUsed) {
+    size_t size = successor_blocks->Size();
+    result_bb->successor_blocks =
+        new (arena) GrowableArray<SuccessorBlockInfo*>(arena, size, kGrowableArraySuccessorBlocks);
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(successor_blocks);
+    while (true) {
+      SuccessorBlockInfo* sbi_old = iterator.Next();
+      if (sbi_old == nullptr) {
+        break;
+      }
+      SuccessorBlockInfo* sbi_new = static_cast<SuccessorBlockInfo*>(
+          arena->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
+      memcpy(sbi_new, sbi_old, sizeof(SuccessorBlockInfo));
+      result_bb->successor_blocks->Insert(sbi_new);
+    }
+  }
+
+  // Copy offset, method.
+  result_bb->start_offset = start_offset;
+
+  // Now copy instructions.
+  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
+    // Get a copy first.
+    MIR* copy = mir->Copy(mir_graph);
+
+    // Append it.
+    result_bb->AppendMIR(copy);
+  }
+
+  return result_bb;
+}
+
+MIR* MIR::Copy(MIRGraph* mir_graph) {
+  MIR* res = mir_graph->NewMIR();
+  *res = *this;
+
+  // Remove links.
+  res->next = nullptr;
+  res->bb = NullBasicBlockId;
+  res->ssa_rep = nullptr;
+
+  return res;
+}
+
+MIR* MIR::Copy(CompilationUnit* c_unit) {
+  return Copy(c_unit->mir_graph.get());
+}
+
+uint32_t SSARepresentation::GetStartUseIndex(Instruction::Code opcode) {
+  // Default result.
+  uint32_t res = 0;
+
+  // We are basically setting the iputs to their igets counterparts.
+  switch (opcode) {
+    case Instruction::IPUT:
+    case Instruction::IPUT_OBJECT:
+    case Instruction::IPUT_BOOLEAN:
+    case Instruction::IPUT_BYTE:
+    case Instruction::IPUT_CHAR:
+    case Instruction::IPUT_SHORT:
+    case Instruction::IPUT_QUICK:
+    case Instruction::IPUT_OBJECT_QUICK:
+    case Instruction::APUT:
+    case Instruction::APUT_OBJECT:
+    case Instruction::APUT_BOOLEAN:
+    case Instruction::APUT_BYTE:
+    case Instruction::APUT_CHAR:
+    case Instruction::APUT_SHORT:
+    case Instruction::SPUT:
+    case Instruction::SPUT_OBJECT:
+    case Instruction::SPUT_BOOLEAN:
+    case Instruction::SPUT_BYTE:
+    case Instruction::SPUT_CHAR:
+    case Instruction::SPUT_SHORT:
+      // Skip the VR containing what to store.
+      res = 1;
+      break;
+    case Instruction::IPUT_WIDE:
+    case Instruction::IPUT_WIDE_QUICK:
+    case Instruction::APUT_WIDE:
+    case Instruction::SPUT_WIDE:
+      // Skip the two VRs containing what to store.
+      res = 2;
+      break;
+    default:
+      // Do nothing in the general case.
+      break;
+  }
+
+  return res;
+}
+
+/**
+ * @brief Given a decoded instruction, check whether it sets a constant and,
+ * if it does, provide more information about the constant being set.
+ * @param ptr_value Pointer to a 64-bit holder for the constant.
+ * @param wide Updated by the function to indicate whether a wide constant is set.
+ * @return Returns false if the decoded instruction does not set a constant.
+ */
+bool MIR::DecodedInstruction::GetConstant(int64_t* ptr_value, bool* wide) const {
+  bool sets_const = true;
+  int64_t value = vB;
+
+  DCHECK(ptr_value != nullptr);
+  DCHECK(wide != nullptr);
+
+  switch (opcode) {
+    case Instruction::CONST_4:
+    case Instruction::CONST_16:
+    case Instruction::CONST:
+      *wide = false;
+      value <<= 32;      // Shift the payload up, then arithmetic-shift back down
+      value >>= 32;      // to sign-extend the 32-bit value.
+      break;
+    case Instruction::CONST_HIGH16:
+      *wide = false;
+      value <<= 48;      // Shift the high-16 payload into the top bits, then shift
+      value >>= 32;      // back down to sign-extend the resulting 32-bit value.
+      break;
+    case Instruction::CONST_WIDE_16:
+    case Instruction::CONST_WIDE_32:
+      *wide = true;
+      value <<= 32;      // Shift the payload up, then arithmetic-shift back down
+      value >>= 32;      // to sign-extend the 32-bit value.
+      break;
+    case Instruction::CONST_WIDE:
+      *wide = true;
+      value = vB_wide;
+      break;
+    case Instruction::CONST_WIDE_HIGH16:
+      *wide = true;
+      value <<= 48;      // vB holds the high 16 bits of the wide constant.
+      break;
+    default:
+      sets_const = false;
+      break;
+  }
+
+  if (sets_const) {
+    *ptr_value = value;
+  }
+
+  return sets_const;
+}
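
The shift pairs in GetConstant are the usual trick for sign-extending a narrow
two's-complement payload inside a wider signed integer: shift the payload up to
the top bits, then arithmetic-shift back down. A self-contained sketch of the
CONST_HIGH16 case (the 0x8001 payload is made up for illustration):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t vB = 0x8001;  // CONST_HIGH16 payload: the high 16 bits of a 32-bit value.
      int64_t value = vB;
      value <<= 48;          // Move the payload's sign bit (bit 15) up to bit 63.
      value >>= 32;          // Arithmetic shift back: a sign-extended 32-bit value.
      printf("0x%016" PRIx64 "\n", static_cast<uint64_t>(value));  // 0xffffffff80010000
      return 0;
    }

As in GetConstant itself, this relies on two's-complement arithmetic shifts of
signed values, which the toolchains ART targets provide.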
+
+void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
+  // Reset flags for all MIRs in bb.
+  for (MIR* mir = first_mir_insn; mir != NULL; mir = mir->next) {
+    mir->optimization_flags &= (~reset_flags);
+  }
+}
+
+void BasicBlock::Hide(CompilationUnit* c_unit) {
+  // First, let's make it a dalvik bytecode block so it doesn't have any special meaning.
+  block_type = kDalvikByteCode;
+
+  // Mark it as hidden.
+  hidden = true;
+
+  // Detach it from its MIRs so we don't generate code for them. The detached MIRs are
+  // also updated to record that they no longer have a parent.
+  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
+    mir->bb = NullBasicBlockId;
+  }
+  first_mir_insn = nullptr;
+  last_mir_insn = nullptr;
+
+  GrowableArray<BasicBlockId>::Iterator iterator(predecessors);
+
+  MIRGraph* mir_graph = c_unit->mir_graph.get();
+  while (true) {
+    BasicBlock* pred_bb = mir_graph->GetBasicBlock(iterator.Next());
+    if (pred_bb == nullptr) {
+      break;
+    }
+
+    // Sadly we have to go through the children by hand here.
+    pred_bb->ReplaceChild(id, NullBasicBlockId);
+  }
+
+  // Iterate through children of bb we are hiding.
+  ChildBlockIterator successorChildIter(this, mir_graph);
+
+  for (BasicBlock* childPtr = successorChildIter.Next(); childPtr != nullptr;
+       childPtr = successorChildIter.Next()) {
+    // Remove this hidden block from the child's predecessor list.
+    childPtr->predecessors->Delete(id);
+  }
+}
+
+bool BasicBlock::IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg) {
+  // In order to determine if the ssa reg is live out, we scan all the MIRs. We remember
+  // the last SSA number defined for the same dalvik register. At the end, if it is
+  // different from ssa_reg, then ssa_reg is not live out of this BB.
+  int dalvik_reg = c_unit->mir_graph->SRegToVReg(ssa_reg);
+
+  int last_ssa_reg = -1;
+
+  // Walk through the MIRs in order, remembering the most recent def.
+  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
+    // Get ssa rep.
+    SSARepresentation* ssa_rep = mir->ssa_rep;
+
+    // Go through the defines for this MIR.
+    for (int i = 0; i < ssa_rep->num_defs; i++) {
+      DCHECK(ssa_rep->defs != nullptr);
+
+      // Get the ssa reg.
+      int def_ssa_reg = ssa_rep->defs[i];
+
+      // Get dalvik reg.
+      int def_dalvik_reg = c_unit->mir_graph->SRegToVReg(def_ssa_reg);
+
+      // Compare dalvik regs.
+      if (dalvik_reg == def_dalvik_reg) {
+        // We found a def of the register that we are being asked about.
+        // Remember it.
+        last_ssa_reg = def_ssa_reg;
+      }
+    }
+  }
+
+  if (last_ssa_reg == -1) {
+    // If we get to this point, we could not find a def of the register being asked about.
+    // Be conservative: if no def was found, assume the register is live out.
+    return true;
+  }
+
+  // Otherwise we found a match; is it ssa_reg?
+  return (ssa_reg == last_ssa_reg);
+}
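
The scan in IsSSALiveOut is a "last def wins" pass over the block. The same idea
in a self-contained sketch, using hypothetical (vreg, ssa_reg) def pairs in block
order instead of walking MIRs:

    #include <cstdio>
    #include <utility>
    #include <vector>

    // ssa_reg is live out iff it is the last SSA name defined for vreg in the
    // block, or the block never defines vreg (conservatively live out).
    bool IsLiveOut(const std::vector<std::pair<int, int>>& defs, int vreg, int ssa_reg) {
      int last_ssa = -1;
      for (const auto& def : defs) {
        if (def.first == vreg) {
          last_ssa = def.second;  // A later def overwrites an earlier one.
        }
      }
      return last_ssa == -1 || last_ssa == ssa_reg;
    }

    int main() {
      std::vector<std::pair<int, int>> defs = {{0, 4}, {1, 5}, {0, 6}};
      printf("%d %d\n", IsLiveOut(defs, 0, 4), IsLiveOut(defs, 0, 6));  // Prints: 0 1
      return 0;
    }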
+
+bool BasicBlock::ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb) {
+  // We need to check taken, fall_through, and successor_blocks to replace.
+  bool found = false;
+  if (taken == old_bb) {
+    taken = new_bb;
+    found = true;
+  }
+
+  if (fall_through == old_bb) {
+    fall_through = new_bb;
+    found = true;
+  }
+
+  if (successor_block_list_type != kNotUsed) {
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(successor_blocks);
+    while (true) {
+      SuccessorBlockInfo* successor_block_info = iterator.Next();
+      if (successor_block_info == nullptr) {
+        break;
+      }
+      if (successor_block_info->block == old_bb) {
+        successor_block_info->block = new_bb;
+        found = true;
+      }
+    }
+  }
+
+  return found;
+}
+
+void BasicBlock::UpdatePredecessor(BasicBlockId old_parent, BasicBlockId new_parent) {
+  GrowableArray<BasicBlockId>::Iterator iterator(predecessors);
+  bool found = false;
+
+  while (true) {
+    BasicBlockId pred_bb_id = iterator.Next();
+
+    if (pred_bb_id == NullBasicBlockId) {
+      break;
+    }
+
+    if (pred_bb_id == old_parent) {
+      size_t idx = iterator.GetIndex() - 1;
+      predecessors->Put(idx, new_parent);
+      found = true;
+      break;
+    }
+  }
+
+  // If not found, add it.
+  if (found == false) {
+    predecessors->Insert(new_parent);
+  }
+}
+
+// Create a new basic block using num_blocks_ as its block_id; num_blocks_ is
+// post-incremented.
+BasicBlock* MIRGraph::CreateNewBB(BBType block_type) {
+  BasicBlock* res = NewMemBB(block_type, num_blocks_++);
+  block_list_.Insert(res);
+  return res;
+}
+
+void MIRGraph::CalculateBasicBlockInformation() {
+  PassDriverMEPostOpt driver(cu_);
+  driver.Launch();
+}
+
+void MIRGraph::InitializeBasicBlockData() {
+  num_blocks_ = block_list_.Size();
+}
+
 }  // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 11d2fbe..38cd5ee 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -223,7 +223,7 @@
   ArenaBitVector* def_v;
   ArenaBitVector* live_in_v;
   ArenaBitVector* phi_v;
-  int32_t* vreg_to_ssa_map;
+  int32_t* vreg_to_ssa_map_exit;
   ArenaBitVector* ending_check_v;  // For null check and class init check elimination.
 };
 
@@ -234,14 +234,23 @@
  * Following SSA renaming, this is the primary struct used by code generators to locate
  * operand and result registers.  This is a somewhat confusing and unhelpful convention that
  * we may want to revisit in the future.
+ *
+ * TODO:
+ *  1. Add accessors for uses/defs and make data private
+ *  2. Change fp_use/fp_def to a bit array (could help memory usage)
+ *  3. Combine array storage into an internal array and handle it via the accessors from 1.
  */
 struct SSARepresentation {
-  int16_t num_uses;
-  int16_t num_defs;
   int32_t* uses;
   bool* fp_use;
   int32_t* defs;
   bool* fp_def;
+  int16_t num_uses_allocated;
+  int16_t num_defs_allocated;
+  int16_t num_uses;
+  int16_t num_defs;
+
+  static uint32_t GetStartUseIndex(Instruction::Code opcode);
 };
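
TODO item 2 above proposes packing fp_use/fp_def into bit arrays. A sketch of the
intended saving, with a hypothetical BitArray helper (not an ART type): one bit
per flag instead of one bool byte, so 32 flags fit in one uint32_t word.

    #include <cstdint>
    #include <cstdio>

    struct BitArray {
      uint32_t* words;  // Caller-provided backing storage.
      void Set(int i, bool v) {
        if (v) {
          words[i >> 5] |= 1u << (i & 31);
        } else {
          words[i >> 5] &= ~(1u << (i & 31));
        }
      }
      bool Get(int i) const {
        return ((words[i >> 5] >> (i & 31)) & 1u) != 0;
      }
    };

    int main() {
      uint32_t storage[1] = {0};
      BitArray fp_use{storage};  // 32 flags in 4 bytes instead of 32 bools.
      fp_use.Set(3, true);
      printf("%d\n", fp_use.Get(3) ? 1 : 0);  // Prints: 1
      return 0;
    }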
 
 /*
@@ -261,12 +270,65 @@
     uint32_t vC;
     uint32_t arg[5];         /* vC/D/E/F/G in invoke or filled-new-array */
     Instruction::Code opcode;
+
+    explicit DecodedInstruction() : vA(0), vB(0), vB_wide(0), vC(0), opcode(Instruction::NOP) {
+    }
+
+    /*
+     * Given a decoded instruction representing a const bytecode, update the
+     * out arguments with the values dictated by that bytecode.
+     */
+    bool GetConstant(int64_t* ptr_value, bool* wide) const;
+
+    bool IsStore() const {
+      return ((Instruction::FlagsOf(opcode) & Instruction::kStore) == Instruction::kStore);
+    }
+
+    bool IsLoad() const {
+      return ((Instruction::FlagsOf(opcode) & Instruction::kLoad) == Instruction::kLoad);
+    }
+
+    bool IsConditionalBranch() const {
+      return (Instruction::FlagsOf(opcode) == (Instruction::kContinue | Instruction::kBranch));
+    }
+
+    /**
+     * @brief Is the register C component of the decoded instruction a constant?
+     */
+    bool IsCFieldOrConstant() const {
+      return ((Instruction::FlagsOf(opcode) & Instruction::kRegCFieldOrConstant) ==
+              Instruction::kRegCFieldOrConstant);
+    }
+
+    /**
+     * @brief Is the register B component of the decoded instruction a constant?
+     */
+    bool IsBFieldOrConstant() const {
+      return ((Instruction::FlagsOf(opcode) & Instruction::kRegBFieldOrConstant) ==
+              Instruction::kRegBFieldOrConstant);
+    }
+
+    bool IsCast() const {
+      return ((Instruction::FlagsOf(opcode) & Instruction::kCast) == Instruction::kCast);
+    }
+
+    /**
+     * @brief Does the instruction clobber memory?
+     * @details Clobber means that the instruction changes memory in a non-local way, so
+     *          any assumption about memory aliasing or memory contents should be
+     *          discarded when crossing such an instruction.
+     */
+    bool Clobbers() const {
+      return ((Instruction::FlagsOf(opcode) & Instruction::kClobber) == Instruction::kClobber);
+    }
+
+    bool IsLinear() const {
+      return (Instruction::FlagsOf(opcode) & (Instruction::kAdd | Instruction::kSubtract)) != 0;
+    }
   } dalvikInsn;
 
-  uint16_t width;                 // Note: width can include switch table or fill array data.
   NarrowDexOffset offset;         // Offset of the instruction in code units.
   uint16_t optimization_flags;
   int16_t m_unit_index;           // From which method was this MIR included
+  BasicBlockId bb;
   MIR* next;
   SSARepresentation* ssa_rep;
   union {
@@ -285,6 +347,23 @@
     // INVOKE data index, points to MIRGraph::method_lowering_infos_.
     uint32_t method_lowering_info;
   } meta;
+
+  explicit MIR() : offset(0), optimization_flags(0), m_unit_index(0), bb(NullBasicBlockId),
+                 next(nullptr), ssa_rep(nullptr) {
+    memset(&meta, 0, sizeof(meta));
+  }
+
+  uint32_t GetStartUseIndex() const {
+    return SSARepresentation::GetStartUseIndex(dalvikInsn.opcode);
+  }
+
+  MIR* Copy(CompilationUnit *c_unit);
+  MIR* Copy(MIRGraph* mir_graph);
+
+  static void* operator new(size_t size, ArenaAllocator* arena) {
+    return arena->Alloc(sizeof(MIR), kArenaAllocMIR);
+  }
+  static void operator delete(void* p) {}  // Nop.
 };
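
The class-specific operator new here (and on BasicBlock below) routes construction
through the compiler arena: placement-style operator new hands allocation to the
arena, and operator delete is a no-op because the arena reclaims everything at
once. A self-contained sketch with a hypothetical bump allocator standing in for
ArenaAllocator:

    #include <cstddef>
    #include <cstdio>

    struct Arena {
      char buf[1024];
      size_t used = 0;
      void* Alloc(size_t size) {
        void* p = buf + used;
        used += (size + 7) & ~static_cast<size_t>(7);  // Keep 8-byte alignment.
        return p;
      }
    };

    struct Node {
      int value = 42;
      static void* operator new(size_t size, Arena* arena) {
        return arena->Alloc(size);
      }
      static void operator delete(void* p) {}  // Nop: the arena owns the memory.
    };

    int main() {
      Arena arena;
      Node* n = new (&arena) Node();  // Same shape as: new (arena_) MIR().
      printf("%d\n", n->value);       // Prints: 42
      return 0;
    }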
 
 struct SuccessorBlockInfo;
@@ -317,8 +396,49 @@
   GrowableArray<SuccessorBlockInfo*>* successor_blocks;
 
   void AppendMIR(MIR* mir);
+  void AppendMIRList(MIR* first_list_mir, MIR* last_list_mir);
+  void AppendMIRList(const std::vector<MIR*>& insns);
   void PrependMIR(MIR* mir);
+  void PrependMIRList(MIR* first_list_mir, MIR* last_list_mir);
+  void PrependMIRList(const std::vector<MIR*>& to_add);
   void InsertMIRAfter(MIR* current_mir, MIR* new_mir);
+  void InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir);
+  MIR* FindPreviousMIR(MIR* mir);
+  void InsertMIRBefore(MIR* insert_before, MIR* list);
+  void InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir);
+  bool RemoveMIR(MIR* mir);
+  bool RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir);
+
+  BasicBlock* Copy(CompilationUnit* c_unit);
+  BasicBlock* Copy(MIRGraph* mir_graph);
+
+  /**
+   * @brief Reset the optimization_flags field of each MIR.
+   */
+  void ResetOptimizationFlags(uint16_t reset_flags);
+
+  /**
+   * @brief Hide the BasicBlock.
+   * @details Set it to kDalvikByteCode, set hidden to true, remove all MIRs,
+   *          remove itself from any predecessor edges, remove itself from any
+   *          child's predecessor growable array.
+   */
+  void Hide(CompilationUnit* c_unit);
+
+  /**
+   * @brief Is ssa_reg the last SSA definition of that VR in the block?
+   */
+  bool IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg);
+
+  /**
+   * @brief Replace the edge going to old_bb to now go towards new_bb.
+   */
+  bool ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb);
+
+  /**
+   * @brief Update the predecessor growable array from old_pred to new_pred.
+   */
+  void UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred);
 
   /**
    * @brief Used to obtain the next MIR that follows unconditionally.
@@ -329,11 +449,17 @@
    * @return Returns the following MIR if one can be found.
    */
   MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
+  bool IsExceptionBlock() const;
+
+  static void* operator new(size_t size, ArenaAllocator* arena) {
+    return arena->Alloc(sizeof(BasicBlock), kArenaAllocBB);
+  }
+  static void operator delete(void* p) {}  // Nop.
 };
 
 /*
  * The "blocks" field in "successor_block_list" points to an array of elements with the type
- * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For swtich
+ * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For switch
  * blocks, key is the case value.
  */
 struct SuccessorBlockInfo {
@@ -573,6 +699,10 @@
 
   void BasicBlockOptimization();
 
+  GrowableArray<BasicBlockId>* GetTopologicalSortOrder() {
+    return topological_order_;
+  }
+
   bool IsConst(int32_t s_reg) const {
     return is_constant_v_->IsBitSet(s_reg);
   }
@@ -775,11 +905,11 @@
     return backward_branches_ + forward_branches_;
   }
 
-  bool IsPseudoMirOp(Instruction::Code opcode) {
+  static bool IsPseudoMirOp(Instruction::Code opcode) {
     return static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst);
   }
 
-  bool IsPseudoMirOp(int opcode) {
+  static bool IsPseudoMirOp(int opcode) {
     return opcode >= static_cast<int>(kMirOpFirst);
   }
 
@@ -794,7 +924,7 @@
   void VerifyDataflow();
   void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
   void EliminateNullChecksAndInferTypesStart();
-  bool EliminateNullChecksAndInferTypes(BasicBlock *bb);
+  bool EliminateNullChecksAndInferTypes(BasicBlock* bb);
   void EliminateNullChecksAndInferTypesEnd();
   bool EliminateClassInitChecksGate();
   bool EliminateClassInitChecks(BasicBlock* bb);
@@ -836,9 +966,12 @@
   void DumpMIRGraph();
   CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
   BasicBlock* NewMemBB(BBType block_type, int block_id);
+  MIR* NewMIR();
   MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir);
   BasicBlock* NextDominatedBlock(BasicBlock* bb);
   bool LayoutBlocks(BasicBlock* bb);
+  void ComputeTopologicalSortOrder();
+  BasicBlock* CreateNewBB(BBType block_type);
 
   bool InlineCallsGate();
   void InlineCallsStart();
@@ -858,7 +991,7 @@
   /**
    * @brief Perform the initial preparation for the SSA Transformation.
    */
-  void InitializeSSATransformation();
+  void SSATransformationStart();
 
   /**
    * @brief Insert a the operands for the Phi nodes.
@@ -868,6 +1001,11 @@
   bool InsertPhiNodeOperands(BasicBlock* bb);
 
   /**
+   * @brief Perform the cleanup after the SSA Transformation.
+   */
+  void SSATransformationEnd();
+
+  /**
    * @brief Perform constant propagation on a BasicBlock.
    * @param bb the considered BasicBlock.
    */
@@ -889,6 +1027,18 @@
   void CombineBlocks(BasicBlock* bb);
 
   void ClearAllVisitedFlags();
+
+  void AllocateSSAUseData(MIR* mir, int num_uses);
+  void AllocateSSADefData(MIR* mir, int num_defs);
+  void CalculateBasicBlockInformation();
+  void InitializeBasicBlockData();
+  void ComputeDFSOrders();
+  void ComputeDefBlockMatrix();
+  void ComputeDominators();
+  void CompilerInitializeSSAConversion();
+  void InsertPhiNodes();
+  void DoDFSPreOrderSSARename(BasicBlock* block);
+
   /*
    * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
    * we can verify that all catch entries have native PC entries.
@@ -904,7 +1054,6 @@
 
   void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
   bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);
-  void ComputeDFSOrders();
 
  protected:
   int FindCommonParent(int block1, int block2);
@@ -913,7 +1062,6 @@
   void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
                        ArenaBitVector* live_in_v, int dalvik_reg_id);
   void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
-  void CompilerInitializeSSAConversion();
   bool DoSSAConversion(BasicBlock* bb);
   bool InvokeUsesMethodStar(MIR* mir);
   int ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction);
@@ -940,11 +1088,7 @@
   BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb);
   void MarkPreOrder(BasicBlock* bb);
   void RecordDFSOrders(BasicBlock* bb);
-  void ComputeDefBlockMatrix();
   void ComputeDomPostOrderTraversal(BasicBlock* bb);
-  void ComputeDominators();
-  void InsertPhiNodes();
-  void DoDFSPreOrderSSARename(BasicBlock* block);
   void SetConstant(int32_t ssa_reg, int value);
   void SetConstantWide(int ssa_reg, int64_t value);
   int GetSSAUseCount(int s_reg);
@@ -974,12 +1118,13 @@
   GrowableArray<uint32_t> use_counts_;      // Weighted by nesting depth
   GrowableArray<uint32_t> raw_use_counts_;  // Not weighted
   unsigned int num_reachable_blocks_;
+  unsigned int max_num_reachable_blocks_;
   GrowableArray<BasicBlockId>* dfs_order_;
   GrowableArray<BasicBlockId>* dfs_post_order_;
   GrowableArray<BasicBlockId>* dom_post_order_traversal_;
+  GrowableArray<BasicBlockId>* topological_order_;
   int* i_dom_list_;
   ArenaBitVector** def_block_matrix_;    // num_dalvik_register x num_blocks.
-  ArenaBitVector* temp_dalvik_register_v_;
   std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
   uint16_t* temp_insn_data_;
   uint32_t temp_bit_vector_size_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 749a235..1460ce6 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -129,17 +129,16 @@
   BasicBlock* tbb = bb;
   mir = AdvanceMIR(&tbb, mir);
   while (mir != NULL) {
-    int opcode = mir->dalvikInsn.opcode;
     if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
         (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
         (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
       break;
     }
     // Keep going if pseudo op, otherwise terminate
-    if (opcode < kNumPackedOpcodes) {
-      mir = NULL;
-    } else {
+    if (IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       mir = AdvanceMIR(&tbb, mir);
+    } else {
+      mir = NULL;
     }
   }
   return mir;
@@ -268,13 +267,22 @@
     DCHECK_EQ(ct_type, kCompilerTempVR);
 
     // The new non-special compiler temp must receive a unique v_reg with a negative value.
-    compiler_temp->v_reg = static_cast<int>(kVRegNonSpecialTempBaseReg) - num_non_special_compiler_temps_;
+    compiler_temp->v_reg = static_cast<int>(kVRegNonSpecialTempBaseReg) -
+        num_non_special_compiler_temps_;
     compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
     num_non_special_compiler_temps_++;
 
     if (wide) {
-      // Ensure that the two registers are consecutive. Since the virtual registers used for temps grow in a
-      // negative fashion, we need the smaller to refer to the low part. Thus, we redefine the v_reg and s_reg_low.
+      // Create a new CompilerTemp for the high part.
+      CompilerTemp* compiler_temp_high =
+          static_cast<CompilerTemp*>(arena_->Alloc(sizeof(CompilerTemp), kArenaAllocRegAlloc));
+      compiler_temp_high->v_reg = compiler_temp->v_reg;
+      compiler_temp_high->s_reg_low = compiler_temp->s_reg_low;
+      compiler_temps_.Insert(compiler_temp_high);
+
+      // Ensure that the two registers are consecutive. Since the virtual registers used for temps
+      // grow in a negative fashion, we need the smaller to refer to the low part. Thus, we
+      // redefine the v_reg and s_reg_low.
       compiler_temp->v_reg--;
       int ssa_reg_high = compiler_temp->s_reg_low;
       compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
@@ -311,9 +319,11 @@
     return true;
   }
   bool use_lvn = bb->use_lvn;
+  std::unique_ptr<ScopedArenaAllocator> allocator;
   std::unique_ptr<LocalValueNumbering> local_valnum;
   if (use_lvn) {
-    local_valnum.reset(LocalValueNumbering::Create(cu_));
+    allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+    local_valnum.reset(new (allocator.get()) LocalValueNumbering(cu_, allocator.get()));
   }
   while (bb != NULL) {
     for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
@@ -406,7 +416,8 @@
       // TODO: flesh out support for Mips.  NOTE: llvm's select op doesn't quite work here.
       // TUNING: expand to support IF_xx compare & branches
       if (!cu_->compiler->IsPortable() &&
-          (cu_->instruction_set == kThumb2 || cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
+          (cu_->instruction_set == kArm64 || cu_->instruction_set == kThumb2 ||
+           cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
           IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
         BasicBlock* ft = GetBasicBlock(bb->fall_through);
         DCHECK(ft != NULL);
@@ -432,6 +443,8 @@
           if (SelectKind(tk->last_mir_insn) == kSelectGoto) {
               tk->last_mir_insn->optimization_flags |= (MIR_IGNORE_SUSPEND_CHECK);
           }
+
+          // TODO: Add logic for LONG.
           // Are the block bodies something we can handle?
           if ((ft->first_mir_insn == ft->last_mir_insn) &&
               (tk->first_mir_insn != tk->last_mir_insn) &&
@@ -541,6 +554,9 @@
     }
     bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
   }
+  if (use_lvn && UNLIKELY(!local_valnum->Good())) {
+    LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  }
 
   return true;
 }
@@ -852,7 +868,7 @@
           struct BasicBlock* next_bb = GetBasicBlock(bb->fall_through);
           for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
             tmir =tmir->next) {
-            if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
+            if (IsPseudoMirOp(tmir->dalvikInsn.opcode)) {
               continue;
             }
             // First non-pseudo should be MOVE_RESULT_OBJECT
@@ -1169,6 +1185,9 @@
     return;
   }
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    if (IsPseudoMirOp(mir->dalvikInsn.opcode)) {
+      continue;
+    }
     if (!(Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke)) {
       continue;
     }
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 891d9fb..69c394f 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -170,7 +170,6 @@
       }
       mir->ssa_rep = nullptr;
       mir->offset = 2 * i;  // All insns need to be at least 2 code units long.
-      mir->width = 2u;
       mir->optimization_flags = 0u;
       merged_df_flags |= MIRGraph::GetDataFlowAttributes(def->opcode);
     }
@@ -194,7 +193,7 @@
     ASSERT_TRUE(gate_result);
     RepeatingPreOrderDfsIterator iterator(cu_.mir_graph.get());
     bool change = false;
-    for (BasicBlock *bb = iterator.Next(change); bb != 0; bb = iterator.Next(change)) {
+    for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
       change = cu_.mir_graph->EliminateClassInitChecks(bb);
     }
     cu_.mir_graph->EliminateClassInitChecksEnd();
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index 9457d5b..b4906d6 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -19,6 +19,7 @@
 
 #include <string>
 
+#include "base/macros.h"
 namespace art {
 
 // Forward declarations.
@@ -26,42 +27,18 @@
 struct CompilationUnit;
 class Pass;
 
-/**
- * @brief OptimizationFlag is an enumeration to perform certain tasks for a given pass.
- * @details Each enum should be a power of 2 to be correctly used.
- */
-enum OptimizationFlag {
-};
-
-enum DataFlowAnalysisMode {
-  kAllNodes = 0,                           /**< @brief All nodes. */
-  kPreOrderDFSTraversal,                   /**< @brief Depth-First-Search / Pre-Order. */
-  kRepeatingPreOrderDFSTraversal,          /**< @brief Depth-First-Search / Repeating Pre-Order. */
-  kReversePostOrderDFSTraversal,           /**< @brief Depth-First-Search / Reverse Post-Order. */
-  kRepeatingPostOrderDFSTraversal,         /**< @brief Depth-First-Search / Repeating Post-Order. */
-  kRepeatingReversePostOrderDFSTraversal,  /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */
-  kPostOrderDOMTraversal,                  /**< @brief Dominator tree / Post-Order. */
-  kNoNodes,                                /**< @brief Skip BasicBlock traversal. */
+// Empty pass data class; it can be extended by any pass extending the base Pass class.
+class PassDataHolder {
 };
 
 /**
  * @class Pass
- * @brief Pass is the Pass structure for the optimizations.
- * @details The following structure has the different optimization passes that we are going to do.
+ * @brief Base Pass class; it can be extended to define more specialized behavior for the Worker call.
  */
 class Pass {
  public:
-  explicit Pass(const char* name, DataFlowAnalysisMode type = kAllNodes,
-                unsigned int flags = 0u, const char* dump = "")
-    : pass_name_(name), traversal_type_(type), flags_(flags), dump_cfg_folder_(dump) {
-  }
-
-  Pass(const char* name, DataFlowAnalysisMode type, const char* dump)
-    : pass_name_(name), traversal_type_(type), flags_(0), dump_cfg_folder_(dump) {
-  }
-
-  Pass(const char* name, const char* dump)
-    : pass_name_(name), traversal_type_(kAllNodes), flags_(0), dump_cfg_folder_(dump) {
+  explicit Pass(const char* name)
+    : pass_name_(name) {
   }
 
   virtual ~Pass() {
@@ -71,77 +48,66 @@
     return pass_name_;
   }
 
-  virtual DataFlowAnalysisMode GetTraversal() const {
-    return traversal_type_;
-  }
-
-  virtual bool GetFlag(OptimizationFlag flag) const {
-    return (flags_ & flag);
-  }
-
-  const char* GetDumpCFGFolder() const {
-    return dump_cfg_folder_;
-  }
-
   /**
    * @brief Gate for the pass: determines whether to execute the pass or not considering a CompilationUnit
-   * @param c_unit the CompilationUnit.
-   * @return whether or not to execute the pass
+   * @param data the PassDataHolder.
+   * @return whether or not to execute the pass.
    */
-  virtual bool Gate(const CompilationUnit* c_unit) const {
+  virtual bool Gate(const PassDataHolder* data) const {
     // Unused parameter.
-    UNUSED(c_unit);
+    UNUSED(data);
 
     // Base class says yes.
     return true;
   }
 
   /**
-   * @brief Start of the pass: called before the WalkBasicBlocks function
-   * @param c_unit the considered CompilationUnit.
+   * @brief Start of the pass: called before the Worker function.
    */
-  virtual void Start(CompilationUnit* c_unit) const {
+  virtual void Start(const PassDataHolder* data) const {
     // Unused parameter.
-    UNUSED(c_unit);
+    UNUSED(data);
   }
 
   /**
-   * @brief End of the pass: called after the WalkBasicBlocks function
-   * @param c_unit the considered CompilationUnit.
+   * @brief End of the pass: called after the Worker function.
    */
-  virtual void End(CompilationUnit* c_unit) const {
+  virtual void End(const PassDataHolder* data) const {
     // Unused parameter.
-    UNUSED(c_unit);
+    UNUSED(data);
   }
 
   /**
-   * @brief Actually walk the BasicBlocks following a particular traversal type.
-   * @param c_unit the CompilationUnit.
-   * @param bb the BasicBlock.
+   * @brief Worker function for the pass: performs the work on the given data.
+   * @param data the object containing data necessary for the pass.
    * @return whether or not there is a change when walking the BasicBlock
    */
-  virtual bool WalkBasicBlocks(CompilationUnit* c_unit, BasicBlock* bb) const {
-    // Unused parameters.
-    UNUSED(c_unit);
-    UNUSED(bb);
+  virtual bool Worker(const PassDataHolder* data) const {
+    // Unused parameter.
+    UNUSED(data);
 
     // BasicBlock did not change.
     return false;
   }
 
+  static void BasePrintMessage(CompilationUnit* c_unit, const char* pass_name, const char* message, ...) {
+    // Check if we want to log something or not.
+    if (c_unit->print_pass) {
+      // Stringify the message.
+      va_list args;
+      va_start(args, message);
+      std::string stringified_message;
+      StringAppendV(&stringified_message, message, args);
+      va_end(args);
+
+      // Log the message and ensure to include pass name.
+      LOG(INFO) << pass_name << ": " << stringified_message;
+    }
+  }
+
  protected:
   /** @brief The pass name: used for searching for a pass when running a particular pass or debugging. */
   const char* const pass_name_;
 
-  /** @brief Type of traversal: determines the order to execute the pass on the BasicBlocks. */
-  const DataFlowAnalysisMode traversal_type_;
-
-  /** @brief Flags for additional directives: used to determine if a particular clean-up is necessary post pass. */
-  const unsigned int flags_;
-
-  /** @brief CFG Dump Folder: what sub-folder to use for dumping the CFGs post pass. */
-  const char* const dump_cfg_folder_;
-
  private:
   // In order to make the all passes not copy-friendly.
   DISALLOW_COPY_AND_ASSIGN(Pass);
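
For reference, a minimal sketch of a pass written against the reworked interface above; the class name and behavior here are hypothetical, not part of this change:

    // Hypothetical pass using the new PassDataHolder-based interface.
    class ExamplePass : public Pass {
     public:
      ExamplePass() : Pass("ExamplePass") {
      }

      bool Gate(const PassDataHolder* data) const {
        // A real pass would inspect its concrete data holder here.
        return data != nullptr;
      }

      bool Worker(const PassDataHolder* data) const {
        UNUSED(data);
        // Report that nothing changed.
        return false;
      }
    };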
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
deleted file mode 100644
index 999ed2a..0000000
--- a/compiler/dex/pass_driver.cc
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <dlfcn.h>
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "bb_optimizations.h"
-#include "compiler_internals.h"
-#include "dataflow_iterator.h"
-#include "dataflow_iterator-inl.h"
-#include "pass.h"
-#include "pass_driver.h"
-
-namespace art {
-
-namespace {  // anonymous namespace
-
-/**
- * @brief Helper function to create a single instance of a given Pass and can be shared across
- * the threads.
- */
-template <typename PassType>
-const Pass* GetPassInstance() {
-  static const PassType pass;
-  return &pass;
-}
-
-void DoWalkBasicBlocks(CompilationUnit* c_unit, const Pass* pass, DataflowIterator* iterator) {
-  // Paranoid: Check the iterator before walking the BasicBlocks.
-  DCHECK(iterator != nullptr);
-
-  bool change = false;
-  for (BasicBlock *bb = iterator->Next(change); bb != 0; bb = iterator->Next(change)) {
-    change = pass->WalkBasicBlocks(c_unit, bb);
-  }
-}
-
-template <typename Iterator>
-inline void DoWalkBasicBlocks(CompilationUnit* c_unit, const Pass* pass) {
-  Iterator iterator(c_unit->mir_graph.get());
-  DoWalkBasicBlocks(c_unit, pass, &iterator);
-}
-
-}  // anonymous namespace
-
-PassDriver::PassDriver(CompilationUnit* cu, bool create_default_passes)
-    : cu_(cu), dump_cfg_folder_("/sdcard/") {
-  DCHECK(cu != nullptr);
-
-  // If need be, create the default passes.
-  if (create_default_passes) {
-    CreatePasses();
-  }
-}
-
-PassDriver::~PassDriver() {
-}
-
-void PassDriver::InsertPass(const Pass* new_pass) {
-  DCHECK(new_pass != nullptr);
-  DCHECK(new_pass->GetName() != nullptr && new_pass->GetName()[0] != 0);
-
-  // It is an error to override an existing pass.
-  DCHECK(GetPass(new_pass->GetName()) == nullptr)
-      << "Pass name " << new_pass->GetName() << " already used.";
-
-  // Now add to the list.
-  pass_list_.push_back(new_pass);
-}
-
-/*
- * Create the pass list. These passes are immutable and are shared across the threads.
- *
- * Advantage is that there will be no race conditions here.
- * Disadvantage is the passes can't change their internal states depending on CompilationUnit:
- *   - This is not yet an issue: no current pass would require it.
- */
-static const Pass* const gPasses[] = {
-  GetPassInstance<CacheFieldLoweringInfo>(),
-  GetPassInstance<CacheMethodLoweringInfo>(),
-  GetPassInstance<CallInlining>(),
-  GetPassInstance<CodeLayout>(),
-  GetPassInstance<SSATransformation>(),
-  GetPassInstance<ConstantPropagation>(),
-  GetPassInstance<InitRegLocations>(),
-  GetPassInstance<MethodUseCount>(),
-  GetPassInstance<NullCheckEliminationAndTypeInference>(),
-  GetPassInstance<ClassInitCheckElimination>(),
-  GetPassInstance<BBCombine>(),
-  GetPassInstance<BBOptimizations>(),
-};
-
-// The default pass list is used by CreatePasses to initialize pass_list_.
-static std::vector<const Pass*> gDefaultPassList(gPasses, gPasses + arraysize(gPasses));
-
-void PassDriver::CreateDefaultPassList(const std::string& disable_passes) {
-  // Insert each pass from gPasses into gDefaultPassList.
-  gDefaultPassList.clear();
-  gDefaultPassList.reserve(arraysize(gPasses));
-  for (const Pass* pass : gPasses) {
-    // Check if we should disable this pass.
-    if (disable_passes.find(pass->GetName()) != std::string::npos) {
-      LOG(INFO) << "Skipping " << pass->GetName();
-    } else {
-      gDefaultPassList.push_back(pass);
-    }
-  }
-}
-
-void PassDriver::CreatePasses() {
-  // Insert each pass into the list via the InsertPass method.
-  pass_list_.reserve(gDefaultPassList.size());
-  for (const Pass* pass : gDefaultPassList) {
-    InsertPass(pass);
-  }
-}
-
-void PassDriver::HandlePassFlag(CompilationUnit* c_unit, const Pass* pass) {
-  // Unused parameters for the moment.
-  UNUSED(c_unit);
-  UNUSED(pass);
-}
-
-void PassDriver::DispatchPass(CompilationUnit* c_unit, const Pass* curPass) {
-  VLOG(compiler) << "Dispatching " << curPass->GetName();
-
-  DataFlowAnalysisMode mode = curPass->GetTraversal();
-
-  switch (mode) {
-    case kPreOrderDFSTraversal:
-      DoWalkBasicBlocks<PreOrderDfsIterator>(c_unit, curPass);
-      break;
-    case kRepeatingPreOrderDFSTraversal:
-      DoWalkBasicBlocks<RepeatingPreOrderDfsIterator>(c_unit, curPass);
-      break;
-    case kRepeatingPostOrderDFSTraversal:
-      DoWalkBasicBlocks<RepeatingPostOrderDfsIterator>(c_unit, curPass);
-      break;
-    case kReversePostOrderDFSTraversal:
-      DoWalkBasicBlocks<ReversePostOrderDfsIterator>(c_unit, curPass);
-      break;
-    case kRepeatingReversePostOrderDFSTraversal:
-      DoWalkBasicBlocks<RepeatingReversePostOrderDfsIterator>(c_unit, curPass);
-      break;
-    case kPostOrderDOMTraversal:
-      DoWalkBasicBlocks<PostOrderDOMIterator>(c_unit, curPass);
-      break;
-    case kAllNodes:
-      DoWalkBasicBlocks<AllNodesIterator>(c_unit, curPass);
-      break;
-    case kNoNodes:
-      break;
-    default:
-      LOG(FATAL) << "Iterator mode not handled in dispatcher: " << mode;
-      break;
-  }
-}
-
-void PassDriver::ApplyPass(CompilationUnit* c_unit, const Pass* curPass) {
-  curPass->Start(c_unit);
-  DispatchPass(c_unit, curPass);
-  curPass->End(c_unit);
-}
-
-bool PassDriver::RunPass(CompilationUnit* c_unit, const Pass* pass, bool time_split) {
-  // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name.
-  DCHECK(c_unit != nullptr);
-  DCHECK(pass != nullptr);
-  DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
-
-  // Do we perform a time split
-  if (time_split) {
-    c_unit->NewTimingSplit(pass->GetName());
-  }
-
-  // Check the pass gate first.
-  bool should_apply_pass = pass->Gate(c_unit);
-
-  if (should_apply_pass) {
-    // Applying the pass: first start, doWork, and end calls.
-    ApplyPass(c_unit, pass);
-
-    // Clean up if need be.
-    HandlePassFlag(c_unit, pass);
-
-    // Do we want to log it?
-    if ((c_unit->enable_debug&  (1 << kDebugDumpCFG)) != 0) {
-      // Do we have a pass folder?
-      const char* passFolder = pass->GetDumpCFGFolder();
-      DCHECK(passFolder != nullptr);
-
-      if (passFolder[0] != 0) {
-        // Create directory prefix.
-        std::string prefix = GetDumpCFGFolder();
-        prefix += passFolder;
-        prefix += "/";
-
-        c_unit->mir_graph->DumpCFG(prefix.c_str(), false);
-      }
-    }
-  }
-
-  // If the pass gate passed, we can declare success.
-  return should_apply_pass;
-}
-
-bool PassDriver::RunPass(CompilationUnit* c_unit, const char* pass_name) {
-  // Paranoid: c_unit cannot be nullptr and we need a pass name.
-  DCHECK(c_unit != nullptr);
-  DCHECK(pass_name != nullptr && pass_name[0] != 0);
-
-  const Pass* cur_pass = GetPass(pass_name);
-
-  if (cur_pass != nullptr) {
-    return RunPass(c_unit, cur_pass);
-  }
-
-  // Return false, we did not find the pass.
-  return false;
-}
-
-void PassDriver::Launch() {
-  for (const Pass* cur_pass : pass_list_) {
-    RunPass(cu_, cur_pass, true);
-  }
-}
-
-void PassDriver::PrintPassNames() {
-  LOG(INFO) << "Loop Passes are:";
-
-  for (const Pass* cur_pass : gPasses) {
-    LOG(INFO) << "\t-" << cur_pass->GetName();
-  }
-}
-
-const Pass* PassDriver::GetPass(const char* name) const {
-  for (const Pass* cur_pass : pass_list_) {
-    if (strcmp(name, cur_pass->GetName()) == 0) {
-      return cur_pass;
-    }
-  }
-  return nullptr;
-}
-
-}  // namespace art
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 2b7196e..bd8f53c 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -22,77 +22,190 @@
 #include "safe_map.h"
 
 // Forward Declarations.
-class CompilationUnit;
 class Pass;
-
+class PassDriver;
 namespace art {
+/**
+ * @brief Helper function to create a single instance of a given Pass that can be
+ * shared across threads.
+ */
+template <typename PassType>
+const Pass* GetPassInstance() {
+  static const PassType pass;
+  return &pass;
+}
+
+// Empty holder for the constructor.
+class PassDriverDataHolder {
+};
 
 /**
  * @class PassDriver
- * @brief PassDriver is the wrapper around all Pass instances in order to execute them from the Middle-End
+ * @brief PassDriver is the wrapper around all Pass instances in order to execute them.
  */
+template <typename PassDriverType>
 class PassDriver {
  public:
-  explicit PassDriver(CompilationUnit* cu, bool create_default_passes = true);
+  explicit PassDriver() {
+    InitializePasses();
+  }
 
-  ~PassDriver();
+  virtual ~PassDriver() {
+  }
 
   /**
    * @brief Insert a Pass: can warn if multiple passes have the same name.
-   * @param new_pass the new Pass to insert in the map and list.
-   * @param warn_override warn if the name of the Pass is already used.
    */
-  void InsertPass(const Pass* new_pass);
+  void InsertPass(const Pass* new_pass) {
+    DCHECK(new_pass != nullptr);
+    DCHECK(new_pass->GetName() != nullptr && new_pass->GetName()[0] != 0);
+
+    // It is an error to override an existing pass.
+    DCHECK(GetPass(new_pass->GetName()) == nullptr)
+        << "Pass name " << new_pass->GetName() << " already used.";
+
+    // Now add to the list.
+    pass_list_.push_back(new_pass);
+  }
 
   /**
    * @brief Run a pass using the name as key.
-   * @param c_unit the considered CompilationUnit.
-   * @param pass_name the Pass name.
    * @return whether the pass was applied.
    */
-  bool RunPass(CompilationUnit* c_unit, const char* pass_name);
+  virtual bool RunPass(const char* pass_name) {
+    // Paranoid: we need a non-empty pass name.
+    DCHECK(pass_name != nullptr && pass_name[0] != 0);
+
+    const Pass* cur_pass = GetPass(pass_name);
+
+    if (cur_pass != nullptr) {
+      return RunPass(cur_pass);
+    }
+
+    // Return false, we did not find the pass.
+    return false;
+  }
+
+  /**
+   * @brief Runs all the passes in pass_list_.
+   */
+  void Launch() {
+    for (const Pass* cur_pass : pass_list_) {
+      RunPass(cur_pass);
+    }
+  }
+
+  /**
+   * @brief Searches for a particular pass.
+   * @param name the name of the pass to be searched for.
+   */
+  const Pass* GetPass(const char* name) const {
+    for (const Pass* cur_pass : pass_list_) {
+      if (strcmp(name, cur_pass->GetName()) == 0) {
+        return cur_pass;
+      }
+    }
+    return nullptr;
+  }
+
+  static void CreateDefaultPassList(const std::string& disable_passes) {
+    // Insert each pass from g_passes into g_default_pass_list.
+    PassDriverType::g_default_pass_list.clear();
+    PassDriverType::g_default_pass_list.reserve(PassDriver<PassDriverType>::g_passes_size);
+    for (uint16_t i = 0; i < PassDriver<PassDriverType>::g_passes_size; ++i) {
+      const Pass* pass = PassDriver<PassDriverType>::g_passes[i];
+      // Check if we should disable this pass.
+      if (disable_passes.find(pass->GetName()) != std::string::npos) {
+        LOG(INFO) << "Skipping " << pass->GetName();
+      } else {
+        PassDriver<PassDriverType>::g_default_pass_list.push_back(pass);
+      }
+    }
+  }
 
   /**
    * @brief Run a pass using the Pass itself.
   * @param time_split do we want a time split request (default: false)?
    * @return whether the pass was applied.
    */
-  bool RunPass(CompilationUnit* c_unit, const Pass* pass, bool time_split = false);
+  virtual bool RunPass(const Pass* pass, bool time_split = false) = 0;
 
-  void Launch();
+  /**
+   * @brief Print the pass names of all the passes available.
+   */
+  static void PrintPassNames() {
+    LOG(INFO) << "Loop Passes are:";
 
-  void HandlePassFlag(CompilationUnit* c_unit, const Pass* pass);
+    for (const Pass* cur_pass : PassDriver<PassDriverType>::g_default_pass_list) {
+      LOG(INFO) << "\t-" << cur_pass->GetName();
+    }
+  }
+
+  /**
+   * @brief Gets the list of passes currently scheduled to execute.
+   * @return pass_list_
+   */
+  std::vector<const Pass*>& GetPasses() {
+    return pass_list_;
+  }
+
+  static void SetPrintAllPasses() {
+    default_print_passes_ = true;
+  }
+
+  static void SetDumpPassList(const std::string& list) {
+    dump_pass_list_ = list;
+  }
+
+  static void SetPrintPassList(const std::string& list) {
+    print_pass_list_ = list;
+  }
+
+  void SetDefaultPasses() {
+    pass_list_ = PassDriver<PassDriverType>::g_default_pass_list;
+  }
+
+ protected:
+  virtual void InitializePasses() {
+    SetDefaultPasses();
+  }
 
   /**
   * @brief Apply a pass: perform the start/work/end functions.
    */
-  void ApplyPass(CompilationUnit* c_unit, const Pass* pass);
-
-  /**
-   * @brief Dispatch a patch: walk the BasicBlocks depending on the traversal mode
-   */
-  void DispatchPass(CompilationUnit* c_unit, const Pass* pass);
-
-  static void PrintPassNames();
-  static void CreateDefaultPassList(const std::string& disable_passes);
-
-  const Pass* GetPass(const char* name) const;
-
-  const char* GetDumpCFGFolder() const {
-    return dump_cfg_folder_;
+  virtual void ApplyPass(PassDataHolder* data, const Pass* pass) {
+    pass->Start(data);
+    DispatchPass(pass);
+    pass->End(data);
   }
-
- protected:
-  void CreatePasses();
+  /**
+   * @brief Dispatch a pass.
+   * Gives subclasses the ability to add logic when running the pass.
+   */
+  virtual void DispatchPass(const Pass* pass) {
+    UNUSED(pass);
+  }
 
   /** @brief List of passes: provides the order to execute the passes. */
   std::vector<const Pass*> pass_list_;
 
-  /** @brief The CompilationUnit on which to execute the passes on. */
-  CompilationUnit* const cu_;
+  /** @brief The number of passes within g_passes.  */
+  static const uint16_t g_passes_size;
 
-  /** @brief Dump CFG base folder: where is the base folder for dumping CFGs. */
-  const char* dump_cfg_folder_;
+  /** @brief The initial, immutable list of passes (its size is g_passes_size). */
+  static const Pass* const g_passes[];
+
+  /** @brief The default pass list is used to initialize pass_list_. */
+  static std::vector<const Pass*> g_default_pass_list;
+
+  /** @brief Do we, by default, want to be printing the log messages? */
+  static bool default_print_passes_;
+
+  /** @brief Which passes should print their log messages? */
+  static std::string print_pass_list_;
+
+  /** @brief Which passes should dump the CFG? */
+  static std::string dump_pass_list_;
 };
 
 }  // namespace art
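
Because PassDriver is now a CRTP template, every concrete driver type must provide explicit specializations of the static members declared above (g_passes, g_passes_size, g_default_pass_list, and the print/dump settings); pass_driver_me_opts.cc and pass_driver_me_post_opt.cc below contain the real instances. A condensed sketch for a hypothetical driver type, inside namespace art:

    // Hypothetical driver type "MyDriver"; mirrors the specializations below.
    template<>
    const Pass* const PassDriver<MyDriver>::g_passes[] = {
      GetPassInstance<ExamplePass>(),  // hypothetical pass
    };

    template<>
    uint16_t const PassDriver<MyDriver>::g_passes_size =
        arraysize(PassDriver<MyDriver>::g_passes);

    template<>
    std::vector<const Pass*> PassDriver<MyDriver>::g_default_pass_list(
        PassDriver<MyDriver>::g_passes,
        PassDriver<MyDriver>::g_passes + PassDriver<MyDriver>::g_passes_size);

    template<>
    bool PassDriver<MyDriver>::default_print_passes_ = false;

The print_pass_list_ and dump_pass_list_ strings need the same one-line specializations.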
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
new file mode 100644
index 0000000..7d76fb8
--- /dev/null
+++ b/compiler/dex/pass_driver_me.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_PASS_DRIVER_ME_H_
+#define ART_COMPILER_DEX_PASS_DRIVER_ME_H_
+
+#include "bb_optimizations.h"
+#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
+#include "pass_driver.h"
+#include "pass_me.h"
+
+namespace art {
+
+template <typename PassDriverType>
+class PassDriverME : public PassDriver<PassDriverType> {
+ public:
+  explicit PassDriverME(CompilationUnit* cu)
+      : pass_me_data_holder_(), dump_cfg_folder_("/sdcard/") {
+    pass_me_data_holder_.bb = nullptr;
+    pass_me_data_holder_.c_unit = cu;
+  }
+
+  ~PassDriverME() {
+  }
+
+  void DispatchPass(const Pass* pass) {
+    VLOG(compiler) << "Dispatching " << pass->GetName();
+    const PassME* me_pass = down_cast<const PassME*>(pass);
+
+    DataFlowAnalysisMode mode = me_pass->GetTraversal();
+
+    switch (mode) {
+      case kPreOrderDFSTraversal:
+        DoWalkBasicBlocks<PreOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+        break;
+      case kRepeatingPreOrderDFSTraversal:
+        DoWalkBasicBlocks<RepeatingPreOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+        break;
+      case kRepeatingPostOrderDFSTraversal:
+        DoWalkBasicBlocks<RepeatingPostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+        break;
+      case kReversePostOrderDFSTraversal:
+        DoWalkBasicBlocks<ReversePostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+        break;
+      case kRepeatingReversePostOrderDFSTraversal:
+        DoWalkBasicBlocks<RepeatingReversePostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+        break;
+      case kPostOrderDOMTraversal:
+        DoWalkBasicBlocks<PostOrderDOMIterator>(&pass_me_data_holder_, me_pass);
+        break;
+      case kAllNodes:
+        DoWalkBasicBlocks<AllNodesIterator>(&pass_me_data_holder_, me_pass);
+        break;
+      case kNoNodes:
+        break;
+      default:
+        LOG(FATAL) << "Iterator mode not handled in dispatcher: " << mode;
+        break;
+    }
+  }
+
+  bool RunPass(const Pass* pass, bool time_split) {
+    // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name.
+    DCHECK(pass != nullptr);
+    DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
+    CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
+    DCHECK(c_unit != nullptr);
+
+    // Do we perform a time split
+    if (time_split) {
+      c_unit->NewTimingSplit(pass->GetName());
+    }
+
+    // Check the pass gate first.
+    bool should_apply_pass = pass->Gate(&pass_me_data_holder_);
+    if (should_apply_pass) {
+      bool old_print_pass = c_unit->print_pass;
+
+      c_unit->print_pass = PassDriver<PassDriverType>::default_print_passes_;
+
+      const char* print_pass_list = PassDriver<PassDriverType>::print_pass_list_.c_str();
+
+      if (print_pass_list[0] != 0 && strstr(print_pass_list, pass->GetName()) != nullptr) {
+        c_unit->print_pass = true;
+      }
+
+      // Applying the pass: first start, doWork, and end calls.
+      this->ApplyPass(&pass_me_data_holder_, pass);
+
+      bool should_dump = ((c_unit->enable_debug & (1 << kDebugDumpCFG)) != 0);
+
+      const char* dump_pass_list = PassDriver<PassDriverType>::dump_pass_list_.c_str();
+
+      if (dump_pass_list[0] != 0) {
+        bool found = strstr(dump_pass_list, pass->GetName()) != nullptr;
+        should_dump = (should_dump || found);
+      }
+
+      if (should_dump) {
+        // Do we have a pass folder?
+        const PassME* me_pass = down_cast<const PassME*>(pass);
+        const char* pass_folder = me_pass->GetDumpCFGFolder();
+        DCHECK(pass_folder != nullptr);
+
+        if (pass_folder[0] != 0) {
+          // Create directory prefix.
+          std::string prefix = GetDumpCFGFolder();
+          prefix += pass_folder;
+          prefix += "/";
+
+          c_unit->mir_graph->DumpCFG(prefix.c_str(), false);
+        }
+      }
+
+      c_unit->print_pass = old_print_pass;
+    }
+
+    // If the pass gate passed, we can declare success.
+    return should_apply_pass;
+  }
+
+  const char* GetDumpCFGFolder() const {
+    return dump_cfg_folder_;
+  }
+
+ protected:
+  /** @brief The data holder that contains data needed for the PassDriverME. */
+  PassMEDataHolder pass_me_data_holder_;
+
+  /** @brief Dump CFG base folder: where is the base folder for dumping CFGs. */
+  const char* dump_cfg_folder_;
+
+  static void DoWalkBasicBlocks(PassMEDataHolder* data, const PassME* pass,
+                                DataflowIterator* iterator) {
+    // Paranoid: Check the iterator before walking the BasicBlocks.
+    DCHECK(iterator != nullptr);
+    bool change = false;
+    for (BasicBlock* bb = iterator->Next(change); bb != nullptr; bb = iterator->Next(change)) {
+      data->bb = bb;
+      change = pass->Worker(data);
+    }
+  }
+
+  template <typename Iterator>
+  inline static void DoWalkBasicBlocks(PassMEDataHolder* data, const PassME* pass) {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = data->c_unit;
+    DCHECK(c_unit != nullptr);
+    Iterator iterator(c_unit->mir_graph.get());
+    DoWalkBasicBlocks(data, pass, &iterator);
+  }
+};
+}  // namespace art
+#endif  // ART_COMPILER_DEX_PASS_DRIVER_ME_H_
+
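
Taken together with the base class, typical use of a middle-end driver is: construct it around a CompilationUnit, then run either the whole list or a single pass by name. A hedged usage sketch; the driver type comes from pass_driver_me_opts.h below, and the pass name string is assumed to match that pass's GetName():

    // Usage sketch, assuming the PassDriverMEOpts subclass defined below.
    void RunMiddleEndOpts(CompilationUnit* cu) {
      PassDriverMEOpts driver(cu);
      driver.Launch();              // Run every pass in pass_list_.
      driver.RunPass("BBCombine");  // Or re-run one pass by its name.
    }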
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
new file mode 100644
index 0000000..52a2273
--- /dev/null
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/macros.h"
+#include "bb_optimizations.h"
+#include "compiler_internals.h"
+#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
+#include "pass_driver_me_opts.h"
+
+namespace art {
+
+/*
+ * Create the pass list. These passes are immutable and are shared across the threads.
+ *
+ * Advantage is that there will be no race conditions here.
+ * Disadvantage is the passes can't change their internal states depending on CompilationUnit:
+ *   - This is not yet an issue: no current pass would require it.
+ */
+// The initial list of passes to be used by the PassDriverMEOpts.
+template<>
+const Pass* const PassDriver<PassDriverMEOpts>::g_passes[] = {
+  GetPassInstance<CacheFieldLoweringInfo>(),
+  GetPassInstance<CacheMethodLoweringInfo>(),
+  GetPassInstance<CallInlining>(),
+  GetPassInstance<CodeLayout>(),
+  GetPassInstance<NullCheckEliminationAndTypeInference>(),
+  GetPassInstance<ClassInitCheckElimination>(),
+  GetPassInstance<BBCombine>(),
+  GetPassInstance<BBOptimizations>(),
+};
+
+// The number of the passes in the initial list of Passes (g_passes).
+template<>
+uint16_t const PassDriver<PassDriverMEOpts>::g_passes_size =
+    arraysize(PassDriver<PassDriverMEOpts>::g_passes);
+
+// The default pass list is used by the PassDriverME instance of PassDriver
+// to initialize pass_list_.
+template<>
+std::vector<const Pass*> PassDriver<PassDriverMEOpts>::g_default_pass_list(
+    PassDriver<PassDriverMEOpts>::g_passes,
+    PassDriver<PassDriverMEOpts>::g_passes +
+    PassDriver<PassDriverMEOpts>::g_passes_size);
+
+// By default, do not have a dump pass list.
+template<>
+std::string PassDriver<PassDriverMEOpts>::dump_pass_list_ = std::string();
+
+// By default, do not have a print pass list.
+template<>
+std::string PassDriver<PassDriverMEOpts>::print_pass_list_ = std::string();
+
+// By default, we do not print the passes' information.
+template<>
+bool PassDriver<PassDriverMEOpts>::default_print_passes_ = false;
+
+void PassDriverMEOpts::ApplyPass(PassDataHolder* data, const Pass* pass) {
+  // First call the base class' version.
+  PassDriver::ApplyPass(data, pass);
+
+  const PassME* pass_me = down_cast<const PassME*>(pass);
+  DCHECK(pass_me != nullptr);
+
+  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
+
+  // Now we care about flags.
+  if (pass_me->GetFlag(kOptimizationBasicBlockChange) ||
+      pass_me->GetFlag(kOptimizationDefUsesChange)) {
+    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+    c_unit->mir_graph->CalculateBasicBlockInformation();
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/pass_driver_me_opts.h b/compiler/dex/pass_driver_me_opts.h
new file mode 100644
index 0000000..0a5b5ae
--- /dev/null
+++ b/compiler/dex/pass_driver_me_opts.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_PASS_DRIVER_ME_OPTS_H_
+#define ART_COMPILER_DEX_PASS_DRIVER_ME_OPTS_H_
+
+#include "pass_driver_me.h"
+
+namespace art {
+
+// Forward Declarations.
+struct CompilationUnit;
+class Pass;
+class PassDataHolder;
+
+class PassDriverMEOpts : public PassDriverME<PassDriverMEOpts> {
+ public:
+  explicit PassDriverMEOpts(CompilationUnit* cu) : PassDriverME<PassDriverMEOpts>(cu) {
+  }
+
+  ~PassDriverMEOpts() {
+  }
+
+  /**
+   * @brief Apply a pass: perform the start/work/end functions.
+   */
+  virtual void ApplyPass(PassDataHolder* data, const Pass* pass);
+};
+
+}  // namespace art
+#endif  // ART_COMPILER_DEX_PASS_DRIVER_ME_OPTS_H_
diff --git a/compiler/dex/pass_driver_me_post_opt.cc b/compiler/dex/pass_driver_me_post_opt.cc
new file mode 100644
index 0000000..cb63f41
--- /dev/null
+++ b/compiler/dex/pass_driver_me_post_opt.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/macros.h"
+#include "post_opt_passes.h"
+#include "compiler_internals.h"
+#include "pass_driver_me_post_opt.h"
+
+namespace art {
+
+/*
+ * Create the pass list. These passes are immutable and are shared across the threads.
+ *
+ * Advantage is that there will be no race conditions here.
+ * Disadvantage is the passes can't change their internal states depending on CompilationUnit:
+ *   - This is not yet an issue: no current pass would require it.
+ */
+// The initial list of passes to be used by the PassDriverMEPostOpt.
+template<>
+const Pass* const PassDriver<PassDriverMEPostOpt>::g_passes[] = {
+  GetPassInstance<InitializeData>(),
+  GetPassInstance<ClearPhiInstructions>(),
+  GetPassInstance<CalculatePredecessors>(),
+  GetPassInstance<DFSOrders>(),
+  GetPassInstance<BuildDomination>(),
+  GetPassInstance<DefBlockMatrix>(),
+  GetPassInstance<CreatePhiNodes>(),
+  GetPassInstance<ClearVisitedFlag>(),
+  GetPassInstance<SSAConversion>(),
+  GetPassInstance<PhiNodeOperands>(),
+  GetPassInstance<ConstantPropagation>(),
+  GetPassInstance<PerformInitRegLocations>(),
+  GetPassInstance<MethodUseCount>(),
+  GetPassInstance<FreeData>(),
+};
+
+// The number of the passes in the initial list of Passes (g_passes).
+template<>
+uint16_t const PassDriver<PassDriverMEPostOpt>::g_passes_size =
+    arraysize(PassDriver<PassDriverMEPostOpt>::g_passes);
+
+// The default pass list is used by the PassDriverME instance of PassDriver
+// to initialize pass_list_.
+template<>
+std::vector<const Pass*> PassDriver<PassDriverMEPostOpt>::g_default_pass_list(
+    PassDriver<PassDriverMEPostOpt>::g_passes,
+    PassDriver<PassDriverMEPostOpt>::g_passes +
+    PassDriver<PassDriverMEPostOpt>::g_passes_size);
+
+// By default, do not have a dump pass list.
+template<>
+std::string PassDriver<PassDriverMEPostOpt>::dump_pass_list_ = std::string();
+
+// By default, do not have a print pass list.
+template<>
+std::string PassDriver<PassDriverMEPostOpt>::print_pass_list_ = std::string();
+
+// By default, we do not print the passes' information.
+template<>
+bool PassDriver<PassDriverMEPostOpt>::default_print_passes_ = false;
+
+}  // namespace art
diff --git a/compiler/dex/pass_driver_me_post_opt.h b/compiler/dex/pass_driver_me_post_opt.h
new file mode 100644
index 0000000..574a6ba
--- /dev/null
+++ b/compiler/dex/pass_driver_me_post_opt.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_PASS_DRIVER_ME_POST_OPT_H_
+#define ART_COMPILER_DEX_PASS_DRIVER_ME_POST_OPT_H_
+
+#include "pass_driver_me.h"
+
+namespace art {
+
+// Forward Declarations.
+struct CompilationUnit;
+class Pass;
+class PassDataHolder;
+
+class PassDriverMEPostOpt : public PassDriverME<PassDriverMEPostOpt> {
+ public:
+  explicit PassDriverMEPostOpt(CompilationUnit* cu) : PassDriverME<PassDriverMEPostOpt>(cu) {
+  }
+
+  ~PassDriverMEPostOpt() {
+  }
+};
+
+}  // namespace art
+#endif  // ART_COMPILER_DEX_PASS_DRIVER_ME_POST_OPT_H_
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
new file mode 100644
index 0000000..9efd5ae
--- /dev/null
+++ b/compiler/dex/pass_me.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_PASS_ME_H_
+#define ART_COMPILER_DEX_PASS_ME_H_
+
+#include <string>
+#include "pass.h"
+
+namespace art {
+
+// Forward declarations.
+struct BasicBlock;
+struct CompilationUnit;
+class Pass;
+
+/**
+ * @brief OptimizationFlag is an enumeration to perform certain tasks for a given pass.
+ * @details Each enum should be a power of 2 to be correctly used.
+ */
+enum OptimizationFlag {
+  kOptimizationBasicBlockChange = 1,  /**< @brief Has there been a change to a BasicBlock? */
+  kOptimizationDefUsesChange = 2,     /**< @brief Has there been a change to a def-use? */
+  kLoopStructureChange = 4,           /**< @brief Has there been a loop structural change? */
+};
+
+// Data holder class.
+class PassMEDataHolder : public PassDataHolder {
+ public:
+  CompilationUnit* c_unit;
+  BasicBlock* bb;
+};
+
+enum DataFlowAnalysisMode {
+  kAllNodes = 0,                           /**< @brief All nodes. */
+  kPreOrderDFSTraversal,                   /**< @brief Depth-First-Search / Pre-Order. */
+  kRepeatingPreOrderDFSTraversal,          /**< @brief Depth-First-Search / Repeating Pre-Order. */
+  kReversePostOrderDFSTraversal,           /**< @brief Depth-First-Search / Reverse Post-Order. */
+  kRepeatingPostOrderDFSTraversal,         /**< @brief Depth-First-Search / Repeating Post-Order. */
+  kRepeatingReversePostOrderDFSTraversal,  /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */
+  kPostOrderDOMTraversal,                  /**< @brief Dominator tree / Post-Order. */
+  kTopologicalSortTraversal,               /**< @brief Topological Order traversal. */
+  kRepeatingTopologicalSortTraversal,      /**< @brief Repeating Topological Order traversal. */
+  kNoNodes,                                /**< @brief Skip BasicBlock traversal. */
+};
+
+/**
+ * @class PassME
+ * @brief PassME is the Pass class used by the middle-end optimizations.
+ * @details It carries the traversal mode, optimization flags, and CFG dump folder for a pass.
+ */
+class PassME : public Pass {
+ public:
+  explicit PassME(const char* name, DataFlowAnalysisMode type = kAllNodes,
+          unsigned int flags = 0u, const char* dump = "")
+    : Pass(name), traversal_type_(type), flags_(flags), dump_cfg_folder_(dump) {
+  }
+
+  PassME(const char* name, DataFlowAnalysisMode type, const char* dump)
+    : Pass(name), traversal_type_(type), flags_(0), dump_cfg_folder_(dump) {
+  }
+
+  PassME(const char* name, const char* dump)
+    : Pass(name), traversal_type_(kAllNodes), flags_(0), dump_cfg_folder_(dump) {
+  }
+
+  ~PassME() {
+  }
+
+  virtual DataFlowAnalysisMode GetTraversal() const {
+    return traversal_type_;
+  }
+
+  const char* GetDumpCFGFolder() const {
+    return dump_cfg_folder_;
+  }
+
+  bool GetFlag(OptimizationFlag flag) const {
+    return (flags_ & flag);
+  }
+
+ protected:
+  /** @brief Type of traversal: determines the order to execute the pass on the BasicBlocks. */
+  const DataFlowAnalysisMode traversal_type_;
+
+  /** @brief Flags for additional directives: used to determine if a particular post-optimization pass is necessary. */
+  const unsigned int flags_;
+
+  /** @brief CFG Dump Folder: what sub-folder to use for dumping the CFGs post pass. */
+  const char* const dump_cfg_folder_;
+};
+}  // namespace art
+#endif  // ART_COMPILER_DEX_PASS_ME_H_
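
A concrete middle-end pass layers this metadata on top of the base class. For illustration only, a hypothetical pass that repeats a pre-order walk until the CFG stabilizes and requests a def-use recalculation afterwards:

    // Hypothetical PassME subclass: repeating traversal plus a clean-up flag.
    class ExampleMEPass : public PassME {
     public:
      ExampleMEPass()
        : PassME("ExampleMEPass", kRepeatingPreOrderDFSTraversal,
                 kOptimizationDefUsesChange, "example_cfg") {
      }

      bool Worker(const PassDataHolder* data) const {
        BasicBlock* bb = down_cast<const PassMEDataHolder*>(data)->bb;
        UNUSED(bb);
        // Returning true would make the repeating iterator walk the blocks again.
        return false;
      }
    };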
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 576e242..fd67608 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -712,7 +712,7 @@
   int opt_flags = mir->optimization_flags;
 
   if (cu_->verbose) {
-    if (op_val < kMirOpFirst) {
+    if (!IsPseudoMirOp(op_val)) {
       LOG(INFO) << ".. " << Instruction::Name(opcode) << " 0x" << std::hex << op_val;
     } else {
       LOG(INFO) << mir_graph_->extended_mir_op_names_[op_val - kMirOpFirst] << " 0x" << std::hex << op_val;
@@ -1550,7 +1550,7 @@
   SetDexOffset(bb->start_offset);
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int opcode = mir->dalvikInsn.opcode;
-    if (opcode < kMirOpFirst) {
+    if (!IsPseudoMirOp(opcode)) {
       // Stop after first non-pseudo MIR op.
       continue;
     }
@@ -1759,7 +1759,7 @@
       }
     }
 
-    if (opcode >= kMirOpFirst) {
+    if (IsPseudoMirOp(opcode)) {
       ConvertExtendedMIR(bb, mir, llvm_bb);
       continue;
     }
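
The three hunks above replace raw comparisons against kMirOpFirst with the IsPseudoMirOp() predicate. Judging by the code it replaces, the helper is presumably just a named form of the same range check, along the lines of:

    // Assumed shape of the predicate (not shown in this patch): extended
    // (pseudo) MIR opcodes live at and above kMirOpFirst.
    static inline bool IsPseudoMirOp(int opcode) {
      return opcode >= static_cast<int>(kMirOpFirst);
    }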
diff --git a/compiler/dex/post_opt_passes.cc b/compiler/dex/post_opt_passes.cc
new file mode 100644
index 0000000..58700a4
--- /dev/null
+++ b/compiler/dex/post_opt_passes.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "post_opt_passes.h"
+#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
+
+namespace art {
+
+/*
+ * MethodUseCount pass implementation start.
+ */
+bool MethodUseCount::Gate(const PassDataHolder* data) const {
+  DCHECK(data != nullptr);
+  CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+  DCHECK(c_unit != nullptr);
+  // First initialize the data.
+  c_unit->mir_graph->InitializeMethodUses();
+
+  // Now check if the pass is to be ignored.
+  bool res = ((c_unit->disable_opt & (1 << kPromoteRegs)) == 0);
+
+  return res;
+}
+
+bool MethodUseCount::Worker(const PassDataHolder* data) const {
+  DCHECK(data != nullptr);
+  const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+  DCHECK(c_unit != nullptr);
+  BasicBlock* bb = pass_me_data_holder->bb;
+  DCHECK(bb != nullptr);
+  c_unit->mir_graph->CountUses(bb);
+  // No need to repeat, so just return false.
+  return false;
+}
+
+
+bool ClearPhiInstructions::Worker(const PassDataHolder* data) const {
+  DCHECK(data != nullptr);
+  const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+  DCHECK(c_unit != nullptr);
+  BasicBlock* bb = pass_me_data_holder->bb;
+  DCHECK(bb != nullptr);
+  MIR* mir = bb->first_mir_insn;
+
+  while (mir != nullptr) {
+    MIR* next = mir->next;
+
+    Instruction::Code opcode = mir->dalvikInsn.opcode;
+
+    if (opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
+      bb->RemoveMIR(mir);
+    }
+
+    mir = next;
+  }
+
+  // We do not care about reporting whether the MIR changed.
+  return false;
+}
+
+void CalculatePredecessors::Start(const PassDataHolder* data) const {
+  DCHECK(data != nullptr);
+  CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+  DCHECK(c_unit != nullptr);
+  // Get the MIRGraph here to factor out some code.
+  MIRGraph* mir_graph = c_unit->mir_graph.get();
+
+  // First clear all predecessors.
+  AllNodesIterator first(mir_graph);
+  for (BasicBlock* bb = first.Next(); bb != nullptr; bb = first.Next()) {
+    bb->predecessors->Reset();
+  }
+
+  // Now calculate all predecessors.
+  AllNodesIterator second(mir_graph);
+  for (BasicBlock* bb = second.Next(); bb != nullptr; bb = second.Next()) {
+    // We only care about non-hidden blocks.
+    if (bb->hidden) {
+      continue;
+    }
+
+    // Create iterator for visiting children.
+    ChildBlockIterator child_iter(bb, mir_graph);
+
+    // Now iterate through the children to set the predecessor bits.
+    for (BasicBlock* child = child_iter.Next(); child != nullptr; child = child_iter.Next()) {
+      child->predecessors->Insert(bb->id);
+    }
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/post_opt_passes.h b/compiler/dex/post_opt_passes.h
new file mode 100644
index 0000000..f203505
--- /dev/null
+++ b/compiler/dex/post_opt_passes.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_POST_OPT_PASSES_H_
+#define ART_COMPILER_DEX_POST_OPT_PASSES_H_
+
+#include "compiler_internals.h"
+#include "pass_me.h"
+
+namespace art {
+
+/**
+ * @class InitializeData
+ * @brief There is some data that needs to be initialized before performing
+ * the post-optimization passes.
+ */
+class InitializeData : public PassME {
+ public:
+  InitializeData() : PassME("InitializeData") {
+  }
+
+  void Start(const PassDataHolder* data) const {
+    // New blocks may have been inserted so the first thing we do is ensure that
+    // the c_unit's number of blocks matches the actual count of basic blocks.
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph.get()->InitializeBasicBlockData();
+    c_unit->mir_graph.get()->SSATransformationStart();
+  }
+};
+
+/**
+ * @class MethodUseCount
+ * @brief Count the register uses of the method
+ */
+class MethodUseCount : public PassME {
+ public:
+  MethodUseCount() : PassME("UseCount") {
+  }
+
+  bool Worker(const PassDataHolder* data) const;
+
+  bool Gate(const PassDataHolder* data) const;
+};
+
+/**
+ * @class ClearPhiInstructions
+ * @brief Clear the PHI nodes from the CFG.
+ */
+class ClearPhiInstructions : public PassME {
+ public:
+  ClearPhiInstructions() : PassME("ClearPhiInstructions") {
+  }
+
+  bool Worker(const PassDataHolder* data) const;
+};
+
+/**
+ * @class CalculatePredecessors
+ * @brief Calculate the predecessor BitVector of each BasicBlock.
+ */
+class CalculatePredecessors : public PassME {
+ public:
+  CalculatePredecessors() : PassME("CalculatePredecessors") {
+  }
+
+  void Start(const PassDataHolder* data) const;
+};
+
+/**
+ * @class DFSOrders
+ * @brief Compute the DFS order of the MIR graph
+ */
+class DFSOrders : public PassME {
+ public:
+  DFSOrders() : PassME("DFSOrders") {
+  }
+
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph.get()->ComputeDFSOrders();
+  }
+};
+
+/**
+ * @class BuildDomination
+ * @brief Build the domination information of the MIR Graph
+ */
+class BuildDomination : public PassME {
+ public:
+  BuildDomination() : PassME("BuildDomination") {
+  }
+
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph.get()->ComputeDominators();
+    c_unit->mir_graph.get()->CompilerInitializeSSAConversion();
+  }
+
+  void End(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    // Verify the dataflow information after the pass.
+    if (c_unit->enable_debug & (1 << kDebugVerifyDataflow)) {
+      c_unit->mir_graph->VerifyDataflow();
+    }
+  }
+};
+
+/**
+ * @class DefBlockMatrix
+ * @brief Calculate the matrix of definitions per basic block.
+ */
+class DefBlockMatrix : public PassME {
+ public:
+  DefBlockMatrix() : PassME("DefBlockMatrix") {
+  }
+
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph.get()->ComputeDefBlockMatrix();
+  }
+};
+
+/**
+ * @class CreatePhiNodes
+ * @brief Pass to create the phi nodes after SSA calculation
+ */
+class CreatePhiNodes : public PassME {
+ public:
+  CreatePhiNodes() : PassME("CreatePhiNodes") {
+  }
+
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph.get()->InsertPhiNodes();
+  }
+};
+
+/**
+ * @class ClearVisitedFlag
+ * @brief Pass to clear the visited flag for all basic blocks.
+ */
+class ClearVisitedFlag : public PassME {
+ public:
+  ClearVisitedFlag() : PassME("ClearVisitedFlag") {
+  }
+
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph.get()->ClearAllVisitedFlags();
+  }
+};
+
+/**
+ * @class SSAConversion
+ * @brief Pass for SSA conversion of MIRs
+ */
+class SSAConversion : public PassME {
+ public:
+  SSAConversion() : PassME("SSAConversion") {
+  }
+
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    MIRGraph* mir_graph = c_unit->mir_graph.get();
+    mir_graph->DoDFSPreOrderSSARename(mir_graph->GetEntryBlock());
+  }
+};
+
+/**
+ * @class PhiNodeOperands
+ * @brief Pass to insert the Phi node operands to basic blocks
+ */
+class PhiNodeOperands : public PassME {
+ public:
+  PhiNodeOperands() : PassME("PhiNodeOperands", kPreOrderDFSTraversal) {
+  }
+
+  bool Worker(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    BasicBlock* bb = down_cast<const PassMEDataHolder*>(data)->bb;
+    DCHECK(bb != nullptr);
+    c_unit->mir_graph->InsertPhiNodeOperands(bb);
+    // No need to repeat, so just return false.
+    return false;
+  }
+};
+
+/**
+ * @class PerformInitRegLocations
+ * @brief Initialize Register Locations.
+ */
+class PerformInitRegLocations : public PassME {
+ public:
+  PerformInitRegLocations() : PassME("PerformInitRegLocation") {
+  }
+
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph->InitRegLocations();
+  }
+};
+
+/**
+ * @class ConstantPropagation
+ * @brief Perform a constant propagation pass.
+ */
+class ConstantPropagation : public PassME {
+ public:
+  ConstantPropagation() : PassME("ConstantPropagation") {
+  }
+
+  bool Worker(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    BasicBlock* bb = down_cast<const PassMEDataHolder*>(data)->bb;
+    DCHECK(bb != nullptr);
+    c_unit->mir_graph->DoConstantPropagation(bb);
+    // No need to repeat, so just return false.
+    return false;
+  }
+
+  void Start(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph->InitializeConstantPropagation();
+  }
+};
+
+/**
+ * @class FreeData
+ * @brief There is some data that needs to be freed after performing the post-optimization passes.
+ */
+class FreeData : public PassME {
+ public:
+  FreeData() : PassME("FreeData") {
+  }
+
+  void End(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph.get()->SSATransformationEnd();
+  }
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_POST_OPT_PASSES_H_
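
These passes are designed to run as a unit through the PassDriverMEPostOpt declared earlier, in the g_passes order given in pass_driver_me_post_opt.cc. A minimal usage sketch (the wrapper function name is hypothetical):

    // Rebuild the SSA-related data after the optimization passes have run.
    void RunPostOptimization(CompilationUnit* cu) {
      PassDriverMEPostOpt post_opt(cu);
      post_opt.Launch();  // InitializeData through FreeData, in order.
    }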
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 2d1c19e..f0a9ca4 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -120,6 +120,7 @@
     bool GenInlinedSqrt(CallInfo* info);
     bool GenInlinedPeek(CallInfo* info, OpSize size);
     bool GenInlinedPoke(CallInfo* info, OpSize size);
+    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
     void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
     void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                    RegLocation rl_src2);
@@ -127,6 +128,8 @@
                     RegLocation rl_src2);
     void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                     RegLocation rl_src2);
+    void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                       RegLocation rl_src2, bool is_div);
     RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
     void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index bb02f74..dde8ff0 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -141,8 +141,11 @@
       break;
     case Instruction::LONG_TO_DOUBLE: {
       rl_src = LoadValueWide(rl_src, kFPReg);
-      RegStorage src_low = rl_src.reg.DoubleToLowSingle();
-      RegStorage src_high = rl_src.reg.DoubleToHighSingle();
+      RegisterInfo* info = GetRegInfo(rl_src.reg);
+      RegStorage src_low = info->FindMatchingView(RegisterInfo::kLowSingleStorageMask)->GetReg();
+      DCHECK(src_low.Valid());
+      RegStorage src_high = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask)->GetReg();
+      DCHECK(src_high.Valid());
       rl_result = EvalLoc(rl_dest, kFPReg, true);
       RegStorage tmp1 = AllocTempDouble();
       RegStorage tmp2 = AllocTempDouble();
@@ -161,8 +164,11 @@
       return;
     case Instruction::LONG_TO_FLOAT: {
       rl_src = LoadValueWide(rl_src, kFPReg);
-      RegStorage src_low = rl_src.reg.DoubleToLowSingle();
-      RegStorage src_high = rl_src.reg.DoubleToHighSingle();
+      RegisterInfo* info = GetRegInfo(rl_src.reg);
+      RegStorage src_low = info->FindMatchingView(RegisterInfo::kLowSingleStorageMask)->GetReg();
+      DCHECK(src_low.Valid());
+      RegStorage src_high = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask)->GetReg();
+      DCHECK(src_high.Valid());
       rl_result = EvalLoc(rl_dest, kFPReg, true);
       // Allocate temp registers.
       RegStorage high_val = AllocTempDouble();
@@ -334,22 +340,11 @@
 
 bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) {
   DCHECK_EQ(cu_->instruction_set, kThumb2);
-  LIR *branch;
   RegLocation rl_src = info->args[0];
   RegLocation rl_dest = InlineTargetWide(info);  // double place for result
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
   NewLIR2(kThumb2Vsqrtd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  NewLIR2(kThumb2Vcmpd, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-  NewLIR0(kThumb2Fmstat);
-  branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
-  ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers
-  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pSqrt));
-  NewLIR3(kThumb2Fmrrd, rs_r0.GetReg(), rs_r1.GetReg(), rl_src.reg.GetReg());
-  NewLIR1(kThumbBlxR, r_tgt.GetReg());
-  NewLIR3(kThumb2Fmdrr, rl_result.reg.GetReg(), rs_r0.GetReg(), rs_r1.GetReg());
-  branch->target = NewLIR0(kPseudoTargetLabel);
   StoreValueWide(rl_dest, rl_result);
   return true;
 }
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 384a008..2556788 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -998,6 +998,15 @@
 #endif
 }
 
+void ArmMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+  LOG(FATAL) << "Unexpected use of GenNotLong()";
+}
+
+void ArmMir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                               RegLocation rl_src2, bool is_div) {
+  LOG(FATAL) << "Unexpected use of GenDivRemLong()";
+}
+
 void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 1520c52..309f676 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -575,10 +575,10 @@
     // Redirect single precision's master storage to master.
     info->SetMaster(dp_reg_info);
     // Singles should show a single 32-bit mask bit, at first referring to the low half.
-    DCHECK_EQ(info->StorageMask(), 0x1U);
+    DCHECK_EQ(info->StorageMask(), RegisterInfo::kLowSingleStorageMask);
     if (sp_reg_num & 1) {
-      // For odd singles, change to user the high word of the backing double.
-      info->SetStorageMask(0x2);
+      // For odd singles, change to use the high word of the backing double.
+      info->SetStorageMask(RegisterInfo::kHighSingleStorageMask);
     }
   }
 
@@ -786,10 +786,13 @@
     }
   }
   if (res.Valid()) {
+    RegisterInfo* info = GetRegInfo(res);
     promotion_map_[p_map_idx].fp_location = kLocPhysReg;
-    promotion_map_[p_map_idx].FpReg = res.DoubleToLowSingle().GetReg();
+    promotion_map_[p_map_idx].FpReg =
+        info->FindMatchingView(RegisterInfo::kLowSingleStorageMask)->GetReg().GetReg();
     promotion_map_[p_map_idx+1].fp_location = kLocPhysReg;
-    promotion_map_[p_map_idx+1].FpReg = res.DoubleToHighSingle().GetReg();
+    promotion_map_[p_map_idx+1].FpReg =
+        info->FindMatchingView(RegisterInfo::kHighSingleStorageMask)->GetReg().GetReg();
   }
   return res;
 }
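
The two hunks above use the same idiom in place of the removed DoubleToLowSingle()/DoubleToHighSingle() helpers: look up the RegisterInfo for the double register, then ask for the aliased view whose storage mask marks the low or high single. Condensed, the pattern is:

    // Idiom used above: recover the two single-precision views of a double.
    RegisterInfo* info = GetRegInfo(rl_src.reg);
    RegStorage low = info->FindMatchingView(RegisterInfo::kLowSingleStorageMask)->GetReg();
    RegStorage high = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask)->GetReg();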
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index c3b23fd..6a6b0f6 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -298,6 +298,7 @@
   kA64Mov2rr,        // mov [00101010000] rm[20-16] [000000] [11111] rd[4-0].
   kA64Mvn2rr,        // mov [00101010001] rm[20-16] [000000] [11111] rd[4-0].
   kA64Mul3rrr,       // mul [00011011000] rm[20-16] [011111] rn[9-5] rd[4-0].
+  kA64Msub4rrrr,     // msub[s0011011000] rm[20-16] [1] ra[14-10] rn[9-5] rd[4-0].
   kA64Neg3rro,       // neg alias of "sub arg0, rzr, arg1, arg2".
   kA64Orr3Rrl,       // orr [s01100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
   kA64Orr4rrro,      // orr [s0101010] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 656f8fd..4a0c055 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -422,6 +422,10 @@
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
                  "mul", "!0r, !1r, !2r", kFixupNone),
+    ENCODING_MAP(WIDE(kA64Msub4rrrr), SF_VARIANTS(0x1b008000),
+                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 14, 10,
+                 kFmtRegR, 20, 16, IS_QUAD_OP | REG_DEF0_USE123,
+                 "msub", "!0r, !1r, !3r, !2r", kFixupNone),
     ENCODING_MAP(WIDE(kA64Neg3rro), SF_VARIANTS(0x4b0003e0),
                  kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtShift, -1, -1,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index f7a0199..2e3ef86 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -94,8 +94,7 @@
   tab_rec->anchor = switch_label;
 
   // Add displacement to base branch address and go!
-  OpRegRegRegShift(kOpAdd, r_base.GetReg(), r_base.GetReg(), r_disp.GetReg(),
-                   ENCODE_NO_SHIFT, true);
+  OpRegRegRegShift(kOpAdd, r_base, r_base, r_disp, ENCODE_NO_SHIFT);
   NewLIR1(kA64Br1x, r_base.GetReg());
 
   // Loop exit label.
@@ -148,8 +147,7 @@
   tab_rec->anchor = switch_label;
 
   // Add displacement to base branch address and go!
-  OpRegRegRegShift(kOpAdd, branch_reg.GetReg(), branch_reg.GetReg(), disp_reg.GetReg(),
-                   ENCODE_NO_SHIFT, true);
+  OpRegRegRegShift(kOpAdd, branch_reg, branch_reg, disp_reg, ENCODE_NO_SHIFT);
   NewLIR1(kA64Br1x, branch_reg.GetReg());
 
   // branch_over target here
@@ -334,7 +332,7 @@
 
   if (!skip_overflow_check) {
     LoadWordDisp(rs_rA64_SELF, Thread::StackEndOffset<8>().Int32Value(), rs_x12);
-    OpRegImm64(kOpSub, rs_rA64_SP, frame_size_, /*is_wide*/true);
+    OpRegImm64(kOpSub, rs_rA64_SP, frame_size_);
     if (Runtime::Current()->ExplicitStackOverflowChecks()) {
       /* Load stack limit */
       // TODO(Arm64): fix the line below:
@@ -348,7 +346,7 @@
       MarkPossibleStackOverflowException();
     }
   } else if (frame_size_ > 0) {
-    OpRegImm64(kOpSub, rs_rA64_SP, frame_size_, /*is_wide*/true);
+    OpRegImm64(kOpSub, rs_rA64_SP, frame_size_);
   }
 
   /* Need to spill any FP regs? */
@@ -391,7 +389,7 @@
     UnSpillCoreRegs(rs_rA64_SP, spill_offset, core_spill_mask_);
   }
 
-  OpRegImm64(kOpAdd, rs_rA64_SP, frame_size_, /*is_wide*/true);
+  OpRegImm64(kOpAdd, rs_rA64_SP, frame_size_);
   NewLIR0(kA64Ret);
 }
 
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 350e483..16bb701 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -93,6 +93,8 @@
     RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
     // Required for target - Dalvik-level generators.
+    void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_shift);
     void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2);
     void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
@@ -120,6 +122,8 @@
     bool GenInlinedSqrt(CallInfo* info);
     bool GenInlinedPeek(CallInfo* info, OpSize size);
     bool GenInlinedPoke(CallInfo* info, OpSize size);
+    void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
     void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
     void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                    RegLocation rl_src2);
@@ -127,6 +131,8 @@
                     RegLocation rl_src2);
     void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                     RegLocation rl_src2);
+    void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                       RegLocation rl_src2, bool is_div);
     RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
     void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -170,7 +176,7 @@
     LIR* OpReg(OpKind op, RegStorage r_dest_src);
     void OpRegCopy(RegStorage r_dest, RegStorage r_src);
     LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value, bool is_wide);
+    LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
     LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
     LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
     LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
@@ -191,8 +197,8 @@
 
     LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
     LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
-    LIR* OpRegRegRegShift(OpKind op, int r_dest, int r_src1, int r_src2, int shift,
-                          bool is_wide = false);
+    LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+                          int shift);
     LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
     static const ArmEncodingMap EncodingMap[kA64Last];
     int EncodeShift(int code, int amount);
@@ -216,8 +222,6 @@
                     bool skip_this);
 
   private:
-    void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
-                                  ConditionCode ccode);
     LIR* LoadFPConstantValue(int r_dest, int32_t value);
     LIR* LoadFPConstantValueWide(int r_dest, int64_t value);
     void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index 87ab6fe..882ee66 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -25,10 +25,6 @@
   int op = kA64Brk1d;
   RegLocation rl_result;
 
-  /*
-   * Don't attempt to optimize register usage since these opcodes call out to
-   * the handlers.
-   */
   switch (opcode) {
     case Instruction::ADD_FLOAT_2ADDR:
     case Instruction::ADD_FLOAT:
@@ -119,49 +115,75 @@
                                  RegLocation rl_dest, RegLocation rl_src) {
   int op = kA64Brk1d;
   RegLocation rl_result;
+  RegisterClass src_reg_class = kInvalidRegClass;
+  RegisterClass dst_reg_class = kInvalidRegClass;
 
   switch (opcode) {
     case Instruction::INT_TO_FLOAT:
       op = kA64Scvtf2fw;
+      src_reg_class = kCoreReg;
+      dst_reg_class = kFPReg;
       break;
     case Instruction::FLOAT_TO_INT:
       op = kA64Fcvtzs2wf;
+      src_reg_class = kFPReg;
+      dst_reg_class = kCoreReg;
       break;
     case Instruction::DOUBLE_TO_FLOAT:
       op = kA64Fcvt2sS;
+      src_reg_class = kFPReg;
+      dst_reg_class = kFPReg;
       break;
     case Instruction::FLOAT_TO_DOUBLE:
       op = kA64Fcvt2Ss;
+      src_reg_class = kFPReg;
+      dst_reg_class = kFPReg;
       break;
     case Instruction::INT_TO_DOUBLE:
       op = FWIDE(kA64Scvtf2fw);
+      src_reg_class = kCoreReg;
+      dst_reg_class = kFPReg;
       break;
     case Instruction::DOUBLE_TO_INT:
       op = FWIDE(kA64Fcvtzs2wf);
+      src_reg_class = kFPReg;
+      dst_reg_class = kCoreReg;
       break;
     case Instruction::LONG_TO_DOUBLE:
       op = FWIDE(kA64Scvtf2fx);
+      src_reg_class = kCoreReg;
+      dst_reg_class = kFPReg;
       break;
     case Instruction::FLOAT_TO_LONG:
       op = kA64Fcvtzs2xf;
+      src_reg_class = kFPReg;
+      dst_reg_class = kCoreReg;
       break;
     case Instruction::LONG_TO_FLOAT:
       op = kA64Scvtf2fx;
+      src_reg_class = kCoreReg;
+      dst_reg_class = kFPReg;
       break;
     case Instruction::DOUBLE_TO_LONG:
       op = FWIDE(kA64Fcvtzs2xf);
+      src_reg_class = kFPReg;
+      dst_reg_class = kCoreReg;
       break;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
 
+  DCHECK_NE(src_reg_class, kInvalidRegClass);
+  DCHECK_NE(dst_reg_class, kInvalidRegClass);
+  DCHECK_NE(op, kA64Brk1d);
+
   if (rl_src.wide) {
-    rl_src = LoadValueWide(rl_src, kFPReg);
+    rl_src = LoadValueWide(rl_src, src_reg_class);
   } else {
-    rl_src = LoadValue(rl_src, kFPReg);
+    rl_src = LoadValue(rl_src, src_reg_class);
   }
 
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  rl_result = EvalLoc(rl_dest, dst_reg_class, true);
   NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
 
   if (rl_dest.wide) {
@@ -296,25 +318,11 @@
 }
 
 bool Arm64Mir2Lir::GenInlinedSqrt(CallInfo* info) {
-  // TODO(Arm64): implement this.
-  UNIMPLEMENTED(FATAL) << "GenInlinedSqrt not implemented for Arm64";
-
-  DCHECK_EQ(cu_->instruction_set, kArm64);
-  LIR *branch;
   RegLocation rl_src = info->args[0];
   RegLocation rl_dest = InlineTargetWide(info);  // double place for result
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
   NewLIR2(FWIDE(kA64Fsqrt2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  NewLIR2(FWIDE(kA64Fcmp2ff), rl_result.reg.GetReg(), rl_result.reg.GetReg());
-  branch = NewLIR2(kA64B2ct, kArmCondEq, 0);
-  ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers
-  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pSqrt));
-  // NewLIR3(kThumb2Fmrrd, r0, r1, rl_src.reg.GetReg());
-  NewLIR1(kA64Blr1x, r_tgt.GetReg());
-  // NewLIR3(kThumb2Fmdrr, rl_result.reg.GetReg(), r0, r1);
-  branch->target = NewLIR0(kPseudoTargetLabel);
   StoreValueWide(rl_dest, rl_result);
   return true;
 }
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index b0f5904..d9428f9 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -29,7 +29,6 @@
   return OpCondBranch(cond, target);
 }
 
-// TODO(Arm64): remove this.
 LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
   LOG(FATAL) << "Unexpected use of OpIT for Arm64";
   return NULL;
@@ -53,160 +52,90 @@
   rl_result = EvalLoc(rl_dest, kCoreReg, true);
 
   OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
-  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
-  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
+  NewLIR4(WIDE(kA64Csinc4rrrc), rl_result.reg.GetReg(), rxzr, rxzr, kArmCondEq);
+  NewLIR4(WIDE(kA64Csneg4rrrc), rl_result.reg.GetReg(), rl_result.reg.GetReg(),
           rl_result.reg.GetReg(), kArmCondLe);
-  StoreValue(rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void Arm64Mir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
-                                            int64_t val, ConditionCode ccode) {
-  LIR* taken = &block_label_list_[bb->taken];
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-
-  if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
-    ArmOpcode opcode = (ccode == kCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
-    LIR* branch = NewLIR2(WIDE(opcode), rl_src1.reg.GetLowReg(), 0);
-    branch->target = taken;
-  } else {
-    OpRegImm64(kOpCmp, rl_src1.reg, val, /*is_wide*/true);
-    OpCondBranch(ccode, taken);
+void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_shift) {
+  OpKind op = kOpBkpt;
+  switch (opcode) {
+  case Instruction::SHL_LONG:
+  case Instruction::SHL_LONG_2ADDR:
+    op = kOpLsl;
+    break;
+  case Instruction::SHR_LONG:
+  case Instruction::SHR_LONG_2ADDR:
+    op = kOpAsr;
+    break;
+  case Instruction::USHR_LONG:
+  case Instruction::USHR_LONG_2ADDR:
+    op = kOpLsr;
+    break;
+  default:
+    LOG(FATAL) << "Unexpected case: " << opcode;
   }
+  rl_shift = LoadValueWide(rl_shift, kCoreReg);
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
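+  // A64 variable shifts (lslv/asrv/lsrv) use only the low 6 bits of the shift register.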
+  OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_shift.reg);
+  StoreValueWide(rl_dest, rl_result);
 }
 
 void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
-  // TODO(Arm64): implement this.
-  UNIMPLEMENTED(FATAL);
-
   RegLocation rl_result;
   RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
   RegLocation rl_dest = mir_graph_->GetDest(mir);
   rl_src = LoadValue(rl_src, kCoreReg);
-  ConditionCode ccode = mir->meta.ccode;
-  if (mir->ssa_rep->num_uses == 1) {
-    // CONST case
-    int true_val = mir->dalvikInsn.vB;
-    int false_val = mir->dalvikInsn.vC;
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    // Change kCondNe to kCondEq for the special cases below.
-    if (ccode == kCondNe) {
-      ccode = kCondEq;
-      std::swap(true_val, false_val);
-    }
-    bool cheap_false_val = InexpensiveConstantInt(false_val);
-    if (cheap_false_val && ccode == kCondEq && (true_val == 0 || true_val == -1)) {
-      OpRegRegImm(kOpSub, rl_result.reg, rl_src.reg, -true_val);
-      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
-      OpIT(true_val == 0 ? kCondNe : kCondUge, "");
-      LoadConstant(rl_result.reg, false_val);
-      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
-    } else if (cheap_false_val && ccode == kCondEq && true_val == 1) {
-      OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, 1);
-      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
-      OpIT(kCondLs, "");
-      LoadConstant(rl_result.reg, false_val);
-      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
-    } else if (cheap_false_val && InexpensiveConstantInt(true_val)) {
-      OpRegImm(kOpCmp, rl_src.reg, 0);
-      OpIT(ccode, "E");
-      LoadConstant(rl_result.reg, true_val);
-      LoadConstant(rl_result.reg, false_val);
-      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
-    } else {
-      // Unlikely case - could be tuned.
-      RegStorage t_reg1 = AllocTemp();
-      RegStorage t_reg2 = AllocTemp();
-      LoadConstant(t_reg1, true_val);
-      LoadConstant(t_reg2, false_val);
-      OpRegImm(kOpCmp, rl_src.reg, 0);
-      OpIT(ccode, "E");
-      OpRegCopy(rl_result.reg, t_reg1);
-      OpRegCopy(rl_result.reg, t_reg2);
-      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
-    }
-  } else {
-    // MOVE case
-    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
-    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
-    rl_true = LoadValue(rl_true, kCoreReg);
-    rl_false = LoadValue(rl_false, kCoreReg);
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    OpRegImm(kOpCmp, rl_src.reg, 0);
-    if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {  // Is the "true" case already in place?
-      OpIT(NegateComparison(ccode), "");
-      OpRegCopy(rl_result.reg, rl_false.reg);
-    } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {  // False case in place?
-      OpIT(ccode, "");
-      OpRegCopy(rl_result.reg, rl_true.reg);
-    } else {  // Normal - select between the two.
-      OpIT(ccode, "E");
-      OpRegCopy(rl_result.reg, rl_true.reg);
-      OpRegCopy(rl_result.reg, rl_false.reg);
-    }
-    GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
-  }
+  ArmConditionCode code = ArmConditionEncoding(mir->meta.ccode);
+
+  RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
+  RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
+  rl_true = LoadValue(rl_true, kCoreReg);
+  rl_false = LoadValue(rl_false, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegImm(kOpCmp, rl_src.reg, 0);
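+  // csel picks rl_true when the condition holds and rl_false otherwise; no IT block is needed.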
+  NewLIR4(kA64Csel4rrrc, rl_result.reg.GetReg(), rl_true.reg.GetReg(),
+          rl_false.reg.GetReg(), code);
   StoreValue(rl_dest, rl_result);
 }
 
 void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
-  // TODO(Arm64): implement this.
-  UNIMPLEMENTED(FATAL);
-
   RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
   RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
+  LIR* taken = &block_label_list_[bb->taken];
+  LIR* not_taken = &block_label_list_[bb->fall_through];
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   // Normalize such that if either operand is constant, src2 will be constant.
   ConditionCode ccode = mir->meta.ccode;
   if (rl_src1.is_const) {
     std::swap(rl_src1, rl_src2);
     ccode = FlipComparisonOrder(ccode);
   }
+
   if (rl_src2.is_const) {
-    RegLocation rl_temp = UpdateLocWide(rl_src2);
-    // Do special compare/branch against simple const operand if not already in registers.
+    rl_src2 = UpdateLocWide(rl_src2);
     int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-    if ((rl_temp.location != kLocPhysReg)
-     /*&& ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))*/) {
-      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
+    // Special handling using cbz & cbnz.
+    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
+      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
+      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
+      return;
+    // Only handle Imm if src2 is not already in a register.
+    } else if (rl_src2.location != kLocPhysReg) {
+      OpRegImm64(kOpCmp, rl_src1.reg, val);
+      OpCondBranch(ccode, taken);
+      OpCondBranch(NegateComparison(ccode), not_taken);
       return;
     }
   }
-  LIR* taken = &block_label_list_[bb->taken];
-  LIR* not_taken = &block_label_list_[bb->fall_through];
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
-  switch (ccode) {
-    case kCondEq:
-      OpCondBranch(kCondNe, not_taken);
-      break;
-    case kCondNe:
-      OpCondBranch(kCondNe, taken);
-      break;
-    case kCondLt:
-      OpCondBranch(kCondLt, taken);
-      OpCondBranch(kCondGt, not_taken);
-      ccode = kCondUlt;
-      break;
-    case kCondLe:
-      OpCondBranch(kCondLt, taken);
-      OpCondBranch(kCondGt, not_taken);
-      ccode = kCondLs;
-      break;
-    case kCondGt:
-      OpCondBranch(kCondGt, taken);
-      OpCondBranch(kCondLt, not_taken);
-      ccode = kCondHi;
-      break;
-    case kCondGe:
-      OpCondBranch(kCondGt, taken);
-      OpCondBranch(kCondLt, not_taken);
-      ccode = kCondUge;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected ccode: " << ccode;
-  }
-  OpRegReg(kOpCmp, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
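+  // One 64-bit compare replaces the Thumb2 high/low sequence; branch explicitly on both edges.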
+  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
   OpCondBranch(ccode, taken);
+  OpCondBranch(NegateComparison(ccode), not_taken);
 }
 
 /*
@@ -219,7 +148,8 @@
   ArmConditionCode arm_cond = ArmConditionEncoding(cond);
   if (check_value == 0 && (arm_cond == kArmCondEq || arm_cond == kArmCondNe)) {
     ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
-    branch = NewLIR2(opcode, reg.GetReg(), 0);
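+    // OR-ing in WIDE(0)/UNWIDE(0) picks the cbz/cbnz encoding matching the register width.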
+    ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
+    branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
   } else {
     OpRegImm(kOpCmp, reg, check_value);
     branch = NewLIR2(kA64B2ct, arm_cond, 0);
@@ -354,19 +284,16 @@
   NewLIR4(kA64Smaddl4xwwx, r_lo.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
   switch (pattern) {
     case Divide3:
-      OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi.GetReg(),
-               rl_src.reg.GetReg(), EncodeShift(kA64Asr, 31));
+      OpRegRegRegShift(kOpSub, rl_result.reg, r_hi, rl_src.reg, EncodeShift(kA64Asr, 31));
       break;
     case Divide5:
       OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
-      OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo.GetReg(), r_hi.GetReg(),
-               EncodeShift(kA64Asr, magic_table[lit].shift));
+      OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi,
+                       EncodeShift(kA64Asr, magic_table[lit].shift));
       break;
     case Divide7:
       OpRegReg(kOpAdd, r_hi, rl_src.reg);
       OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
-      OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo.GetReg(), r_hi.GetReg(),
-               EncodeShift(kA64Asr, magic_table[lit].shift));
+      OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi,
+                       EncodeShift(kA64Asr, magic_table[lit].shift));
       break;
     default:
       LOG(FATAL) << "Unexpected pattern: " << pattern;
@@ -405,25 +332,30 @@
   return rl_result;
 }
 
-RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
+RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                   bool is_div) {
+  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());
+
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (is_div) {
-    // Simple case, use sdiv instruction.
-    OpRegRegReg(kOpDiv, rl_result.reg, reg1, reg2);
+    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
   } else {
-    // Remainder case, use the following code:
-    // temp = reg1 / reg2      - integer division
-    // temp = temp * reg2
-    // dest = reg1 - temp
-
-    RegStorage temp = AllocTemp();
-    OpRegRegReg(kOpDiv, temp, reg1, reg2);
-    OpRegReg(kOpMul, temp, reg2);
-    OpRegRegReg(kOpSub, rl_result.reg, reg1, temp);
+    // temp = r_src1 / r_src2
+    // dest = r_src1 - temp * r_src2
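+    // A64 msub computes Ra - Rn * Rm in one instruction, so no separate mul/sub pair is needed.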
+    RegStorage temp;
+    ArmOpcode wide;
+    if (rl_result.reg.Is64Bit()) {
+      temp = AllocTempWide();
+      wide = WIDE(0);
+    } else {
+      temp = AllocTemp();
+      wide = UNWIDE(0);
+    }
+    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
+    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
+            r_src1.GetReg(), r_src2.GetReg());
     FreeTemp(temp);
   }
-
   return rl_result;
 }
 
@@ -439,7 +371,7 @@
   RegLocation rl_dest = InlineTarget(info);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
-  OpIT((is_min) ? kCondGt : kCondLt, "E");
+  // OpIT((is_min) ? kCondGt : kCondLt, "E");
   OpRegReg(kOpMov, rl_result.reg, rl_src2.reg);
   OpRegReg(kOpMov, rl_result.reg, rl_src1.reg);
   GenBarrier();
@@ -639,7 +571,7 @@
     NewLIR3(kA64Ldxr2rX, r_tmp.GetReg(), r_ptr.GetReg(), 0);
     OpRegReg(kOpSub, r_tmp, rl_expected.reg);
     DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
-    OpIT(kCondEq, "T");
+    // OpIT(kCondEq, "T");
     NewLIR4(kA64Stxr3wrX /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
   }
 
@@ -655,7 +587,7 @@
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
   DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
-  OpIT(kCondUlt, "");
+  // OpIT(kCondUlt, "");
   LoadConstant(rl_result.reg, 0); /* cc */
   FreeTemp(r_tmp);  // Now unneeded.
 
@@ -684,32 +616,22 @@
 void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                RegLocation rl_result, int lit,
                                                int first_bit, int second_bit) {
-  OpRegRegRegShift(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
-                   EncodeShift(kA64Lsl, second_bit - first_bit));
+  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
+                   EncodeShift(kA64Lsl, second_bit - first_bit));
   if (first_bit != 0) {
     OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
   }
 }
 
 void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
-  DCHECK(reg.IsPair());   // TODO: support k64BitSolo.
-  OpRegImm64(kOpCmp, reg, 0, /*is_wide*/true);
-  GenDivZeroCheck(kCondEq);
+  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
 }
 
 // Test suspend flag, return target of taken suspend branch
 LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
-  // TODO(Arm64): re-enable suspend checks, once art_quick_test_suspend is implemented and
-  //   the suspend register is properly handled in the trampolines.
-#if 0
+  // FIXME: Define rA64_SUSPEND as w19 once we no longer need two copies of the reserved register.
+  // Note: The opcode is not marked wide, so we are actually using the 32-bit view of the register.
   NewLIR3(kA64Subs3rRd, rA64_SUSPEND, rA64_SUSPEND, 1);
   return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
-#else
-  // TODO(Arm64): Fake suspend check. Will always fail to branch. Remove this.
-  LIR* branch = NewLIR2((target == NULL) ? kA64Cbnz2rt : kA64Cbz2rt, rwzr, 0);
-  branch->target = target;
-  return branch;
-#endif
 }
 
 // Decrement register and branch on condition
@@ -756,33 +678,51 @@
 #endif
 }
 
-void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegStorage z_reg = AllocTemp();
-  LoadConstantNoClobber(z_reg, 0);
-  // Check for destructive overlap
-  if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
-    RegStorage t_reg = AllocTemp();
-    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
-    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
-    FreeTemp(t_reg);
-  } else {
-    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
-    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, rl_src.reg.GetHigh());
-  }
-  FreeTemp(z_reg);
+void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
+  RegLocation rl_result;
+
+  rl_src = LoadValue(rl_src, kCoreReg);
+  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
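+  // SBFM with immr=0, imms=31 is the SXTW alias: sign-extend the low 32 bits to 64 bits.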
+  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 31);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
+  RegLocation rl_result;
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  GenDivZeroCheck(rl_src2.reg);
+  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
   StoreValueWide(rl_dest, rl_result);
 }
 
 void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
   RegLocation rl_result;
+
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
   rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  OpRegRegRegShift(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg(),
-                   ENCODE_NO_SHIFT, /*is_wide*/ true);
+  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
+  RegLocation rl_result;
+
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+  RegLocation rl_result;
+
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
   StoreValueWide(rl_dest, rl_result);
 }
 
@@ -865,8 +805,7 @@
     } else {
       // No special indexed operation, lea + load w/ displacement
       reg_ptr = AllocTemp();
-      OpRegRegRegShift(kOpAdd, reg_ptr.GetReg(), rl_array.reg.GetReg(), rl_index.reg.GetReg(),
-                       EncodeShift(kA64Lsl, scale));
+      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kA64Lsl, scale));
       FreeTemp(rl_index.reg);
     }
     rl_result = EvalLoc(rl_dest, reg_class, true);
@@ -971,8 +910,7 @@
       rl_src = LoadValue(rl_src, reg_class);
     }
     if (!constant_index) {
-      OpRegRegRegShift(kOpAdd, reg_ptr.GetReg(), rl_array.reg.GetReg(), rl_index.reg.GetReg(),
-                       EncodeShift(kA64Lsl, scale));
+      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kA64Lsl, scale));
     }
     if (needs_range_check) {
       if (constant_index) {
@@ -1004,167 +942,84 @@
   }
 }
 
-
 void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                    RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
-  // TODO(Arm64): check this.
-  UNIMPLEMENTED(WARNING);
-
-  rl_src = LoadValueWide(rl_src, kCoreReg);
+  OpKind op = kOpBkpt;
   // Per spec, we only care about low 6 bits of shift amount.
   int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
+  rl_src = LoadValueWide(rl_src, kCoreReg);
   if (shift_amount == 0) {
     StoreValueWide(rl_dest, rl_src);
     return;
   }
-  if (BadOverlap(rl_src, rl_dest)) {
-    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
-    return;
-  }
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
   switch (opcode) {
     case Instruction::SHL_LONG:
     case Instruction::SHL_LONG_2ADDR:
-      if (shift_amount == 1) {
-        OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), rl_src.reg.GetLow());
-        OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), rl_src.reg.GetHigh());
-      } else if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg);
-        LoadConstant(rl_result.reg.GetLow(), 0);
-      } else if (shift_amount > 31) {
-        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetLow(), shift_amount - 32);
-        LoadConstant(rl_result.reg.GetLow(), 0);
-      } else {
-        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
-        OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetLowReg(),
-                         EncodeShift(kA64Lsr, 32 - shift_amount));
-        OpRegRegImm(kOpLsl, rl_result.reg.GetLow(), rl_src.reg.GetLow(), shift_amount);
-      }
+      op = kOpLsl;
       break;
     case Instruction::SHR_LONG:
     case Instruction::SHR_LONG_2ADDR:
-      if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
-        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
-      } else if (shift_amount > 31) {
-        OpRegRegImm(kOpAsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
-        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
-      } else {
-        RegStorage t_reg = AllocTemp();
-        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
-        OpRegRegRegShift(kOpOr, rl_result.reg.GetLowReg(), t_reg.GetReg(), rl_src.reg.GetHighReg(),
-                         EncodeShift(kA64Lsl, 32 - shift_amount));
-        FreeTemp(t_reg);
-        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
-      }
+      op = kOpAsr;
       break;
     case Instruction::USHR_LONG:
     case Instruction::USHR_LONG_2ADDR:
-      if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
-        LoadConstant(rl_result.reg.GetHigh(), 0);
-      } else if (shift_amount > 31) {
-        OpRegRegImm(kOpLsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
-        LoadConstant(rl_result.reg.GetHigh(), 0);
-      } else {
-        RegStorage t_reg = AllocTemp();
-        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
-        OpRegRegRegShift(kOpOr, rl_result.reg.GetLowReg(), t_reg.GetReg(), rl_src.reg.GetHighReg(),
-                         EncodeShift(kA64Lsl, 32 - shift_amount));
-        FreeTemp(t_reg);
-        OpRegRegImm(kOpLsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
-      }
+      op = kOpLsr;
       break;
     default:
       LOG(FATAL) << "Unexpected case";
   }
+  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
   StoreValueWide(rl_dest, rl_result);
 }
 
 void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                      RegLocation rl_src1, RegLocation rl_src2) {
-  // TODO(Arm64): implement this.
-  UNIMPLEMENTED(WARNING);
-
-  if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
+  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
     if (!rl_src2.is_const) {
-      // Don't bother with special handling for subtract from immediate.
-      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
-      return;
+      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
     }
   } else {
-    // Normalize
+    // Commutativity: ensure the constant operand, if any, ends up in rl_src2.
     if (!rl_src2.is_const) {
       DCHECK(rl_src1.is_const);
       std::swap(rl_src1, rl_src2);
     }
   }
-  if (BadOverlap(rl_src1, rl_dest)) {
-    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
-    return;
-  }
   DCHECK(rl_src2.is_const);
-  // TODO(Arm64): implement this.
-  //  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-  int32_t mod_imm_lo = -1;  // ModifiedImmediate(val_lo);
-  int32_t mod_imm_hi = -1;  // ModifiedImmediate(val_hi);
 
-  // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
+  OpKind op = kOpBkpt;
+  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
+
   switch (opcode) {
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
+      op = kOpAdd;
+      break;
     case Instruction::SUB_LONG:
     case Instruction::SUB_LONG_2ADDR:
-      if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
-        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      }
-      break;
-    default:
-      break;
-  }
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
-  switch (opcode) {
-#if 0
-    case Instruction::ADD_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-      NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
-      NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
-      break;
-    case Instruction::OR_LONG:
-    case Instruction::OR_LONG_2ADDR:
-      if ((val_lo != 0) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
-        OpRegRegImm(kOpOr, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
-      }
-      if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
-        OpRegRegImm(kOpOr, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
-      }
-      break;
-    case Instruction::XOR_LONG:
-    case Instruction::XOR_LONG_2ADDR:
-      OpRegRegImm(kOpXor, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
-      OpRegRegImm(kOpXor, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
+      op = kOpSub;
       break;
     case Instruction::AND_LONG:
     case Instruction::AND_LONG_2ADDR:
-      if ((val_lo != 0xffffffff) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
-        OpRegRegImm(kOpAnd, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
-      }
-      if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
-        OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
-      }
+      op = kOpAnd;
       break;
-    case Instruction::SUB_LONG_2ADDR:
-    case Instruction::SUB_LONG:
-      NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
-      NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
+    case Instruction::OR_LONG:
+    case Instruction::OR_LONG_2ADDR:
+      op = kOpOr;
       break;
-#endif
+    case Instruction::XOR_LONG:
+    case Instruction::XOR_LONG_2ADDR:
+      op = kOpXor;
+      break;
     default:
-      LOG(FATAL) << "Unexpected opcode " << opcode;
+      LOG(FATAL) << "Unexpected opcode";
   }
+
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  OpRegRegImm(op, rl_result.reg, rl_src1.reg, val);
   StoreValueWide(rl_dest, rl_result);
 }
 
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 2b1c5e8..598d05b 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -258,7 +258,6 @@
   unsigned i;
   unsigned reg_size = (is_wide) ? 64 : 32;
   uint64_t result = value & BIT_MASK(width);
-  DCHECK_NE(width, reg_size);
   for (i = width; i < reg_size; i *= 2) {
     result |= (result << i);
   }
@@ -606,7 +605,7 @@
   GrowableArray<RegisterInfo*>::Iterator fp_it(&reg_pool_->sp_regs_);
   for (RegisterInfo* info = fp_it.Next(); info != nullptr; info = fp_it.Next()) {
     int fp_reg_num = info->GetReg().GetRegNum();
-    RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | fp_reg_num);
+    RegStorage dp_reg = RegStorage::FloatSolo64(fp_reg_num);
     RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
     // Double precision register's master storage should refer to itself.
     DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
@@ -616,10 +615,18 @@
     DCHECK_EQ(info->StorageMask(), 0x1U);
   }
 
-  // TODO: re-enable this when we can safely save r4 over the suspension code path.
-  bool no_suspend = NO_SUSPEND;  // || !Runtime::Current()->ExplicitSuspendChecks();
-  if (no_suspend) {
-    GetRegInfo(rs_rA64_SUSPEND)->MarkFree();
+  // Alias 32bit W registers to corresponding 64bit X registers.
+  GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
+  for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
+    int x_reg_num = info->GetReg().GetRegNum();
+    RegStorage x_reg = RegStorage::Solo64(x_reg_num);
+    RegisterInfo* x_reg_info = GetRegInfo(x_reg);
+    // 64bit X register's master storage should refer to itself.
+    DCHECK_EQ(x_reg_info, x_reg_info->Master());
+    // Redirect 32bit W master storage to 64bit X.
+    info->SetMaster(x_reg_info);
+    // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
+    DCHECK_EQ(info->StorageMask(), 0x1U);
   }
 
   // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 39e9fad..d0ab4f6 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -408,7 +408,7 @@
       DCHECK_EQ(shift, ENCODE_NO_SHIFT);
       return NewLIR4(kA64Ubfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
     default:
-      return OpRegRegRegShift(op, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
+      return OpRegRegRegShift(op, r_dest_src1, r_dest_src1, r_src2, shift);
   }
 
   DCHECK(!IsPseudoLirOp(opcode));
@@ -445,8 +445,8 @@
   return NULL;
 }
 
-LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1,
-                                    int r_src2, int shift, bool is_wide) {
+LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
+                                    RegStorage r_src2, int shift) {
   ArmOpcode opcode = kA64Brk1d;
 
   switch (op) {
@@ -500,21 +500,24 @@
   // The instructions above belong to two kinds:
   // - 4-operands instructions, where the last operand is a shift/extend immediate,
   // - 3-operands instructions with no shift/extend.
-  ArmOpcode widened_opcode = (is_wide) ? WIDE(opcode) : opcode;
+  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
+  CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
+  CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
   if (EncodingMap[opcode].flags & IS_QUAD_OP) {
-    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
-    return NewLIR4(widened_opcode, r_dest, r_src1, r_src2, shift);
+    DCHECK(!IsExtendEncoding(shift));
+    return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
   } else {
     DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
     DCHECK_EQ(shift, ENCODE_NO_SHIFT);
-    return NewLIR3(widened_opcode, r_dest, r_src1, r_src2);
+    return NewLIR3(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
   }
 }
 
 LIR* Arm64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
-  return OpRegRegRegShift(op, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), ENCODE_NO_SHIFT);
+  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, ENCODE_NO_SHIFT);
 }
 
+// Should this be taking an int64_t value?
 LIR* Arm64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
   LIR* res;
   bool neg = (value < 0);
@@ -523,6 +526,7 @@
   ArmOpcode alt_opcode = kA64Brk1d;
   int32_t log_imm = -1;
   bool is_wide = r_dest.Is64Bit();
+  CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
   ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
 
   switch (op) {
@@ -610,11 +614,11 @@
 }
 
 LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
-  return OpRegImm64(op, r_dest_src1, static_cast<int64_t>(value), /*is_wide*/false);
+  return OpRegImm64(op, r_dest_src1, static_cast<int64_t>(value));
 }
 
-LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value, bool is_wide) {
-  ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
+LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value) {
+  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
   ArmOpcode opcode = kA64Brk1d;
   ArmOpcode neg_opcode = kA64Brk1d;
   bool shift;
@@ -702,40 +706,46 @@
 LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                    int scale, OpSize size) {
   LIR* load;
+  int expected_scale = 0;
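+  // A64 indexed loads encode just a "scaled" bit; the shift amount is implied by the access size.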
   ArmOpcode opcode = kA64Brk1d;
-  ArmOpcode wide = kA64NotWide;
-
-  DCHECK(scale == 0 || scale == 1);
 
   if (r_dest.IsFloat()) {
-    bool is_double = r_dest.IsDouble();
-    bool is_single = !is_double;
-    DCHECK_EQ(is_single, r_dest.IsSingle());
+    if (r_dest.IsDouble()) {
+      DCHECK(size == k64 || size == kDouble);
+      expected_scale = 3;
+      opcode = FWIDE(kA64Ldr4fXxG);
+    } else {
+      DCHECK(r_dest.IsSingle());
+      DCHECK(size == k32 || size == kSingle);
+      expected_scale = 2;
+      opcode = kA64Ldr4fXxG;
+    }
 
-    // If r_dest is a single, then size must be either k32 or kSingle.
-    // If r_dest is a double, then size must be either k64 or kDouble.
-    DCHECK(!is_single || size == k32 || size == kSingle);
-    DCHECK(!is_double || size == k64 || size == kDouble);
-    return NewLIR4((is_double) ? FWIDE(kA64Ldr4fXxG) : kA64Ldr4fXxG,
-                   r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
+    DCHECK(scale == 0 || scale == expected_scale);
+    return NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
+                   (scale != 0) ? 1 : 0);
   }
 
   switch (size) {
     case kDouble:
     case kWord:
     case k64:
-      wide = kA64Wide;
-      // Intentional fall-trough.
+      opcode = WIDE(kA64Ldr4rXxG);
+      expected_scale = 3;
+      break;
     case kSingle:
     case k32:
     case kReference:
       opcode = kA64Ldr4rXxG;
+      expected_scale = 2;
       break;
     case kUnsignedHalf:
       opcode = kA64Ldrh4wXxd;
+      expected_scale = 1;
       break;
     case kSignedHalf:
       opcode = kA64Ldrsh4rXxd;
+      expected_scale = 1;
       break;
     case kUnsignedByte:
       opcode = kA64Ldrb3wXx;
@@ -747,13 +757,14 @@
       LOG(FATAL) << "Bad size: " << size;
   }
 
-  if (UNLIKELY((EncodingMap[opcode].flags & IS_TERTIARY_OP) != 0)) {
-    // Tertiary ops (e.g. ldrb, ldrsb) do not support scale.
+  if (UNLIKELY(expected_scale == 0)) {
+    // This is a tertiary op (e.g. ldrb, ldrsb); it does not support scale.
+    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
     DCHECK_EQ(scale, 0);
-    load = NewLIR3(opcode | wide, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
+    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
   } else {
-    DCHECK(scale == 0 || scale == ((wide == kA64Wide) ? 3 : 2));
-    load = NewLIR4(opcode | wide, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
+    DCHECK(scale == 0 || scale == expected_scale);
+    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                    (scale != 0) ? 1 : 0);
   }
 
@@ -763,39 +774,43 @@
 LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                     int scale, OpSize size) {
   LIR* store;
+  int expected_scale = 0;
   ArmOpcode opcode = kA64Brk1d;
-  ArmOpcode wide = kA64NotWide;
-
-  DCHECK(scale == 0 || scale == 1);
 
   if (r_src.IsFloat()) {
-    bool is_double = r_src.IsDouble();
-    bool is_single = !is_double;
-    DCHECK_EQ(is_single, r_src.IsSingle());
+    if (r_src.IsDouble()) {
+      DCHECK(size == k64 || size == kDouble);
+      expected_scale = 3;
+      opcode = FWIDE(kA64Str4fXxG);
+    } else {
+      DCHECK(r_src.IsSingle());
+      DCHECK(size == k32 || size == kSingle);
+      expected_scale = 2;
+      opcode = kA64Str4fXxG;
+    }
 
-    // If r_src is a single, then size must be either k32 or kSingle.
-    // If r_src is a double, then size must be either k64 or kDouble.
-    DCHECK(!is_single || size == k32 || size == kSingle);
-    DCHECK(!is_double || size == k64 || size == kDouble);
-    return NewLIR4((is_double) ? FWIDE(kA64Str4fXxG) : kA64Str4fXxG,
-                   r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
+    DCHECK(scale == 0 || scale == expected_scale);
+    return NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
+                   (scale != 0) ? 1 : 0);
   }
 
   switch (size) {
     case kDouble:     // Intentional fall-through.
     case kWord:       // Intentional fall-through.
     case k64:
-      opcode = kA64Str4rXxG;
-      wide = kA64Wide;
+      opcode = WIDE(kA64Str4rXxG);
+      expected_scale = 3;
       break;
     case kSingle:     // Intentional fall-through.
     case k32:         // Intentional fall-through.
     case kReference:
       opcode = kA64Str4rXxG;
+      expected_scale = 2;
       break;
     case kUnsignedHalf:
     case kSignedHalf:
       opcode = kA64Strh4wXxd;
+      expected_scale = 1;
       break;
     case kUnsignedByte:
     case kSignedByte:
@@ -805,12 +820,14 @@
       LOG(FATAL) << "Bad size: " << size;
   }
 
-  if (UNLIKELY((EncodingMap[opcode].flags & IS_TERTIARY_OP) != 0)) {
-    // Tertiary ops (e.g. strb) do not support scale.
+  if (UNLIKELY(expected_scale == 0)) {
+    // This is a tertiary op (e.g. strb); it does not support scale.
+    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
     DCHECK_EQ(scale, 0);
-    store = NewLIR3(opcode | wide, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
+    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
   } else {
-    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
+    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
+                    (scale != 0) ? 1 : 0);
   }
 
   return store;
@@ -838,8 +855,8 @@
         opcode = FWIDE(kA64Ldr3fXD);
         alt_opcode = FWIDE(kA64Ldur3fXd);
       } else {
-        opcode = FWIDE(kA64Ldr3rXD);
-        alt_opcode = FWIDE(kA64Ldur3rXd);
+        opcode = WIDE(kA64Ldr3rXD);
+        alt_opcode = WIDE(kA64Ldur3rXd);
       }
       break;
     case kSingle:     // Intentional fall-through.
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 6ccf252..3fbbc4e 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -364,6 +364,18 @@
   return NULL;
 }
 
+/* Search the existing constants in the literal pool for an exact method match */
+LIR* Mir2Lir::ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method) {
+  while (data_target) {
+    if (static_cast<uint32_t>(data_target->operands[0]) == method.dex_method_index &&
+        UnwrapPointer(data_target->operands[1]) == method.dex_file) {
+      return data_target;
+    }
+    data_target = data_target->next;
+  }
+  return nullptr;
+}
+
 /*
  * The following are building blocks to insert constants into the pool or
  * instruction streams.
@@ -1143,11 +1155,13 @@
 
 void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                               SpecialTargetRegister symbolic_reg) {
-  int target_method_idx = target_method.dex_method_index;
-  LIR* data_target = ScanLiteralPool(code_literal_list_, target_method_idx, 0);
+  LIR* data_target = ScanLiteralPoolMethod(code_literal_list_, target_method);
   if (data_target == NULL) {
-    data_target = AddWordData(&code_literal_list_, target_method_idx);
+    data_target = AddWordData(&code_literal_list_, target_method.dex_method_index);
     data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
+    // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
+    // the same method invoked with kVirtual, kSuper and kInterface but the class linker will
+    // resolve these invokes to the same method, so we don't care which one we record here.
     data_target->operands[2] = type;
   }
   LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
@@ -1157,11 +1171,13 @@
 
 void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                 SpecialTargetRegister symbolic_reg) {
-  int target_method_idx = target_method.dex_method_index;
-  LIR* data_target = ScanLiteralPool(method_literal_list_, target_method_idx, 0);
+  LIR* data_target = ScanLiteralPoolMethod(method_literal_list_, target_method);
   if (data_target == NULL) {
-    data_target = AddWordData(&method_literal_list_, target_method_idx);
+    data_target = AddWordData(&method_literal_list_, target_method.dex_method_index);
     data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
+    // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
+    // the same method invoked with kVirtual, kSuper and kInterface but the class linker will
+    // resolve these invokes to the same method, so we don't care which one we record here.
     data_target->operands[2] = type;
   }
   LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
@@ -1185,21 +1201,27 @@
 }
 
 RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
-  loc.wide = false;
   if (loc.location == kLocPhysReg) {
+    DCHECK(!loc.reg.Is32Bit());
     if (loc.reg.IsPair()) {
-      loc.reg = loc.reg.GetLow();
+      RegisterInfo* info_lo = GetRegInfo(loc.reg.GetLow());
+      RegisterInfo* info_hi = GetRegInfo(loc.reg.GetHigh());
+      info_lo->SetIsWide(false);
+      info_hi->SetIsWide(false);
+      loc.reg = info_lo->GetReg();
     } else {
-      // FIXME: temp workaround.
-      // Issue here: how do we narrow to a 32-bit value in 64-bit container?
-      // Probably the wrong thing to narrow the RegStorage container here.  That
-      // should be a target decision.  At the RegLocation level, we're only
-      // modifying the view of the Dalvik value - this is orthogonal to the storage
-      // container size.  Consider this a temp workaround.
-      DCHECK(loc.reg.IsDouble());
-      loc.reg = loc.reg.DoubleToLowSingle();
+      RegisterInfo* info = GetRegInfo(loc.reg);
+      RegisterInfo* info_new = info->FindMatchingView(RegisterInfo::k32SoloStorageMask);
+      DCHECK(info_new != nullptr);
+      if (info->IsLive() && (info->SReg() == loc.s_reg_low)) {
+        info->MarkDead();
+        info_new->MarkLive(loc.s_reg_low);
+      }
+      loc.reg = info_new->GetReg();
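+      // If the wide value was live, its liveness has been transferred to the 32-bit view.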
     }
+    DCHECK(loc.reg.Valid());
   }
+  loc.wide = false;
   return loc;
 }
 
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 3ec31ba..6397208 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -35,20 +35,15 @@
 namespace {  // anonymous namespace
 
 MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
-  ArenaAllocator* arena = mir_graph->GetArena();
-  MIR* insn = static_cast<MIR*>(arena->Alloc(sizeof(MIR), kArenaAllocMIR));
+  MIR* insn = mir_graph->NewMIR();
   insn->offset = invoke->offset;
-  insn->width = invoke->width;
   insn->optimization_flags = MIR_CALLEE;
-  if (move_return != nullptr) {
-    DCHECK_EQ(move_return->offset, invoke->offset + invoke->width);
-    insn->width += move_return->width;
-  }
   return insn;
 }
 
 uint32_t GetInvokeReg(MIR* invoke, uint32_t arg) {
   DCHECK_LT(arg, invoke->dalvikInsn.vA);
+  DCHECK(!MIRGraph::IsPseudoMirOp(invoke->dalvikInsn.opcode));
   if (Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k3rc) {
     return invoke->dalvikInsn.vC + arg;  // Range invoke.
   } else {
@@ -59,6 +54,7 @@
 
 bool WideArgIsInConsecutiveDalvikRegs(MIR* invoke, uint32_t arg) {
   DCHECK_LT(arg + 1, invoke->dalvikInsn.vA);
+  DCHECK(!MIRGraph::IsPseudoMirOp(invoke->dalvikInsn.opcode));
   return Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k3rc ||
       invoke->dalvikInsn.arg[arg + 1u] == invoke->dalvikInsn.arg[arg] + 1u;
 }
@@ -660,7 +656,6 @@
   }
 
   MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
-  insn->width += insn->offset - invoke->offset;
   insn->offset = invoke->offset;
   insn->dalvikInsn.opcode = opcode;
   insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
@@ -737,9 +732,7 @@
 
   if (move_result != nullptr) {
     MIR* move = AllocReplacementMIR(mir_graph, invoke, move_result);
-    insn->width = invoke->width;
     move->offset = move_result->offset;
-    move->width = move_result->width;
     if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT) {
       move->dalvikInsn.opcode = Instruction::MOVE_FROM16;
     } else if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index de55a05..4f2a876 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -562,8 +562,8 @@
     RegStorage r_base;
     if (field_info.IsReferrersClass()) {
       // Fast path, static storage base is this method's class
-      RegLocation rl_method  = LoadCurrMethod();
-      r_base = AllocTemp();
+      RegLocation rl_method = LoadCurrMethod();
+      r_base = AllocTempWord();
       LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
       if (IsTemp(rl_method.reg)) {
         FreeTemp(rl_method.reg);
@@ -658,7 +658,7 @@
     if (field_info.IsReferrersClass()) {
       // Fast path, static storage base is this method's class
       RegLocation rl_method  = LoadCurrMethod();
-      r_base = AllocTemp();
+      r_base = AllocTempWord();
       LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
     } else {
       // Medium path, static storage base in a different class which requires checks that the other
@@ -1595,7 +1595,7 @@
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
       OpRegReg(op, rl_result.reg, rl_src1.reg);
     } else {
-      if (shift_op) {
+      if ((shift_op) && (cu_->instruction_set != kArm64)) {
         rl_src2 = LoadValue(rl_src2, kCoreReg);
         RegStorage t_reg = AllocTemp();
         OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
@@ -1613,7 +1613,7 @@
     StoreValue(rl_dest, rl_result);
   } else {
     bool done = false;      // Set to true if we happen to find a way to use a real instruction.
-    if (cu_->instruction_set == kMips) {
+    if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
       rl_src1 = LoadValue(rl_src1, kCoreReg);
       rl_src2 = LoadValue(rl_src2, kCoreReg);
       if (check_zero) {
@@ -1889,7 +1889,7 @@
       }
 
       bool done = false;
-      if (cu_->instruction_set == kMips) {
+      if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
         rl_src = LoadValue(rl_src, kCoreReg);
         rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
         done = true;
@@ -1952,6 +1952,10 @@
 
   switch (opcode) {
     case Instruction::NOT_LONG:
+      if (cu->instruction_set == kArm64) {
+        mir_to_lir->GenNotLong(rl_dest, rl_src2);
+        return;
+      }
       rl_src2 = mir_to_lir->LoadValueWide(rl_src2, kCoreReg);
       rl_result = mir_to_lir->EvalLoc(rl_dest, kCoreReg, true);
       // Check for destructive overlap
@@ -1998,6 +2002,10 @@
       break;
     case Instruction::DIV_LONG:
     case Instruction::DIV_LONG_2ADDR:
+      if (cu->instruction_set == kArm64) {
+        mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+        return;
+      }
       call_out = true;
       check_zero = true;
       ret_reg = mir_to_lir->TargetReg(kRet0).GetReg();
@@ -2005,6 +2013,10 @@
       break;
     case Instruction::REM_LONG:
     case Instruction::REM_LONG_2ADDR:
+      if (cu->instruction_set == kArm64) {
+        mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+        return;
+      }
       call_out = true;
       check_zero = true;
       func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod);
@@ -2014,7 +2026,8 @@
       break;
     case Instruction::AND_LONG_2ADDR:
     case Instruction::AND_LONG:
-      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
+      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
+          cu->instruction_set == kArm64) {
         return mir_to_lir->GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
       }
       first_op = kOpAnd;
@@ -2022,7 +2035,8 @@
       break;
     case Instruction::OR_LONG:
     case Instruction::OR_LONG_2ADDR:
-      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
+      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
+          cu->instruction_set == kArm64) {
         mir_to_lir->GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
         return;
       }
@@ -2031,7 +2045,8 @@
       break;
     case Instruction::XOR_LONG:
     case Instruction::XOR_LONG_2ADDR:
-      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
+      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
+          cu->instruction_set == kArm64) {
         mir_to_lir->GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
         return;
       }
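
Aside (illustrative sketch, not ART code): the AND with 31 in the shift path of
the @@ -1595 hunk above exists because Dalvik 32-bit shifts use only the low
five bits of the shift count:

    int32_t DalvikShlInt(int32_t value, int32_t count) {
      // Only the low 5 bits of the count are significant for 32-bit shifts.
      return static_cast<int32_t>(static_cast<uint32_t>(value) << (count & 31));
    }

ARM64 skips the explicit mask since its variable-shift instructions already
reduce the count modulo the operand width.
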
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 721b345..eef3294 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -443,14 +443,10 @@
   rl_src.reg = TargetReg(kArg0);
   rl_src.home = false;
   MarkLive(rl_src);
-  if (rl_method.wide) {
-    StoreValueWide(rl_method, rl_src);
-  } else {
-    StoreValue(rl_method, rl_src);
-  }
+  StoreValue(rl_method, rl_src);
   // If Method* has been promoted, explicitly flush
   if (rl_method.location == kLocPhysReg) {
-    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
+    StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
   }
 
   if (cu_->num_ins == 0) {
@@ -864,8 +860,17 @@
       // Wide spans, we need the 2nd half of uses[2].
       rl_arg = UpdateLocWide(rl_use2);
       if (rl_arg.location == kLocPhysReg) {
-        // NOTE: not correct for 64-bit core regs, but this needs rewriting for hard-float.
-        reg = rl_arg.reg.IsPair() ? rl_arg.reg.GetHigh() : rl_arg.reg.DoubleToHighSingle();
+        if (rl_arg.reg.IsPair()) {
+          reg = rl_arg.reg.GetHigh();
+        } else {
+          RegisterInfo* info = GetRegInfo(rl_arg.reg);
+          info = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
+          if (info == nullptr) {
+            // NOTE: For the hard-float convention we won't split arguments across reg/mem.
+            UNIMPLEMENTED(FATAL) << "Needs hard float API.";
+          }
+          reg = info->GetReg();
+        }
       } else {
         // kArg2 & rArg3 can safely be used here
         reg = TargetReg(kArg3);
@@ -1659,9 +1664,13 @@
     return;
   }
   DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
-  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
-      ->GenIntrinsic(this, info)) {
-    return;
+  // TODO: Enable intrinsics for x86_64.
+  // Intrinsics are temporarily disabled for x86_64; we will enable them step by step.
+  if (cu_->instruction_set != kX86_64) {
+    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
+        ->GenIntrinsic(this, info)) {
+      return;
+    }
   }
   GenInvokeNoInline(info);
 }
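
Aside (usage sketch; register names are illustrative): the aliased-view lookup
added to the wide-argument path above finds, for a double-precision register,
the single-precision register aliasing its high 32 bits, e.g. s1 when d0
overlaps s0/s1:

    RegisterInfo* info = GetRegInfo(rl_arg.reg);
    RegisterInfo* high = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
    if (high != nullptr) {
      reg = high->GetReg();  // the high single view of the double
    }
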
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 2b57b35..e462173 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -118,6 +118,7 @@
     bool GenInlinedSqrt(CallInfo* info);
     bool GenInlinedPeek(CallInfo* info, OpSize size);
     bool GenInlinedPoke(CallInfo* info, OpSize size);
+    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
     void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
     void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                    RegLocation rl_src2);
@@ -125,6 +126,8 @@
                     RegLocation rl_src2);
     void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                     RegLocation rl_src2);
+    void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                       RegLocation rl_src2, bool is_div);
     RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
     void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 55e93d7..beaf6bb 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -431,6 +431,15 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
+void MipsMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+  LOG(FATAL) << "Unexpected use of GenNotLong()";
+}
+
+void MipsMir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                                RegLocation rl_src2, bool is_div) {
+  LOG(FATAL) << "Unexpected use of GenDivRemLong()";
+}
+
 void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
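
Aside (condensed sketch with simplified signatures, not the real headers): the
stubs above follow the usual Mir2Lir pattern, where generic code dispatches to
a pure virtual hook on instruction sets that handle longs natively and targets
that never take that path stub the hook out:

    class Mir2Lir {
     public:
      // Implemented per target; reached only on ISAs with native long support.
      virtual void GenNotLong(RegLocation rl_dest, RegLocation rl_src) = 0;
    };

    class MipsMir2Lir : public Mir2Lir {
     public:
      void GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
        LOG(FATAL) << "Unexpected use of GenNotLong()";  // unreachable on MIPS
      }
    };
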
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 9fc93d0..df56820 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1069,7 +1069,7 @@
       work_half->meta.throw_insn = mir;
     }
 
-    if (opcode >= kMirOpFirst) {
+    if (MIRGraph::IsPseudoMirOp(opcode)) {
       HandleExtendedMethodMIR(bb, mir);
       continue;
     }
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 3e0ba75..8d572ca 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -332,6 +332,15 @@
         return arena->Alloc(size, kArenaAllocRegAlloc);
       }
 
+      static const uint32_t k32SoloStorageMask     = 0x00000001;
+      static const uint32_t kLowSingleStorageMask  = 0x00000001;
+      static const uint32_t kHighSingleStorageMask = 0x00000002;
+      static const uint32_t k64SoloStorageMask     = 0x00000003;
+      static const uint32_t k128SoloStorageMask    = 0x0000000f;
+      static const uint32_t k256SoloStorageMask    = 0x000000ff;
+      static const uint32_t k512SoloStorageMask    = 0x0000ffff;
+      static const uint32_t k1024SoloStorageMask   = 0xffffffff;
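+      // Each mask bit is one 32-bit storage lane of an aliased register set.
+      // For a double overlapping two singles: low single = 0b01, high
+      // single = 0b10, and the double's k64SoloStorageMask = 0b11 covers both.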
+
       bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; }
       void MarkInUse() { master_->used_storage_ |= storage_mask_; }
       void MarkFree() { master_->used_storage_ &= ~storage_mask_; }
@@ -389,7 +398,15 @@
       LIR* DefEnd() { return def_end_; }
       void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
       void ResetDefBody() { def_start_ = def_end_ = nullptr; }
-
+      // Find member of aliased set matching storage_used; return nullptr if none.
+      RegisterInfo* FindMatchingView(uint32_t storage_used) {
+        RegisterInfo* res = Master();
+        for (; res != nullptr; res = res->GetAliasChain()) {
+          if (res->StorageMask() == storage_used) {
+            break;
+          }
+        }
+        return res;
+      }
 
      private:
       RegStorage reg_;
@@ -617,6 +634,7 @@
     LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
     LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
     LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
+    LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method);
     LIR* AddWordData(LIR* *constant_list_p, int value);
     LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
     void ProcessSwitchTables();
@@ -647,7 +665,7 @@
     virtual void EndInvoke(CallInfo* info) {}
 
 
-    // Handle bookkeeping to convert a wide RegLocation to a narow RegLocation.  No code generated.
+    // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation.  No code generated.
     RegLocation NarrowRegLoc(RegLocation loc);
 
     // Shared by all targets - implemented in local_optimizations.cc
@@ -669,7 +687,7 @@
     /* Mark a temp register as dead.  Does not affect allocation state. */
     void Clobber(RegStorage reg);
     void ClobberSReg(int s_reg);
-    void ClobberAliases(RegisterInfo* info);
+    void ClobberAliases(RegisterInfo* info, uint32_t clobber_mask);
     int SRegToPMap(int s_reg);
     void RecordCorePromotion(RegStorage reg, int s_reg);
     RegStorage AllocPreservedCoreReg(int s_reg);
@@ -681,6 +699,7 @@
     virtual RegStorage AllocFreeTemp();
     virtual RegStorage AllocTemp();
     virtual RegStorage AllocTempWide();
+    virtual RegStorage AllocTempWord();
     virtual RegStorage AllocTempSingle();
     virtual RegStorage AllocTempDouble();
     virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
@@ -774,7 +793,7 @@
                              RegLocation rl_src2, LIR* taken, LIR* fall_through);
     void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
                                  LIR* taken, LIR* fall_through);
-    void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
     void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src);
     void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
@@ -799,7 +818,7 @@
     void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
     void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                       RegLocation rl_src1, RegLocation rl_src2);
-    void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+    virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src1, RegLocation rl_shift);
     void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                           RegLocation rl_src, int lit);
@@ -1169,6 +1188,7 @@
     virtual bool GenInlinedSqrt(CallInfo* info) = 0;
     virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
     virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
+    virtual void GenNotLong(RegLocation rl_dest, RegLocation rl_src) = 0;
     virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
     virtual void GenOrLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
@@ -1176,6 +1196,8 @@
                             RegLocation rl_src2) = 0;
     virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) = 0;
+    virtual void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2, bool is_div) = 0;
     virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                   bool is_div) = 0;
     virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 2c51c1f..058b89c 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -173,22 +173,26 @@
       }
       ClobberBody(info);
       if (info->IsAliased()) {
-        ClobberAliases(info);
+        ClobberAliases(info, info->StorageMask());
       } else {
         RegisterInfo* master = info->Master();
         if (info != master) {
           ClobberBody(info->Master());
+          ClobberAliases(info->Master(), info->StorageMask());
         }
       }
     }
   }
 }
 
-void Mir2Lir::ClobberAliases(RegisterInfo* info) {
+void Mir2Lir::ClobberAliases(RegisterInfo* info, uint32_t clobber_mask) {
   for (RegisterInfo* alias = info->GetAliasChain(); alias != nullptr;
        alias = alias->GetAliasChain()) {
     DCHECK(!alias->IsAliased());  // Only the master should be marked as aliased.
-    ClobberBody(alias);
+    // Only clobber if we have overlap.
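+    // For example, with singles aliasing a double: clobbering the high single
+    // (mask 0b10) clobbers the double (0b10 & 0b11 != 0) but leaves the low
+    // single live (0b10 & 0b01 == 0).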
+    if ((alias->StorageMask() & clobber_mask) != 0) {
+      ClobberBody(alias);
+    }
   }
 }
 
@@ -218,7 +222,7 @@
         }
         ClobberBody(info);
         if (info->IsAliased()) {
-          ClobberAliases(info);
+          ClobberAliases(info, info->StorageMask());
         }
       }
     }
@@ -402,6 +406,19 @@
   return res;
 }
 
+RegStorage Mir2Lir::AllocTempWord() {
+  // FIXME: temporary workaround.  For bring-up purposes, x86_64 needs the ability
+  // to allocate wide values as a pair of core registers.  However, we can't hold
+  // a reference in a register pair.  This workaround will be removed when the
+  // reference handling code is reworked, or the x86_64 backend starts using
+  // wide core registers, whichever happens first.
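+  // Call sites whose temp may hold a reference (e.g. the static storage base
+  // in GenSget/GenSput) use AllocTempWord() so the register is pointer-width
+  // on 64-bit targets, modulo the x86_64 workaround above.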
+  if (cu_->instruction_set == kX86_64) {
+    return AllocTemp();
+  } else {
+    return (Is64BitInstructionSet(cu_->instruction_set)) ? AllocTempWide() : AllocTemp();
+  }
+}
+
 RegStorage Mir2Lir::AllocTempSingle() {
   RegStorage res = AllocTempBody(reg_pool_->sp_regs_, &reg_pool_->next_sp_reg_, true);
   DCHECK(res.IsSingle()) << "Reg: 0x" << std::hex << res.GetRawBits();
@@ -447,8 +464,11 @@
     reg = FindLiveReg(wide ? reg_pool_->dp_regs_ : reg_pool_->sp_regs_, s_reg);
   }
   if (!reg.Valid() && (reg_class != kFPReg)) {
-    // TODO: add 64-bit core pool similar to above.
-    reg = FindLiveReg(reg_pool_->core_regs_, s_reg);
+    if (Is64BitInstructionSet(cu_->instruction_set)) {
+      reg = FindLiveReg(wide ? reg_pool_->core64_regs_ : reg_pool_->core_regs_, s_reg);
+    } else {
+      reg = FindLiveReg(reg_pool_->core_regs_, s_reg);
+    }
   }
   if (reg.Valid()) {
     if (wide && !reg.IsFloat() && !Is64BitInstructionSet(cu_->instruction_set)) {
@@ -950,11 +970,8 @@
         // If I'm live, master should not be live, but should show liveness in alias set.
         DCHECK_EQ(info->Master()->SReg(), INVALID_SREG);
         DCHECK(!info->Master()->IsDead());
-      } else if (!info->IsDead()) {
-        // If I'm not live, but there is liveness in the set master must be live.
-        DCHECK_EQ(info->SReg(), INVALID_SREG);
-        DCHECK(info->Master()->IsLive());
       }
+      // TODO: In the !info->IsDead() case, check that every live bit is owned by exactly one reg.
     }
     if (info->IsAliased()) {
       // Has child aliases.
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 9200106..39a0365 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -63,27 +63,36 @@
 { kX86 ## opname ## 16TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "16TI8", "fs:[!0d],!1d" }, \
   \
 { kX86 ## opname ## 32MR,  kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "32MR", "[!0r+!1d],!2r" }, \
-{ kX86 ## opname ## 64MR,  kMemReg64,  mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { REX_W,         0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "64MR", "[!0r+!1d],!2r" }, \
 { kX86 ## opname ## 32AR,  kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0,             0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "32AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
-{ kX86 ## opname ## 64AR,  kArrayReg64, mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { REX_W,         0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "64AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
 { kX86 ## opname ## 32TR,  kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "32TR", "fs:[!0d],!1r" }, \
 { kX86 ## opname ## 32RR,  kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RR", "!0r,!1r" }, \
 { kX86 ## opname ## 32RM,  kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RM", "!0r,[!1r+!2d]" }, \
-{ kX86 ## opname ## 64RM,  kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { REX_W,         0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "64RM", "!0r,[!1r+!2d]" }, \
 { kX86 ## opname ## 32RA,  kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
-{ kX86 ## opname ## 64RA,  kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { REX_W,         0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "64RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
 { kX86 ## opname ## 32RT,  kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RT", "!0r,fs:[!1d]" }, \
-{ kX86 ## opname ## 64RT,  kReg64Thread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, REX_W, r32_rm32, 0, 0, 0,              0,        0 }, #opname "64RT", "!0r,fs:[!1d]" }, \
 { kX86 ## opname ## 32RI,  kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 4 }, #opname "32RI", "!0r,!1d" }, \
-{ kX86 ## opname ## 64RI,  kReg64Imm,            IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { REX_W,         0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 4 }, #opname "32RI", "!0r,!1d" }, \
 { kX86 ## opname ## 32MI,  kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "32MI", "[!0r+!1d],!2d" }, \
 { kX86 ## opname ## 32AI,  kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
 { kX86 ## opname ## 32TI,  kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "32TI", "fs:[!0d],!1d" }, \
 { kX86 ## opname ## 32RI8, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32RI8", "!0r,!1d" }, \
-{ kX86 ## opname ## 64RI8, kReg64Imm,            IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { REX_W,         0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "64RI8", "!0r,!1d" }, \
 { kX86 ## opname ## 32MI8, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32MI8", "[!0r+!1d],!2d" }, \
 { kX86 ## opname ## 32AI8, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 32TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32TI8", "fs:[!0d],!1d" }
+{ kX86 ## opname ## 32TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32TI8", "fs:[!0d],!1d" }, \
+  \
+{ kX86 ## opname ## 64MR,  kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "64MR", "[!0r+!1d],!2r" }, \
+{ kX86 ## opname ## 64AR,  kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "64AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 64TR,  kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, REX_W, rm32_r32, 0, 0, 0,              0,        0 }, #opname "64TR", "fs:[!0d],!1r" }, \
+{ kX86 ## opname ## 64RR,  kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { REX_W,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "64RR", "!0r,!1r" }, \
+{ kX86 ## opname ## 64RM,  kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { REX_W,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "64RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## 64RA,  kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { REX_W,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "64RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
+{ kX86 ## opname ## 64RT,  kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, REX_W, r32_rm32, 0, 0, 0,              0,        0 }, #opname "64RT", "!0r,fs:[!1d]" }, \
+{ kX86 ## opname ## 64RI,  kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 4 }, #opname "64RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 64MI,  kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "64MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 64AI,  kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "64AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 64TI,  kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, REX_W, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "64TI", "fs:[!0d],!1d" }, \
+{ kX86 ## opname ## 64RI8, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "64RI8", "!0r,!1d" }, \
+{ kX86 ## opname ## 64MI8, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "64MI8", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 64AI8, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "64AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 64TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, REX_W, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "64TI8", "fs:[!0d],!1d" }
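+// Reading one row above (field roles inferred from the skeleton uses later in
+// this file): in { REX_W, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 } the entries
+// are prefix1, prefix2, primary opcode, two extra opcode bytes, the modrm
+// opcode extension, the short ax-form opcode (none here), and the immediate
+// size in bytes.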
 
 ENCODING_MAP(Add, IS_LOAD | IS_STORE, REG_DEF0, 0,
   0x00 /* RegMem8/Reg8 */,     0x01 /* RegMem32/Reg32 */,
@@ -146,6 +155,13 @@
   { kX86Imul32RMI8,  kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RMI8", "!0r,[!1r+!2d],!3d" },
   { kX86Imul32RAI8,  kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RAI8", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
 
+  { kX86Imul64RRI,   kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { REX_W, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul64RRI", "!0r,!1r,!2d" },
+  { kX86Imul64RMI,   kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { REX_W, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul64RMI", "!0r,[!1r+!2d],!3d" },
+  { kX86Imul64RAI,   kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { REX_W, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul64RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+  { kX86Imul64RRI8,  kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { REX_W, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul64RRI8", "!0r,!1r,!2d" },
+  { kX86Imul64RMI8,  kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { REX_W, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul64RMI8", "!0r,[!1r+!2d],!3d" },
+  { kX86Imul64RAI8,  kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { REX_W, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul64RAI8", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+
   { kX86Mov8MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0,             0, 0x88, 0, 0, 0, 0, 0 }, "Mov8MR", "[!0r+!1d],!2r" },
   { kX86Mov8AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0,             0, 0x88, 0, 0, 0, 0, 0 }, "Mov8AR", "[!0r+!1r<<!2d+!3d],!4r" },
   { kX86Mov8TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8TR", "fs:[!0d],!1r" },
@@ -171,30 +187,42 @@
   { kX86Mov16TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0x66, 0xC7, 0, 0, 0, 0, 2 }, "Mov16TI", "fs:[!0d],!1d" },
 
   { kX86Mov32MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0,             0, 0x89, 0, 0, 0, 0, 0 }, "Mov32MR", "[!0r+!1d],!2r" },
-  { kX86Mov64MR, kMemReg64,  IS_STORE | IS_TERTIARY_OP | REG_USE02,      { REX_W,         0, 0x89, 0, 0, 0, 0, 0 }, "Mov64MR", "[!0r+!1d],!2r" },
   { kX86Mov32AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0,             0, 0x89, 0, 0, 0, 0, 0 }, "Mov32AR", "[!0r+!1r<<!2d+!3d],!4r" },
-  { kX86Mov64AR, kArrayReg64, IS_STORE | IS_QUIN_OP     | REG_USE014,     { REX_W,        0, 0x89, 0, 0, 0, 0, 0 }, "Mov64AR", "[!0r+!1r<<!2d+!3d],!4r" },
   { kX86Mov32TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32TR", "fs:[!0d],!1r" },
   { kX86Mov32RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { 0,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RR", "!0r,!1r" },
   { kX86Mov32RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RM", "!0r,[!1r+!2d]" },
-  { kX86Mov64RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { REX_W,         0, 0x8B, 0, 0, 0, 0, 0 }, "Mov64RM", "!0r,[!1r+!2d]" },
   { kX86Mov32RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { 0,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
-  { kX86Mov64RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { REX_W,         0, 0x8B, 0, 0, 0, 0, 0 }, "Mov64RA", "!0r,[!1r+!2r<<!3d+!4d]" },
   { kX86Mov32RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RT", "!0r,fs:[!1d]" },
-  { kX86Mov64RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, REX_W, 0x8B, 0, 0, 0, 0, 0 }, "Mov64RT", "!0r,fs:[!1d]" },
   { kX86Mov32RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { 0,             0, 0xB8, 0, 0, 0, 0, 4 }, "Mov32RI", "!0r,!1d" },
   { kX86Mov32MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { 0,             0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32MI", "[!0r+!1d],!2d" },
   { kX86Mov32AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0,             0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32AI", "[!0r+!1r<<!2d+!3d],!4d" },
   { kX86Mov32TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32TI", "fs:[!0d],!1d" },
-  { kX86Mov64TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, REX_W, 0xC7, 0, 0, 0, 0, 4 }, "Mov64TI", "fs:[!0d],!1d" },
 
-  { kX86Lea32RM, kRegMem, IS_TERTIARY_OP | IS_LOAD | REG_DEF0_USE1,      { 0, 0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RM", "!0r,[!1r+!2d]" },
+  { kX86Lea32RM, kRegMem, IS_TERTIARY_OP | IS_LOAD | REG_DEF0_USE1,      { 0,             0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RM", "!0r,[!1r+!2d]" },
 
-  { kX86Lea32RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+  { kX86Lea32RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12,                 { 0,             0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
 
-  { kX86Cmov32RRC, kRegRegCond, IS_TERTIARY_OP | REG_DEF0_USE01 | USES_CCODES, {0, 0, 0x0F, 0x40, 0, 0, 0, 0}, "Cmovcc32RR", "!2c !0r,!1r" },
+  { kX86Mov64MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { REX_W,             0, 0x89, 0, 0, 0, 0, 0 }, "Mov64MR", "[!0r+!1d],!2r" },
+  { kX86Mov64AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { REX_W,             0, 0x89, 0, 0, 0, 0, 0 }, "Mov64AR", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86Mov64TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, REX_W, 0x89, 0, 0, 0, 0, 0 }, "Mov64TR", "fs:[!0d],!1r" },
+  { kX86Mov64RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { REX_W,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov64RR", "!0r,!1r" },
+  { kX86Mov64RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { REX_W,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov64RM", "!0r,[!1r+!2d]" },
+  { kX86Mov64RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { REX_W,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov64RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+  { kX86Mov64RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, REX_W, 0x8B, 0, 0, 0, 0, 0 }, "Mov64RT", "!0r,fs:[!1d]" },
+  { kX86Mov64RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { REX_W,             0, 0xB8, 0, 0, 0, 0, 8 }, "Mov64RI", "!0r,!1d" },
+  { kX86Mov64MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { REX_W,             0, 0xC7, 0, 0, 0, 0, 4 }, "Mov64MI", "[!0r+!1d],!2d" },
+  { kX86Mov64AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { REX_W,             0, 0xC7, 0, 0, 0, 0, 4 }, "Mov64AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Mov64TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, REX_W, 0xC7, 0, 0, 0, 0, 4 }, "Mov64TI", "fs:[!0d],!1d" },
 
-  { kX86Cmov32RMC, kRegMemCond, IS_QUAD_OP | IS_LOAD | REG_DEF0_USE01 | USES_CCODES, {0, 0, 0x0F, 0x40, 0, 0, 0, 0}, "Cmovcc32RM", "!3c !0r,[!1r+!2d]" },
+  { kX86Lea64RM, kRegMem, IS_TERTIARY_OP | IS_LOAD | REG_DEF0_USE1,      { REX_W,             0, 0x8D, 0, 0, 0, 0, 0 }, "Lea64RM", "!0r,[!1r+!2d]" },
+
+  { kX86Lea64RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12,                 { REX_W,             0, 0x8D, 0, 0, 0, 0, 0 }, "Lea64RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+
+  { kX86Cmov32RRC, kRegRegCond, IS_TERTIARY_OP | REG_DEF0_USE01 | USES_CCODES, {0,     0, 0x0F, 0x40, 0, 0, 0, 0}, "Cmovcc32RR", "!2c !0r,!1r" },
+  { kX86Cmov64RRC, kRegRegCond, IS_TERTIARY_OP | REG_DEF0_USE01 | USES_CCODES, {REX_W, 0, 0x0F, 0x40, 0, 0, 0, 0}, "Cmovcc64RR", "!2c !0r,!1r" },
+
+  { kX86Cmov32RMC, kRegMemCond, IS_QUAD_OP | IS_LOAD | REG_DEF0_USE01 | USES_CCODES, {0,     0, 0x0F, 0x40, 0, 0, 0, 0}, "Cmovcc32RM", "!3c !0r,[!1r+!2d]" },
+  { kX86Cmov64RMC, kRegMemCond, IS_QUAD_OP | IS_LOAD | REG_DEF0_USE01 | USES_CCODES, {REX_W, 0, 0x0F, 0x40, 0, 0, 0, 0}, "Cmovcc64RM", "!3c !0r,[!1r+!2d]" },
 
 #define SHIFT_ENCODING_MAP(opname, modrm_opcode) \
 { kX86 ## opname ## 8RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { 0,    0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8RI", "!0r,!1d" }, \
@@ -216,7 +244,14 @@
 { kX86 ## opname ## 32AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { 0,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
 { kX86 ## opname ## 32RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "32RC", "!0r,cl" }, \
 { kX86 ## opname ## 32MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "32MC", "[!0r+!1d],cl" }, \
-{ kX86 ## opname ## 32AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "32AC", "[!0r+!1r<<!2d+!3d],cl" }
+{ kX86 ## opname ## 32AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "32AC", "[!0r+!1r<<!2d+!3d],cl" }, \
+  \
+{ kX86 ## opname ## 64RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { REX_W,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "64RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 64MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { REX_W,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "64MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 64AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { REX_W,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "64AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 64RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { REX_W,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "64RC", "!0r,cl" }, \
+{ kX86 ## opname ## 64MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { REX_W,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "64MC", "[!0r+!1d],cl" }, \
+{ kX86 ## opname ## 64AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { REX_W,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "64AC", "[!0r+!1r<<!2d+!3d],cl" }
 
   SHIFT_ENCODING_MAP(Rol, 0x0),
   SHIFT_ENCODING_MAP(Ror, 0x1),
@@ -232,6 +267,10 @@
   { kX86Shld32MRI,  kMemRegImm,    IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { 0,    0, 0x0F, 0xA4, 0, 0, 0, 1}, "Shld32MRI", "[!0r+!1d],!2r,!3d" },
   { kX86Shrd32RRI,  kRegRegImmRev, IS_TERTIARY_OP | REG_DEF0_USE01  | SETS_CCODES, { 0,    0, 0x0F, 0xAC, 0, 0, 0, 1}, "Shrd32RRI", "!0r,!1r,!2d" },
   { kX86Shrd32MRI,  kMemRegImm,    IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { 0,    0, 0x0F, 0xAC, 0, 0, 0, 1}, "Shrd32MRI", "[!0r+!1d],!2r,!3d" },
+  { kX86Shld64RRI,  kRegRegImmRev, IS_TERTIARY_OP | REG_DEF0_USE01  | SETS_CCODES, { REX_W,    0, 0x0F, 0xA4, 0, 0, 0, 1}, "Shld64RRI", "!0r,!1r,!2d" },
+  { kX86Shld64MRI,  kMemRegImm,    IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { REX_W,    0, 0x0F, 0xA4, 0, 0, 0, 1}, "Shld64MRI", "[!0r+!1d],!2r,!3d" },
+  { kX86Shrd64RRI,  kRegRegImmRev, IS_TERTIARY_OP | REG_DEF0_USE01  | SETS_CCODES, { REX_W,    0, 0x0F, 0xAC, 0, 0, 0, 1}, "Shrd64RRI", "!0r,!1r,!2d" },
+  { kX86Shrd64MRI,  kMemRegImm,    IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { REX_W,    0, 0x0F, 0xAC, 0, 0, 0, 1}, "Shrd64MRI", "[!0r+!1d],!2r,!3d" },
 
   { kX86Test8RI,  kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0,    0, 0xF6, 0, 0, 0, 0, 1}, "Test8RI", "!0r,!1d" },
   { kX86Test8MI,  kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0,    0, 0xF6, 0, 0, 0, 0, 1}, "Test8MI", "[!0r+!1d],!2d" },
@@ -242,7 +281,12 @@
   { kX86Test32RI, kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0,    0, 0xF7, 0, 0, 0, 0, 4}, "Test32RI", "!0r,!1d" },
   { kX86Test32MI, kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0,    0, 0xF7, 0, 0, 0, 0, 4}, "Test32MI", "[!0r+!1d],!2d" },
   { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { 0,    0, 0xF7, 0, 0, 0, 0, 4}, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Test64RI, kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4}, "Test64RI", "!0r,!1d" },
+  { kX86Test64MI, kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4}, "Test64MI", "[!0r+!1d],!2d" },
+  { kX86Test64AI, kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4}, "Test64AI", "[!0r+!1r<<!2d+!3d],!4d" },
+
   { kX86Test32RR, kRegReg,             IS_BINARY_OP   | REG_USE01 | SETS_CCODES, { 0,    0, 0x85, 0, 0, 0, 0, 0}, "Test32RR", "!0r,!1r" },
+  { kX86Test64RR, kRegReg,             IS_BINARY_OP   | REG_USE01 | SETS_CCODES, { REX_W, 0, 0x85, 0, 0, 0, 0, 0}, "Test64RR", "!0r,!1r" },
 
 #define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \
                            reg, reg_kind, reg_flags, \
@@ -258,7 +302,10 @@
 { kX86 ## opname ## 16 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #arr, hw_format "[!0r+!1r<<!2d+!3d]" }, \
 { kX86 ## opname ## 32 ## reg, reg_kind,                      reg_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #reg, w_format "!0r" }, \
 { kX86 ## opname ## 32 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #mem, w_format "[!0r+!1d]" }, \
-{ kX86 ## opname ## 32 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #arr, w_format "[!0r+!1r<<!2d+!3d]" }
+{ kX86 ## opname ## 32 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #arr, w_format "[!0r+!1r<<!2d+!3d]" }, \
+{ kX86 ## opname ## 64 ## reg, reg_kind,                      reg_flags | w_flags  | sets_ccodes, { REX_W, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "64" #reg, w_format "!0r" }, \
+{ kX86 ## opname ## 64 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | w_flags  | sets_ccodes, { REX_W, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "64" #mem, w_format "[!0r+!1d]" }, \
+{ kX86 ## opname ## 64 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | w_flags  | sets_ccodes, { REX_W, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "64" #arr, w_format "[!0r+!1r<<!2d+!3d]" }
 
   UNARY_ENCODING_MAP(Not, 0x2, IS_STORE, 0,           R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""),
   UNARY_ENCODING_MAP(Neg, 0x3, IS_STORE, SETS_CCODES, R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""),
@@ -279,6 +326,11 @@
 { kX86 ## opname ## RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE1,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RM", "!0r,[!1r+!2d]" }, \
 { kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE12, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
 
+#define EXT_0F_ENCODING2_MAP(opname, prefix, opcode, opcode2, reg_def) \
+{ kX86 ## opname ## RR, kRegReg,             IS_BINARY_OP   | reg_def | REG_USE1,  { prefix, 0, 0x0F, opcode, opcode2, 0, 0, 0 }, #opname "RR", "!0r,!1r" }, \
+{ kX86 ## opname ## RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE1,  { prefix, 0, 0x0F, opcode, opcode2, 0, 0, 0 }, #opname "RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE12, { prefix, 0, 0x0F, opcode, opcode2, 0, 0, 0 }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
+
   EXT_0F_ENCODING_MAP(Movsd, 0xF2, 0x10, REG_DEF0),
   { kX86MovsdMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdMR", "[!0r+!1d],!2r" },
   { kX86MovsdAR, kArrayReg, IS_STORE | IS_QUIN_OP     | REG_USE014, { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdAR", "[!0r+!1r<<!2d+!3d],!4r" },
@@ -310,10 +362,42 @@
   EXT_0F_ENCODING_MAP(Divsd,     0xF2, 0x5E, REG_DEF0_USE0),
   EXT_0F_ENCODING_MAP(Divss,     0xF3, 0x5E, REG_DEF0_USE0),
   EXT_0F_ENCODING_MAP(Punpckldq, 0x66, 0x62, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Sqrtsd,    0xF2, 0x51, REG_DEF0_USE0),
+  EXT_0F_ENCODING2_MAP(Pmulld,   0x66, 0x38, 0x40, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Pmullw,    0x66, 0xD5, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Mulps,     0x00, 0x59, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Mulpd,     0x66, 0x59, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Paddb,     0x66, 0xFC, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Paddw,     0x66, 0xFD, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Paddd,     0x66, 0xFE, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Addps,     0x00, 0x58, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Addpd,     0x66, 0x58, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Psubb,     0x66, 0xF8, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Psubw,     0x66, 0xF9, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Psubd,     0x66, 0xFA, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Subps,     0x00, 0x5C, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Subpd,     0x66, 0x5C, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Pand,      0x66, 0xDB, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Por,       0x66, 0xEB, REG_DEF0_USE0),
+  EXT_0F_ENCODING_MAP(Pxor,      0x66, 0xEF, REG_DEF0_USE0),
+  EXT_0F_ENCODING2_MAP(Phaddw,   0x66, 0x38, 0x01, REG_DEF0_USE0),
+  EXT_0F_ENCODING2_MAP(Phaddd,   0x66, 0x38, 0x02, REG_DEF0_USE0),
 
+  { kX86PextrbRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x14, 0, 0, 1 }, "PextrbRRI", "!0r,!1r,!2d" },
+  { kX86PextrwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0xC5, 0x00, 0, 0, 1 }, "PextrwRRI", "!0r,!1r,!2d" },
+  { kX86PextrdRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1 }, "PextrdRRI", "!0r,!1r,!2d" },
+
+  { kX86PshuflwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0xF2, 0, 0x0F, 0x70, 0, 0, 0, 1 }, "PshuflwRRI", "!0r,!1r,!2d" },
+  { kX86PshufdRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x70, 0, 0, 0, 1 }, "PshufdRRI", "!0r,!1r,!2d" },
+
+  { kX86PsrawRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x71, 0, 4, 0, 1 }, "PsrawRI", "!0r,!1d" },
+  { kX86PsradRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x72, 0, 4, 0, 1 }, "PsradRI", "!0r,!1d" },
+  { kX86PsrlwRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x71, 0, 2, 0, 1 }, "PsrlwRI", "!0r,!1d" },
+  { kX86PsrldRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x72, 0, 2, 0, 1 }, "PsrldRI", "!0r,!1d" },
   { kX86PsrlqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 2, 0, 1 }, "PsrlqRI", "!0r,!1d" },
+  { kX86PsllwRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x71, 0, 6, 0, 1 }, "PsllwRI", "!0r,!1d" },
+  { kX86PslldRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x72, 0, 6, 0, 1 }, "PslldRI", "!0r,!1d" },
   { kX86PsllqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 6, 0, 1 }, "PsllqRI", "!0r,!1d" },
-  { kX86SqrtsdRR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0xF2, 0, 0x0F, 0x51, 0, 0, 0, 0 }, "SqrtsdRR", "!0r,!1r" },
 
   { kX86Fild32M, kMem, IS_LOAD | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDB, 0x00, 0, 0, 0, 0 }, "Fild32M", "[!0r,!1d]" },
   { kX86Fild64M, kMem, IS_LOAD | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDF, 0x00, 0, 5, 0, 0 }, "Fild64M", "[!0r,!1d]" },
@@ -394,7 +478,8 @@
   { kX86RepneScasw, kPrefix2Nullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0 }, "RepNE ScasW", "" },
 };
 
-size_t X86Mir2Lir::ComputeSize(const X86EncodingMap* entry, int base, int displacement, bool has_sib) {
+size_t X86Mir2Lir::ComputeSize(const X86EncodingMap* entry, int base, int displacement,
+                               int reg_r, int reg_x, bool has_sib) {
   size_t size = 0;
   if (entry->skeleton.prefix1 > 0) {
     ++size;
@@ -402,6 +487,10 @@
       ++size;
     }
   }
+  if ((NeedsRex(base) || NeedsRex(reg_r) || NeedsRex(reg_x)) &&
+       entry->skeleton.prefix1 != REX_W && entry->skeleton.prefix2 != REX_W) {
+    ++size;  // REX prefix byte.
+  }
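+  // The REX byte is counted exactly once: encodings carrying REX_W already
+  // counted it with the prefixes above, and extended registers merely set
+  // bits in that same byte.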
   ++size;  // opcode
   if (entry->skeleton.opcode == 0x0F) {
     ++size;
@@ -410,13 +499,13 @@
     }
   }
   ++size;  // modrm
-  if (has_sib || RegStorage::RegNum(base) == rs_rX86_SP.GetRegNum()
+  if (has_sib || LowRegisterBits(RegStorage::RegNum(base)) == rs_rX86_SP.GetRegNum()
       || (Gen64Bit() && entry->skeleton.prefix1 == THREAD_PREFIX)) {
     // SP requires a SIB byte.
     // GS access also needs a SIB byte for absolute addressing in 64-bit mode.
     ++size;
   }
-  if (displacement != 0 || RegStorage::RegNum(base) == rs_rBP.GetRegNum()) {
+  if (displacement != 0 || LowRegisterBits(RegStorage::RegNum(base)) == rs_rBP.GetRegNum()) {
     // BP requires an explicit displacement, even when it's 0.
     if (entry->opcode != kX86Lea32RA) {
       DCHECK_NE(entry->flags & (IS_LOAD | IS_STORE), 0ULL) << entry->name;
@@ -440,38 +529,41 @@
     case kPrefix2Nullary:
       return 3;  // 1 byte of opcode + 2 prefixes
     case kRegOpcode:  // lir operands - 0: reg
-      return ComputeSize(entry, 0, 0, false) - 1;  // substract 1 for modrm
-    case kReg64:
+      // subtract 1 for modrm
+      return ComputeSize(entry, 0, 0, lir->operands[0], NO_REG, false) - 1;
     case kReg:  // lir operands - 0: reg
-      return ComputeSize(entry, 0, 0, false);
+      return ComputeSize(entry, 0, 0, lir->operands[0], NO_REG, false);
     case kMem:  // lir operands - 0: base, 1: disp
-      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], NO_REG, NO_REG, false);
     case kArray:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
-      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
-    case kMemReg64:
+      return ComputeSize(entry, lir->operands[0], lir->operands[3],
+                         NO_REG, lir->operands[1], true);
     case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
-      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+      return ComputeSize(entry, lir->operands[0], lir->operands[1],
+                         lir->operands[2], NO_REG, false);
     case kMemRegImm:  // lir operands - 0: base, 1: disp, 2: reg 3: immediate
-      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
-    case kArrayReg64:
+      return ComputeSize(entry, lir->operands[0], lir->operands[1],
+                         lir->operands[2], NO_REG, false);
     case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
-      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+      return ComputeSize(entry, lir->operands[0], lir->operands[3],
+                         lir->operands[4], lir->operands[1], true);
     case kThreadReg:  // lir operands - 0: disp, 1: reg
-      return ComputeSize(entry, 0, lir->operands[0], false);
-    case kRegReg:
-      return ComputeSize(entry, 0, 0, false);
-    case kRegRegStore:
-      return ComputeSize(entry, 0, 0, false);
+      return ComputeSize(entry, 0, lir->operands[0], lir->operands[1], NO_REG, false);
+    case kRegReg:  // lir operands - 0: reg1, 1: reg2
+      return ComputeSize(entry, 0, 0, lir->operands[0], lir->operands[1], false);
+    case kRegRegStore:  // lir operands - 0: reg2, 1: reg1
+      return ComputeSize(entry, 0, 0, lir->operands[1], lir->operands[0], false);
     case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
-      return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
+      return ComputeSize(entry, lir->operands[1], lir->operands[2],
+                         lir->operands[0], NO_REG, false);
     case kRegArray:   // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
-      return ComputeSize(entry, lir->operands[1], lir->operands[4], true);
-    case kReg64Thread:  // lir operands - 0: reg, 1: disp
+      return ComputeSize(entry, lir->operands[1], lir->operands[4],
+                         lir->operands[0], lir->operands[2], true);
     case kRegThread:  // lir operands - 0: reg, 1: disp
-      return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32bit
-    case kReg64Imm:
+      // displacement size is always 32bit
+      return ComputeSize(entry, 0, 0x12345678, lir->operands[0], NO_REG, false);
     case kRegImm: {  // lir operands - 0: reg, 1: immediate
-      size_t size = ComputeSize(entry, 0, 0, false);
+      size_t size = ComputeSize(entry, 0, 0, lir->operands[0], NO_REG, false);
       if (entry->skeleton.ax_opcode == 0) {
         return size;
       } else {
@@ -481,47 +573,58 @@
       }
     }
     case kMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
-      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+      return ComputeSize(entry, lir->operands[0], lir->operands[1],
+                         NO_REG, NO_REG, false);
     case kArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
-      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+      return ComputeSize(entry, lir->operands[0], lir->operands[3],
+                         NO_REG, lir->operands[1], true);
     case kThreadImm:  // lir operands - 0: disp, 1: imm
-      return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32bit
+      // displacement size is always 32bit
+      return ComputeSize(entry, 0, 0x12345678, NO_REG, NO_REG, false);
     case kRegRegImm:  // lir operands - 0: reg, 1: reg, 2: imm
     case kRegRegImmRev:
-      return ComputeSize(entry, 0, 0, false);
+      return ComputeSize(entry, 0, 0, lir->operands[0], lir->operands[1], false);
     case kRegMemImm:  // lir operands - 0: reg, 1: base, 2: disp, 3: imm
-      return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
+      return ComputeSize(entry, lir->operands[1], lir->operands[2],
+                         lir->operands[0], NO_REG, false);
     case kRegArrayImm:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp, 5: imm
-      return ComputeSize(entry, lir->operands[1], lir->operands[4], true);
+      return ComputeSize(entry, lir->operands[1], lir->operands[4],
+                         lir->operands[0], lir->operands[2], true);
     case kMovRegImm:  // lir operands - 0: reg, 1: immediate
-      return 1 + entry->skeleton.immediate_bytes;
+      return ((entry->skeleton.prefix1 != 0 || NeedsRex(lir->operands[0])) ? 1 : 0) +
+             1 + entry->skeleton.immediate_bytes;
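+      // Worked sizes for the expression above (hand-computed):
+      //   Mov32RI eax, imm32: opcode 0xB8+rd + 4 imm bytes  = 5 bytes
+      //   Mov32RI r8d, imm32: REX.B + opcode + 4 imm bytes  = 6 bytes
+      //   Mov64RI rax, imm64: REX.W + opcode + 8 imm bytes  = 10 bytes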
     case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
       // Shift by immediate one has a shorter opcode.
-      return ComputeSize(entry, 0, 0, false) - (lir->operands[1] == 1 ? 1 : 0);
+      return ComputeSize(entry, 0, 0, lir->operands[0], NO_REG, false) -
+             (lir->operands[1] == 1 ? 1 : 0);
     case kShiftMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
       // Shift by immediate one has a shorter opcode.
-      return ComputeSize(entry, lir->operands[0], lir->operands[1], false) -
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], NO_REG, NO_REG, false) -
              (lir->operands[2] == 1 ? 1 : 0);
     case kShiftArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
       // Shift by immediate one has a shorter opcode.
-      return ComputeSize(entry, lir->operands[0], lir->operands[3], true) -
+      return ComputeSize(entry, lir->operands[0], lir->operands[3],
+                         NO_REG, lir->operands[1], true) -
              (lir->operands[4] == 1 ? 1 : 0);
-    case kShiftRegCl:
-      return ComputeSize(entry, 0, 0, false);
+    case kShiftRegCl:  // lir operands - 0: reg, 1: cl
+      return ComputeSize(entry, 0, 0, lir->operands[0], NO_REG, false);
     case kShiftMemCl:  // lir operands - 0: base, 1: disp, 2: cl
-      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], NO_REG, NO_REG, false);
     case kShiftArrayCl:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
-      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+      return ComputeSize(entry, lir->operands[0], lir->operands[3],
+                         lir->operands[4], lir->operands[1], true);
     case kRegCond:  // lir operands - 0: reg, 1: cond
-      return ComputeSize(entry, 0, 0, false);
+      return ComputeSize(entry, 0, 0, lir->operands[0], NO_REG, false);
     case kMemCond:  // lir operands - 0: base, 1: disp, 2: cond
-      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], NO_REG, NO_REG, false);
     case kArrayCond:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: cond
-      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+      return ComputeSize(entry, lir->operands[0], lir->operands[3],
+                         NO_REG, lir->operands[1], true);
     case kRegRegCond:  // lir operands - 0: reg, 1: reg, 2: cond
-      return ComputeSize(entry, 0, 0, false);
+      return ComputeSize(entry, 0, 0, lir->operands[0], lir->operands[1], false);
     case kRegMemCond:  // lir operands - 0: reg, 1: base, 2: disp, 3: cond
-      return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
+      return ComputeSize(entry, lir->operands[1], lir->operands[2],
+                         lir->operands[0], NO_REG, false);
     case kJcc:
       if (lir->opcode == kX86Jcc8) {
         return 2;  // opcode + rel8
@@ -535,21 +638,28 @@
       } else if (lir->opcode == kX86Jmp32) {
         return 5;  // opcode + rel32
       } else if (lir->opcode == kX86JmpT) {
-        return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32bit
+        // displacement size is always 32bit
+        return ComputeSize(entry, 0, 0x12345678, NO_REG, NO_REG, false);
       } else {
         DCHECK(lir->opcode == kX86JmpR);
-        return 2;  // opcode + modrm
+        if (NeedsRex(lir->operands[0])) {
+          return 3;  // REX.B + opcode + modrm
+        } else {
+          return 2;  // opcode + modrm
+        }
       }
     case kCall:
       switch (lir->opcode) {
         case kX86CallI: return 5;  // opcode 0:disp
         case kX86CallR: return 2;  // opcode modrm
         case kX86CallM:  // lir operands - 0: base, 1: disp
-          return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+          return ComputeSize(entry, lir->operands[0], lir->operands[1], NO_REG, NO_REG, false);
         case kX86CallA:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
-          return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+          return ComputeSize(entry, lir->operands[0], lir->operands[3],
+                             NO_REG, lir->operands[1], true);
         case kX86CallT:  // lir operands - 0: disp
-          return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32bit
+          // displacement size is always 32-bit
+          return ComputeSize(entry, 0, 0x12345678, NO_REG, NO_REG, false);
         default:
           break;
       }
@@ -557,16 +667,19 @@
     case kPcRel:
       if (entry->opcode == kX86PcRelLoadRA) {
         // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
-        return ComputeSize(entry, lir->operands[1], 0x12345678, true);
+        return ComputeSize(entry, lir->operands[1], 0x12345678,
+                           lir->operands[0], lir->operands[2], true);
       } else {
         DCHECK(entry->opcode == kX86PcRelAdr);
         return 5;  // opcode with reg + 4 byte immediate
       }
-    case kMacro:
+    case kMacro:  // lir operands - 0: reg
       DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
       return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ +
-          ComputeSize(&X86Mir2Lir::EncodingMap[kX86Sub32RI], 0, 0, false) -
-          (RegStorage::RegNum(lir->operands[0]) == rs_rAX.GetRegNum()  ? 1 : 0);  // shorter ax encoding
+          ComputeSize(&X86Mir2Lir::EncodingMap[kX86Sub32RI], 0, 0,
+                      lir->operands[0], NO_REG, false) -
+          // shorter ax encoding
+          (RegStorage::RegNum(lir->operands[0]) == rs_rAX.GetRegNum() ? 1 : 0);
     default:
       break;
   }
@@ -575,19 +688,62 @@
 }
 
 void X86Mir2Lir::EmitPrefix(const X86EncodingMap* entry) {
+  EmitPrefix(entry, NO_REG, NO_REG, NO_REG);
+}
+
+void X86Mir2Lir::EmitPrefix(const X86EncodingMap* entry,
+                            uint8_t reg_r, uint8_t reg_x, uint8_t reg_b) {
+  // REX.WRXB
+  // W - 64-bit operand
+  // R - MODRM.reg
+  // X - SIB.index
+  // B - MODRM.rm/SIB.base
+  bool force = false;
+  bool w = (entry->skeleton.prefix1 == REX_W) || (entry->skeleton.prefix2 == REX_W);
+  bool r = NeedsRex(reg_r);
+  bool x = NeedsRex(reg_x);
+  bool b = NeedsRex(reg_b);
+  uint8_t rex = force ? 0x40 : 0;
+  if (w) {
+    rex |= 0x48;  // REX.W000
+  }
+  if (r) {
+    rex |= 0x44;  // REX.0R00
+  }
+  if (x) {
+    rex |= 0x42;  // REX.00X0
+  }
+  if (b) {
+    rex |= 0x41;  // REX.000B
+  }
   if (entry->skeleton.prefix1 != 0) {
     if (Gen64Bit() && entry->skeleton.prefix1 == THREAD_PREFIX) {
       // 64-bit addresses use GS, not FS
       code_buffer_.push_back(THREAD_PREFIX_GS);
     } else {
-      code_buffer_.push_back(entry->skeleton.prefix1);
+      if (entry->skeleton.prefix1 == REX_W) {
+        rex |= entry->skeleton.prefix1;
+        code_buffer_.push_back(rex);
+        rex = 0;
+      } else {
+        code_buffer_.push_back(entry->skeleton.prefix1);
+      }
     }
     if (entry->skeleton.prefix2 != 0) {
-      code_buffer_.push_back(entry->skeleton.prefix2);
+      if (entry->skeleton.prefix2 == REX_W) {
+        rex |= entry->skeleton.prefix2;
+        code_buffer_.push_back(rex);
+        rex = 0;
+      } else {
+        code_buffer_.push_back(entry->skeleton.prefix2);
+      }
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
+  if (rex != 0) {
+    code_buffer_.push_back(rex);
+  }
 }
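
For reference, the REX byte assembled above follows the standard x86-64 layout 0100WRXB. A minimal standalone sketch (not ART code) of the same composition:

#include <cstdint>
#include <cstdio>

// Compose a REX prefix: fixed 0100 high nibble, then the W/R/X/B flag bits.
uint8_t ComposeRex(bool w, bool r, bool x, bool b) {
  uint8_t rex = 0x40;  // 0100 0000 marker
  if (w) rex |= 0x08;  // REX.W: 64-bit operand size
  if (r) rex |= 0x04;  // REX.R: extends ModRM.reg
  if (x) rex |= 0x02;  // REX.X: extends SIB.index
  if (b) rex |= 0x01;  // REX.B: extends ModRM.rm or SIB.base
  return rex;
}

int main() {
  // REX.W + REX.B, e.g. a 64-bit op whose rm register is one of r8-r15.
  printf("%#04x\n", static_cast<unsigned>(ComposeRex(true, false, false, true)));  // 0x49
  return 0;
}

The emitter reaches the same result by ORing 0x48/0x44/0x42/0x41, each of which already carries the 0x40 marker bit.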
 
 void X86Mir2Lir::EmitOpcode(const X86EncodingMap* entry) {
@@ -606,7 +762,12 @@
 }
 
 void X86Mir2Lir::EmitPrefixAndOpcode(const X86EncodingMap* entry) {
-  EmitPrefix(entry);
+  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, NO_REG);
+}
+
+void X86Mir2Lir::EmitPrefixAndOpcode(const X86EncodingMap* entry,
+                                     uint8_t reg_r, uint8_t reg_x, uint8_t reg_b) {
+  EmitPrefix(entry, reg_r, reg_x, reg_b);
   EmitOpcode(entry);
 }
 
@@ -675,7 +836,7 @@
   EmitDisp(base, disp);
 }
 
-void X86Mir2Lir::EmitImm(const X86EncodingMap* entry, int imm) {
+void X86Mir2Lir::EmitImm(const X86EncodingMap* entry, int64_t imm) {
   switch (entry->skeleton.immediate_bytes) {
     case 1:
       DCHECK(IS_SIMM8(imm));
@@ -687,11 +848,26 @@
       code_buffer_.push_back((imm >> 8) & 0xFF);
       break;
     case 4:
+      if (imm < 0) {
+        CHECK_EQ((-imm) & 0x0FFFFFFFFl, -imm);
+      } else {
+        CHECK_EQ(imm & 0x0FFFFFFFFl, imm);
+      }
       code_buffer_.push_back(imm & 0xFF);
       code_buffer_.push_back((imm >> 8) & 0xFF);
       code_buffer_.push_back((imm >> 16) & 0xFF);
       code_buffer_.push_back((imm >> 24) & 0xFF);
       break;
+    case 8:
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
+      code_buffer_.push_back((imm >> 32) & 0xFF);
+      code_buffer_.push_back((imm >> 40) & 0xFF);
+      code_buffer_.push_back((imm >> 48) & 0xFF);
+      code_buffer_.push_back((imm >> 56) & 0xFF);
+      break;
     default:
       LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
                  << ") for instruction: " << entry->name;
@@ -700,7 +876,8 @@
 }
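
The unrolled push_back runs above all lay the immediate out least-significant byte first, as x86 requires. A loop-based equivalent, shown only as a sketch:

#include <cstdint>
#include <cstdio>
#include <vector>

// Append 'bytes' bytes of 'imm' to 'buf' in little-endian order, matching
// how x86 immediates appear in the instruction stream.
void PushImm(std::vector<uint8_t>* buf, int64_t imm, int bytes) {
  for (int i = 0; i < bytes; ++i) {
    buf->push_back(static_cast<uint8_t>((imm >> (8 * i)) & 0xFF));
  }
}

int main() {
  std::vector<uint8_t> buf;
  PushImm(&buf, 0x1122334455667788LL, 8);
  for (uint8_t b : buf) printf("%02x ", b);  // 88 77 66 55 44 33 22 11
  printf("\n");
  return 0;
}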
 
 void X86Mir2Lir::EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg) {
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, reg, NO_REG, NO_REG);
+  reg = LowRegisterBits(reg);
   // There's no 3-byte instruction with +rd
   DCHECK(entry->skeleton.opcode != 0x0F ||
          (entry->skeleton.extra_opcode1 != 0x38 && entry->skeleton.extra_opcode1 != 0x3A));
@@ -712,7 +889,8 @@
 }
 
 void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) {
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, reg, NO_REG, NO_REG);
+  reg = LowRegisterBits(reg);
   if (RegStorage::RegNum(reg) >= 4) {
     DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " "
         << static_cast<int>(RegStorage::RegNum(reg))
@@ -726,7 +904,8 @@
 }
 
 void X86Mir2Lir::EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp) {
-  EmitPrefix(entry);
+  EmitPrefix(entry, NO_REG, NO_REG, base);
+  base = LowRegisterBits(base);
   code_buffer_.push_back(entry->skeleton.opcode);
   DCHECK_NE(0x0F, entry->skeleton.opcode);
   DCHECK_EQ(0, entry->skeleton.extra_opcode1);
@@ -738,15 +917,29 @@
 
 void X86Mir2Lir::EmitOpArray(const X86EncodingMap* entry, uint8_t base, uint8_t index,
                              int scale, int disp) {
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, NO_REG, index, base);
+  index = LowRegisterBits(index);
+  base = LowRegisterBits(base);
   EmitModrmSibDisp(entry->skeleton.modrm_opcode, base, index, scale, disp);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
+uint8_t X86Mir2Lir::LowRegisterBits(uint8_t reg) {
+  // Keep only the low 3 bits; the fourth bit is carried by the REX prefix.
+  return reg & kRegNumMask32;
+}
+
+bool X86Mir2Lir::NeedsRex(uint8_t reg) {
+  return RegStorage::RegNum(reg) > 7;
+}
+
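Together these two helpers split an extended register number into the REX bit and the 3-bit field that fits in ModRM/SIB. A standalone sketch of the split, assuming plain register numbers 0-15 rather than RegStorage-encoded values:

#include <cstdio>

int main() {
  for (unsigned reg = 0; reg < 16; ++reg) {
    bool needs_rex = reg > 7;   // analogous to NeedsRex()
    unsigned low = reg & 0x7;   // analogous to LowRegisterBits()
    printf("r%-2u -> rex_bit=%d low=%u\n", reg, needs_rex ? 1 : 0, low);
  }
  return 0;
}
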
 void X86Mir2Lir::EmitMemReg(const X86EncodingMap* entry,
                        uint8_t base, int disp, uint8_t reg) {
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, reg, NO_REG, base);
+  reg = LowRegisterBits(reg);
+  base = LowRegisterBits(base);
   if (RegStorage::RegNum(reg) >= 4) {
     DCHECK(strchr(entry->name, '8') == NULL ||
            entry->opcode == kX86Movzx8RM || entry->opcode == kX86Movsx8RM)
@@ -765,9 +958,12 @@
   EmitMemReg(entry, base, disp, reg);
 }
 
-void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
-                              int scale, int disp) {
-  EmitPrefixAndOpcode(entry);
+void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base,
+                              uint8_t index, int scale, int disp) {
+  EmitPrefixAndOpcode(entry, reg, index, base);
+  reg = LowRegisterBits(reg);
+  index = LowRegisterBits(index);
+  base = LowRegisterBits(base);
   EmitModrmSibDisp(reg, base, index, scale, disp);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
@@ -782,7 +978,9 @@
 
 void X86Mir2Lir::EmitArrayImm(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale,
                               int disp, int32_t imm) {
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, NO_REG, index, base);
+  index = LowRegisterBits(index);
+  base = LowRegisterBits(base);
   EmitModrmSibDisp(entry->skeleton.modrm_opcode, base, index, scale, disp);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   EmitImm(entry, imm);
@@ -790,7 +988,8 @@
 
 void X86Mir2Lir::EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp) {
   DCHECK_NE(entry->skeleton.prefix1, 0);
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, reg, NO_REG, NO_REG);
+  reg = LowRegisterBits(reg);
   if (RegStorage::RegNum(reg) >= 4) {
     DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " "
         << static_cast<int>(RegStorage::RegNum(reg))
@@ -808,7 +1007,9 @@
 }
 
 void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2) {
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, reg1, NO_REG, reg2);
+  reg1 = LowRegisterBits(reg1);
+  reg2 = LowRegisterBits(reg2);
   DCHECK_LT(RegStorage::RegNum(reg1), 8);
   DCHECK_LT(RegStorage::RegNum(reg2), 8);
   uint8_t modrm = (3 << 6) | (RegStorage::RegNum(reg1) << 3) | RegStorage::RegNum(reg2);
@@ -820,7 +1021,9 @@
 
 void X86Mir2Lir::EmitRegRegImm(const X86EncodingMap* entry,
                           uint8_t reg1, uint8_t reg2, int32_t imm) {
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, reg1, NO_REG, reg2);
+  reg1 = LowRegisterBits(reg1);
+  reg2 = LowRegisterBits(reg2);
   DCHECK_LT(RegStorage::RegNum(reg1), 8);
   DCHECK_LT(RegStorage::RegNum(reg2), 8);
   uint8_t modrm = (3 << 6) | (RegStorage::RegNum(reg1) << 3) | RegStorage::RegNum(reg2);
@@ -837,7 +1040,9 @@
 
 void X86Mir2Lir::EmitRegMemImm(const X86EncodingMap* entry,
                                uint8_t reg, uint8_t base, int disp, int32_t imm) {
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, reg, NO_REG, base);
+  reg = LowRegisterBits(reg);
+  base = LowRegisterBits(base);
   DCHECK(!RegStorage::IsFloat(reg));
   DCHECK_LT(RegStorage::RegNum(reg), 8);
   EmitModrmDisp(reg, base, disp);
@@ -852,10 +1057,11 @@
 }
 
 void X86Mir2Lir::EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
-  EmitPrefix(entry);
+  EmitPrefix(entry, NO_REG, NO_REG, reg);
   if (RegStorage::RegNum(reg) == rs_rAX.GetRegNum() && entry->skeleton.ax_opcode != 0) {
     code_buffer_.push_back(entry->skeleton.ax_opcode);
   } else {
+    reg = LowRegisterBits(reg);
     EmitOpcode(entry);
     uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | RegStorage::RegNum(reg);
     code_buffer_.push_back(modrm);
@@ -864,7 +1070,8 @@
 }
 
 void X86Mir2Lir::EmitMemImm(const X86EncodingMap* entry, uint8_t base, int disp, int32_t imm) {
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, base);
+  base = LowRegisterBits(base);
   EmitModrmDisp(entry->skeleton.modrm_opcode, base, disp);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   EmitImm(entry, imm);
@@ -881,17 +1088,37 @@
   DCHECK_EQ(entry->skeleton.ax_opcode, 0);
 }
 
-void X86Mir2Lir::EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
+void X86Mir2Lir::EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int64_t imm) {
+  EmitPrefix(entry, NO_REG, NO_REG, reg);
+  reg = LowRegisterBits(reg);
   DCHECK_LT(RegStorage::RegNum(reg), 8);
   code_buffer_.push_back(0xB8 + RegStorage::RegNum(reg));
-  code_buffer_.push_back(imm & 0xFF);
-  code_buffer_.push_back((imm >> 8) & 0xFF);
-  code_buffer_.push_back((imm >> 16) & 0xFF);
-  code_buffer_.push_back((imm >> 24) & 0xFF);
+  switch (entry->skeleton.immediate_bytes) {
+    case 4:
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
+      break;
+    case 8:
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
+      code_buffer_.push_back((imm >> 32) & 0xFF);
+      code_buffer_.push_back((imm >> 40) & 0xFF);
+      code_buffer_.push_back((imm >> 48) & 0xFF);
+      code_buffer_.push_back((imm >> 56) & 0xFF);
+      break;
+    default:
+      LOG(FATAL) << "Unsupported immediate size for EmitMovRegImm: "
+                 << static_cast<uint32_t>(entry->skeleton.immediate_bytes);
+  }
 }
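
For the 8-byte case this produces the classic movabs form: optional REX, opcode B8+rd, then the 64-bit immediate. A sketch (not ART code) of the byte sequence for a hypothetical mov r10, 0x1122334455667788:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint8_t> buf;
  buf.push_back(0x49);                                  // REX.W (0x48) | REX.B (0x01)
  buf.push_back(static_cast<uint8_t>(0xB8 + (10 & 0x7)));  // B8+rd, rd = low bits of r10
  int64_t imm = 0x1122334455667788LL;
  for (int i = 0; i < 8; ++i) {
    buf.push_back(static_cast<uint8_t>((imm >> (8 * i)) & 0xFF));
  }
  for (uint8_t b : buf) printf("%02x ", b);  // 49 ba 88 77 66 55 44 33 22 11
  printf("\n");
  return 0;
}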
 
 void X86Mir2Lir::EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
-  EmitPrefix(entry);
+  EmitPrefix(entry, NO_REG, NO_REG, reg);
+  reg = LowRegisterBits(reg);
   if (imm != 1) {
     code_buffer_.push_back(entry->skeleton.opcode);
   } else {
@@ -918,7 +1145,8 @@
 
 void X86Mir2Lir::EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl) {
   DCHECK_EQ(cl, static_cast<uint8_t>(rs_rCX.GetReg()));
-  EmitPrefix(entry);
+  EmitPrefix(entry, reg, NO_REG, NO_REG);
+  reg = LowRegisterBits(reg);
   code_buffer_.push_back(entry->skeleton.opcode);
   DCHECK_NE(0x0F, entry->skeleton.opcode);
   DCHECK_EQ(0, entry->skeleton.extra_opcode1);
@@ -933,7 +1161,8 @@
 void X86Mir2Lir::EmitShiftMemCl(const X86EncodingMap* entry, uint8_t base,
                                 int displacement, uint8_t cl) {
   DCHECK_EQ(cl, static_cast<uint8_t>(rs_rCX.GetReg()));
-  EmitPrefix(entry);
+  EmitPrefix(entry, NO_REG, NO_REG, base);
+  base = LowRegisterBits(base);
   code_buffer_.push_back(entry->skeleton.opcode);
   DCHECK_NE(0x0F, entry->skeleton.opcode);
   DCHECK_EQ(0, entry->skeleton.extra_opcode1);
@@ -946,7 +1175,8 @@
 
 void X86Mir2Lir::EmitShiftMemImm(const X86EncodingMap* entry, uint8_t base,
                                 int displacement, int imm) {
-  EmitPrefix(entry);
+  EmitPrefix(entry, NO_REG, NO_REG, base);
+  base = LowRegisterBits(base);
   if (imm != 1) {
     code_buffer_.push_back(entry->skeleton.opcode);
   } else {
@@ -965,7 +1195,8 @@
 }
 
 void X86Mir2Lir::EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition) {
-  EmitPrefix(entry);
+  EmitPrefix(entry, reg, NO_REG, NO_REG);
+  reg = LowRegisterBits(reg);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0x0F, entry->skeleton.opcode);
   code_buffer_.push_back(0x0F);
@@ -978,7 +1209,8 @@
   DCHECK_EQ(entry->skeleton.immediate_bytes, 0);
 }
 
-void X86Mir2Lir::EmitMemCond(const X86EncodingMap* entry, uint8_t base, int displacement, uint8_t condition) {
+void X86Mir2Lir::EmitMemCond(const X86EncodingMap* entry, uint8_t base, int displacement,
+                             uint8_t condition) {
   if (entry->skeleton.prefix1 != 0) {
     code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
@@ -1000,7 +1232,9 @@
 void X86Mir2Lir::EmitRegRegCond(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2,
                                 uint8_t condition) {
   // Generate prefix and opcode without the condition
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, reg1, NO_REG, reg2);
+  reg1 = LowRegisterBits(reg1);
+  reg2 = LowRegisterBits(reg2);
 
   // Now add the condition. The last byte of opcode is the one that receives it.
   DCHECK_LE(condition, 0xF);
@@ -1022,9 +1256,12 @@
   code_buffer_.push_back(modrm);
 }
 
-void X86Mir2Lir::EmitRegMemCond(const X86EncodingMap* entry, uint8_t reg1, uint8_t base, int displacement, uint8_t condition) {
+void X86Mir2Lir::EmitRegMemCond(const X86EncodingMap* entry, uint8_t reg1, uint8_t base,
+                                int displacement, uint8_t condition) {
   // Generate prefix and opcode without the condition
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, reg1, NO_REG, base);
+  reg1 = LowRegisterBits(reg1);
+  base = LowRegisterBits(base);
 
   // Now add the condition. The last byte of opcode is the one that receives it.
   DCHECK_LE(condition, 0xF);
@@ -1057,8 +1294,10 @@
     code_buffer_.push_back(rel & 0xFF);
   } else {
     DCHECK(entry->opcode == kX86JmpR);
-    code_buffer_.push_back(entry->skeleton.opcode);
     uint8_t reg = static_cast<uint8_t>(rel);
+    EmitPrefix(entry, NO_REG, NO_REG, reg);
+    code_buffer_.push_back(entry->skeleton.opcode);
+    reg = LowRegisterBits(reg);
     DCHECK_LT(RegStorage::RegNum(reg), 8);
     uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | RegStorage::RegNum(reg);
     code_buffer_.push_back(modrm);
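
The modrm computations in these emitters all follow the same 2-3-3 bit layout; a standalone sketch:

#include <cstdint>
#include <cstdio>

// ModRM layout: mod (2 bits) | reg (3 bits) | rm (3 bits).
// mod == 3 selects the register-direct form used by the reg-reg emitters.
uint8_t ModRM(uint8_t mod, uint8_t reg, uint8_t rm) {
  return static_cast<uint8_t>((mod << 6) | ((reg & 7) << 3) | (rm & 7));
}

int main() {
  printf("%#04x\n", static_cast<unsigned>(ModRM(3, 1, 0)));  // 0xc8
  return 0;
}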
@@ -1083,7 +1322,8 @@
 }
 
 void X86Mir2Lir::EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp) {
-  EmitPrefixAndOpcode(entry);
+  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, base);
+  base = LowRegisterBits(base);
   EmitModrmDisp(entry->skeleton.modrm_opcode, base, disp);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
@@ -1124,9 +1364,12 @@
         reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(base_or_table));
     disp = tab_rec->offset;
   }
-  EmitPrefix(entry);
-  DCHECK_LT(RegStorage::RegNum(reg), 8);
   if (entry->opcode == kX86PcRelLoadRA) {
+    EmitPrefix(entry, reg, index, base_or_table);
+    reg = LowRegisterBits(reg);
+    base_or_table = LowRegisterBits(base_or_table);
+    index = LowRegisterBits(index);
+    DCHECK_LT(RegStorage::RegNum(reg), 8);
     code_buffer_.push_back(entry->skeleton.opcode);
     DCHECK_NE(0x0F, entry->skeleton.opcode);
     DCHECK_EQ(0, entry->skeleton.extra_opcode1);
@@ -1141,6 +1384,7 @@
     code_buffer_.push_back(sib);
     DCHECK_EQ(0, entry->skeleton.immediate_bytes);
   } else {
+    DCHECK_LT(RegStorage::RegNum(reg), 8);
     code_buffer_.push_back(entry->skeleton.opcode + RegStorage::RegNum(reg));
   }
   code_buffer_.push_back(disp & 0xFF);
@@ -1153,6 +1397,8 @@
 
 void X86Mir2Lir::EmitMacro(const X86EncodingMap* entry, uint8_t reg, int offset) {
   DCHECK(entry->opcode == kX86StartOfMethod) << entry->name;
+  EmitPrefix(entry, reg, NO_REG, NO_REG);
+  reg = LowRegisterBits(reg);
   code_buffer_.push_back(0xE8);  // call +0
   code_buffer_.push_back(0);
   code_buffer_.push_back(0);
@@ -1343,7 +1589,6 @@
       case kRegOpcode:  // lir operands - 0: reg
         EmitOpRegOpcode(entry, lir->operands[0]);
         break;
-      case kReg64:
       case kReg:  // lir operands - 0: reg
         EmitOpReg(entry, lir->operands[0]);
         break;
@@ -1353,7 +1598,6 @@
       case kArray:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
         EmitOpArray(entry, lir->operands[0], lir->operands[1], lir->operands[2], lir->operands[3]);
         break;
-      case kMemReg64:
       case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
         EmitMemReg(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
         break;
@@ -1364,7 +1608,6 @@
         EmitArrayImm(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                      lir->operands[3], lir->operands[4]);
         break;
-      case kArrayReg64:
       case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
         EmitArrayReg(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                      lir->operands[3], lir->operands[4]);
@@ -1376,7 +1619,6 @@
         EmitRegArray(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                      lir->operands[3], lir->operands[4]);
         break;
-      case kReg64Thread:  // lir operands - 0: reg, 1: disp
       case kRegThread:  // lir operands - 0: reg, 1: disp
         EmitRegThread(entry, lir->operands[0], lir->operands[1]);
         break;
@@ -1400,7 +1642,6 @@
         EmitRegMemImm(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                       lir->operands[3]);
         break;
-      case kReg64Imm:
       case kRegImm:  // lir operands - 0: reg, 1: immediate
         EmitRegImm(entry, lir->operands[0], lir->operands[1]);
         break;
@@ -1432,7 +1673,8 @@
         EmitRegRegCond(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
         break;
      case kRegMemCond:  // lir operands - 0: reg, 1: base, 2: displacement, 3: condition
-        EmitRegMemCond(entry, lir->operands[0], lir->operands[1], lir->operands[2], lir->operands[3]);
+        EmitRegMemCond(entry, lir->operands[0], lir->operands[1], lir->operands[2],
+                       lir->operands[3]);
         break;
       case kJmp:  // lir operands - 0: rel
         if (entry->opcode == kX86JmpT) {
@@ -1466,7 +1708,7 @@
         EmitPcRel(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                   lir->operands[3], lir->operands[4]);
         break;
-      case kMacro:
+      case kMacro:  // lir operands - 0: reg
         EmitMacro(entry, lir->operands[0], lir->offset);
         break;
       default:
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 3070edd..d66790d 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -118,6 +118,7 @@
     bool GenInlinedSqrt(CallInfo* info);
     bool GenInlinedPeek(CallInfo* info, OpSize size);
     bool GenInlinedPoke(CallInfo* info, OpSize size);
+    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
     void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
     void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                    RegLocation rl_src2);
@@ -125,6 +126,8 @@
                     RegLocation rl_src2);
     void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                     RegLocation rl_src2);
+    void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                       RegLocation rl_src2, bool is_div);
     // TODO: collapse reg_lo, reg_hi
     RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
@@ -328,15 +331,21 @@
     std::vector<uint8_t>* ReturnCallFrameInformation();
 
   protected:
-    size_t ComputeSize(const X86EncodingMap* entry, int base, int displacement, bool has_sib);
+    size_t ComputeSize(const X86EncodingMap* entry, int base, int displacement,
+                       int reg_r, int reg_x, bool has_sib);
+    uint8_t LowRegisterBits(uint8_t reg);
+    bool NeedsRex(uint8_t reg);
     void EmitPrefix(const X86EncodingMap* entry);
+    void EmitPrefix(const X86EncodingMap* entry, uint8_t reg_r, uint8_t reg_x, uint8_t reg_b);
     void EmitOpcode(const X86EncodingMap* entry);
     void EmitPrefixAndOpcode(const X86EncodingMap* entry);
+    void EmitPrefixAndOpcode(const X86EncodingMap* entry,
+                             uint8_t reg_r, uint8_t reg_x, uint8_t reg_b);
     void EmitDisp(uint8_t base, int disp);
     void EmitModrmThread(uint8_t reg_or_opcode);
     void EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int disp);
     void EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index, int scale, int disp);
-    void EmitImm(const X86EncodingMap* entry, int imm);
+    void EmitImm(const X86EncodingMap* entry, int64_t imm);
     void EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg);
     void EmitOpReg(const X86EncodingMap* entry, uint8_t reg);
     void EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp);
@@ -359,7 +368,7 @@
     void EmitMemRegImm(const X86EncodingMap* entry, uint8_t base, int disp, uint8_t reg1, int32_t imm);
     void EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
     void EmitThreadImm(const X86EncodingMap* entry, int disp, int imm);
-    void EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
+    void EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int64_t imm);
     void EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
     void EmitShiftMemImm(const X86EncodingMap* entry, uint8_t base, int disp, int imm);
     void EmitShiftMemCl(const X86EncodingMap* entry, uint8_t base, int displacement, uint8_t cl);
@@ -426,6 +435,136 @@
     void GenConst128(BasicBlock* bb, MIR* mir);
 
     /*
+     * @brief MIR to move a vectorized register to another.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpMoveVector.
+     * @note vA: TypeSize
+     * @note vB: destination
+     * @note vC: source
+     */
+    void GenMoveVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know the type of the vector.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedMultiply.
+     * @note vA: TypeSize
+     * @note vB: destination and source
+     * @note vC: source
+     */
+    void GenMultiplyVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the type of the vector.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedAddition.
+     * @note vA: TypeSize
+     * @note vB: destination and source
+     * @note vC: source
+     */
+    void GenAddVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the type of the vector.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedSubtract.
+     * @note vA: TypeSize
+     * @note vB: destination and source
+     * @note vC: source
+     */
+    void GenSubtractVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the type of the vector.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedShiftLeft.
+     * @note vA: TypeSize
+     * @note vB: destination and source
+     * @note vC: immediate
+     */
+    void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to know the type of the vector.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedSignedShiftRight.
+     * @note vA: TypeSize
+     * @note vB: destination and source
+     * @note vC: immediate
+     */
+    void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA to know the type of the vector.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedUnsignedShiftRight.
+     * @note vA: TypeSize
+     * @note vB: destination and source
+     * @note vC: immediate
+     */
+    void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the type of the vector.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedAnd.
+     * @note vA: TypeSize
+     * @note vB: destination and source
+     * @note vC: source
+     */
+    void GenAndVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the type of the vector.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedOr.
+     * @note vA: TypeSize
+     * @note vB: destination and source
+     * @note vC: source
+     */
+    void GenOrVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the type of the vector.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedXor.
+     * @note vA: TypeSize
+     * @note vB: destination and source
+     * @note vC: source
+     */
+    void GenXorVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Reduce the packed elements of a 128-bit vector register into a single VR by taking the lower bits.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedAddReduce.
+     * @details The instruction does a horizontal addition of the packed elements and then adds the result to the VR.
+     * @note vA: TypeSize
+     * @note vB: destination and source VR (not vector register)
+     * @note vC: source (vector register)
+     */
+    void GenAddReduceVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Extract a packed element into a single VR.
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedReduce.
+     * @note vA: TypeSize
+     * @note vB: destination VR (not vector register)
+     * @note vC: source (vector register)
+     * @note arg[0]: The index to use for extraction from vector register (which packed element).
+     */
+    void GenReduceVector(BasicBlock *bb, MIR *mir);
+
+    /*
+     * @brief Create a vector value, with all TypeSize values equal to vC
+     * @param bb The basic block in which the MIR is from.
+     * @param mir The MIR whose opcode is kMirOpPackedSet.
+     * @note vA: TypeSize.
+     * @note vB: destination vector register.
+     * @note vC: source VR (not vector register).
+     */
+    void GenSetVector(BasicBlock *bb, MIR *mir);
+
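Each of these handlers decodes the packed vA field the same way, as the DCHECKs in their implementations show: the low 16 bits carry the vector width in bits and the high 16 bits carry the element type. A hypothetical decoder, using a plain integer id in place of the ART-internal OpSize enum:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t vA = (2u << 16) | 128u;     // e.g. type id 2, 128-bit vector
  uint32_t width_bits = vA & 0xFFFF;   // always 128 in this change
  uint32_t type_id = vA >> 16;         // element size (OpSize in ART)
  printf("width=%u bits, type id=%u\n", width_bits, type_id);
  return 0;
}
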
+    /*
      * @brief Generate code for a vector opcode.
      * @param bb The basic block in which the MIR is from.
      * @param mir The MIR whose opcode is a non-standard opcode.
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index a6ccc99..48bff6e 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1372,6 +1372,15 @@
   GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
 }
 
+void X86Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+  LOG(FATAL) << "Unexpected use GenNotLong()";
+}
+
+void X86Mir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                               RegLocation rl_src2, bool is_div) {
+  LOG(FATAL) << "Unexpected use of GenDivRemLong()";
+}
+
 void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
   RegLocation rl_result = ForceTempWide(rl_src);
@@ -2189,6 +2198,10 @@
           }
         }
         rl_rhs = LoadValue(rl_rhs, kCoreReg);
+        // rl_rhs and rl_dest may refer to the same VR. In that case rl_dest is
+        // in a register after LoadValue, but rl_result has not been updated
+        // yet, so refresh it here.
+        rl_result = UpdateLocTyped(rl_dest, kCoreReg);
         if (rl_result.location != kLocPhysReg) {
           // Okay, we can do this into memory.
           OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index e7a629a..8b34168 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -35,6 +35,12 @@
     rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
 #endif
 };
+static const RegStorage core_regs_arr_64q[] = {
+    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
+#ifdef TARGET_REX_SUPPORT
+    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
+#endif
+};
 static const RegStorage sp_regs_arr_32[] = {
     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
 };
@@ -55,6 +61,7 @@
 };
 static const RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
 static const RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_64};
+static const RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
 static const RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
 static const RegStorage core_temps_arr_64[] = {
     rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
@@ -62,6 +69,12 @@
     rs_r8, rs_r9, rs_r10, rs_r11
 #endif
 };
+static const RegStorage core_temps_arr_64q[] = {
+    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
+#ifdef TARGET_REX_SUPPORT
+    rs_r8q, rs_r9q, rs_r10q, rs_r11q
+#endif
+};
 static const RegStorage sp_temps_arr_32[] = {
     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
 };
@@ -81,11 +94,23 @@
 #endif
 };
 
+static const RegStorage xp_temps_arr_32[] = {
+    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
+};
+static const RegStorage xp_temps_arr_64[] = {
+    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
+#ifdef TARGET_REX_SUPPORT
+    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
+#endif
+};
+
 static const std::vector<RegStorage> empty_pool;
 static const std::vector<RegStorage> core_regs_32(core_regs_arr_32,
     core_regs_arr_32 + sizeof(core_regs_arr_32) / sizeof(core_regs_arr_32[0]));
 static const std::vector<RegStorage> core_regs_64(core_regs_arr_64,
     core_regs_arr_64 + sizeof(core_regs_arr_64) / sizeof(core_regs_arr_64[0]));
+static const std::vector<RegStorage> core_regs_64q(core_regs_arr_64q,
+    core_regs_arr_64q + sizeof(core_regs_arr_64q) / sizeof(core_regs_arr_64q[0]));
 static const std::vector<RegStorage> sp_regs_32(sp_regs_arr_32,
     sp_regs_arr_32 + sizeof(sp_regs_arr_32) / sizeof(sp_regs_arr_32[0]));
 static const std::vector<RegStorage> sp_regs_64(sp_regs_arr_64,
@@ -98,10 +123,14 @@
     reserved_regs_arr_32 + sizeof(reserved_regs_arr_32) / sizeof(reserved_regs_arr_32[0]));
 static const std::vector<RegStorage> reserved_regs_64(reserved_regs_arr_64,
     reserved_regs_arr_64 + sizeof(reserved_regs_arr_64) / sizeof(reserved_regs_arr_64[0]));
+static const std::vector<RegStorage> reserved_regs_64q(reserved_regs_arr_64q,
+    reserved_regs_arr_64q + sizeof(reserved_regs_arr_64q) / sizeof(reserved_regs_arr_64q[0]));
 static const std::vector<RegStorage> core_temps_32(core_temps_arr_32,
     core_temps_arr_32 + sizeof(core_temps_arr_32) / sizeof(core_temps_arr_32[0]));
 static const std::vector<RegStorage> core_temps_64(core_temps_arr_64,
     core_temps_arr_64 + sizeof(core_temps_arr_64) / sizeof(core_temps_arr_64[0]));
+static const std::vector<RegStorage> core_temps_64q(core_temps_arr_64q,
+    core_temps_arr_64q + sizeof(core_temps_arr_64q) / sizeof(core_temps_arr_64q[0]));
 static const std::vector<RegStorage> sp_temps_32(sp_temps_arr_32,
     sp_temps_arr_32 + sizeof(sp_temps_arr_32) / sizeof(sp_temps_arr_32[0]));
 static const std::vector<RegStorage> sp_temps_64(sp_temps_arr_64,
@@ -111,6 +140,11 @@
 static const std::vector<RegStorage> dp_temps_64(dp_temps_arr_64,
     dp_temps_arr_64 + sizeof(dp_temps_arr_64) / sizeof(dp_temps_arr_64[0]));
 
+static const std::vector<RegStorage> xp_temps_32(xp_temps_arr_32,
+    xp_temps_arr_32 + sizeof(xp_temps_arr_32) / sizeof(xp_temps_arr_32[0]));
+static const std::vector<RegStorage> xp_temps_64(xp_temps_arr_64,
+    xp_temps_arr_64 + sizeof(xp_temps_arr_64) / sizeof(xp_temps_arr_64[0]));
+
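These tables use the pre-C++11 idiom of constructing a std::vector from a static array through the iterator-pair constructor, with the length computed as sizeof(arr)/sizeof(arr[0]). A standalone sketch:

#include <cstdio>
#include <vector>

static const int arr[] = {1, 2, 3, 4};
static const std::vector<int> vec(arr, arr + sizeof(arr) / sizeof(arr[0]));

int main() {
  printf("%zu elements, last=%d\n", vec.size(), vec.back());  // 4 elements, last=4
  return 0;
}
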
 RegStorage rs_rX86_SP;
 
 X86NativeRegisterPool rX86_ARG0;
@@ -209,7 +243,7 @@
   /* Double registers in x86 are just a single FP register */
   seed = 1;
   /* FP register starts at bit position 16 */
-  shift = reg.IsFloat() ? kX86FPReg0 : 0;
+  shift = (reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0;
   /* Expand the double register id into single offset */
   shift += reg_id;
   return (seed << shift);
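 
 The updated condition means 128-bit vector registers now share the FP bit range of the def/use mask. A sketch of the resulting masks, assuming kX86FPReg0 == 16 as the comment above states:

#include <cstdint>
#include <cstdio>

int main() {
  const int kFPReg0 = 16;  // assumed value of kX86FPReg0
  int reg_id = 3;
  uint64_t core_mask = UINT64_C(1) << reg_id;            // core reg: bit 3
  uint64_t fp_mask = UINT64_C(1) << (kFPReg0 + reg_id);  // xmm3: bit 19
  printf("core=%#llx fp=%#llx\n",
         static_cast<unsigned long long>(core_mask),
         static_cast<unsigned long long>(fp_mask));
  return 0;
}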
@@ -531,9 +565,9 @@
 
 void X86Mir2Lir::CompilerInitializeRegAlloc() {
   if (Gen64Bit()) {
-    reg_pool_ = new (arena_) RegisterPool(this, arena_, empty_pool, core_regs_64, sp_regs_64,
-                                          dp_regs_64, empty_pool, reserved_regs_64,
-                                          empty_pool, core_temps_64, sp_temps_64, dp_temps_64);
+    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, empty_pool/*core_regs_64q*/, sp_regs_64,
+                                          dp_regs_64, reserved_regs_64, empty_pool/*reserved_regs_64q*/,
+                                          core_temps_64, empty_pool/*core_temps_64q*/, sp_temps_64, dp_temps_64);
   } else {
     reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                           dp_regs_32, reserved_regs_32, empty_pool,
@@ -542,17 +576,31 @@
 
   // Target-specific adjustments.
 
+  // Add in XMM registers.
+  const std::vector<RegStorage> *xp_temps = Gen64Bit() ? &xp_temps_64 : &xp_temps_32;
+  for (RegStorage reg : *xp_temps) {
+    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
+    reginfo_map_.Put(reg.GetReg(), info);
+    info->SetIsTemp(true);
+  }
+
   // Alias single precision xmm to double xmms.
   // TODO: as needed, add larger vector sizes - alias all to the largest.
   GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
   for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
     int sp_reg_num = info->GetReg().GetRegNum();
+    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
+    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
+    // 128-bit xmm vector register's master storage should refer to itself.
+    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());
+
+    // Redirect 32-bit vector's master storage to 128-bit vector.
+    info->SetMaster(xp_reg_info);
+
     RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | sp_reg_num);
     RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
-    // 64-bit xmm vector register's master storage should refer to itself.
-    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
-    // Redirect 32-bit vector's master storage to 64-bit vector.
-    info->SetMaster(dp_reg_info);
+    // Redirect 64-bit vector's master storage to 128-bit vector.
+    dp_reg_info->SetMaster(xp_reg_info);
   }
 
   // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
@@ -1240,6 +1288,45 @@
     case kMirOpConstVector:
       GenConst128(bb, mir);
       break;
+    case kMirOpMoveVector:
+      GenMoveVector(bb, mir);
+      break;
+    case kMirOpPackedMultiply:
+      GenMultiplyVector(bb, mir);
+      break;
+    case kMirOpPackedAddition:
+      GenAddVector(bb, mir);
+      break;
+    case kMirOpPackedSubtract:
+      GenSubtractVector(bb, mir);
+      break;
+    case kMirOpPackedShiftLeft:
+      GenShiftLeftVector(bb, mir);
+      break;
+    case kMirOpPackedSignedShiftRight:
+      GenSignedShiftRightVector(bb, mir);
+      break;
+    case kMirOpPackedUnsignedShiftRight:
+      GenUnsignedShiftRightVector(bb, mir);
+      break;
+    case kMirOpPackedAnd:
+      GenAndVector(bb, mir);
+      break;
+    case kMirOpPackedOr:
+      GenOrVector(bb, mir);
+      break;
+    case kMirOpPackedXor:
+      GenXorVector(bb, mir);
+      break;
+    case kMirOpPackedAddReduce:
+      GenAddReduceVector(bb, mir);
+      break;
+    case kMirOpPackedReduce:
+      GenReduceVector(bb, mir);
+      break;
+    case kMirOpPackedSet:
+      GenSetVector(bb, mir);
+      break;
     default:
       break;
   }
@@ -1249,9 +1336,9 @@
   int type_size = mir->dalvikInsn.vA;
   // We support 128 bit vectors.
   DCHECK_EQ(type_size & 0xFFFF, 128);
-  int reg = mir->dalvikInsn.vB;
-  DCHECK_LT(reg, 8);
+  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
   uint32_t *args = mir->dalvikInsn.arg;
+  int reg = rs_dest.GetReg();
   // Check for all 0 case.
   if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
     NewLIR2(kX86XorpsRR, reg, reg);
@@ -1277,6 +1364,287 @@
   SetMemRefType(load, true, kLiteral);
 }
 
+void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
+  // We only support 128 bit registers.
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
+  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC);
+  NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
+}
+
+void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
+  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
+  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
+  int opcode = 0;
+  switch (opsize) {
+    case k32:
+      opcode = kX86PmulldRR;
+      break;
+    case kSignedHalf:
+      opcode = kX86PmullwRR;
+      break;
+    case kSingle:
+      opcode = kX86MulpsRR;
+      break;
+    case kDouble:
+      opcode = kX86MulpdRR;
+      break;
+    default:
+      LOG(FATAL) << "Unsupported vector multiply " << opsize;
+      break;
+  }
+  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
+}
+
+void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
+  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
+  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
+  int opcode = 0;
+  switch (opsize) {
+    case k32:
+      opcode = kX86PadddRR;
+      break;
+    case kSignedHalf:
+    case kUnsignedHalf:
+      opcode = kX86PaddwRR;
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      opcode = kX86PaddbRR;
+      break;
+    case kSingle:
+      opcode = kX86AddpsRR;
+      break;
+    case kDouble:
+      opcode = kX86AddpdRR;
+      break;
+    default:
+      LOG(FATAL) << "Unsupported vector addition " << opsize;
+      break;
+  }
+  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
+}
+
+void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
+  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
+  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
+  int opcode = 0;
+  switch (opsize) {
+    case k32:
+      opcode = kX86PsubdRR;
+      break;
+    case kSignedHalf:
+    case kUnsignedHalf:
+      opcode = kX86PsubwRR;
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      opcode = kX86PsubbRR;
+      break;
+    case kSingle:
+      opcode = kX86SubpsRR;
+      break;
+    case kDouble:
+      opcode = kX86SubpdRR;
+      break;
+    default:
+      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
+      break;
+  }
+  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
+}
+
+void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
+  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
+  int imm = mir->dalvikInsn.vC;
+  int opcode = 0;
+  switch (opsize) {
+    case k32:
+      opcode = kX86PslldRI;
+      break;
+    case k64:
+      opcode = kX86PsllqRI;
+      break;
+    case kSignedHalf:
+    case kUnsignedHalf:
+      opcode = kX86PsllwRI;
+      break;
+    default:
+      LOG(FATAL) << "Unsupported vector shift left " << opsize;
+      break;
+  }
+  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
+}
+
+void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
+  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
+  int imm = mir->dalvikInsn.vC;
+  int opcode = 0;
+  switch (opsize) {
+    case k32:
+      opcode = kX86PsradRI;
+      break;
+    case kSignedHalf:
+    case kUnsignedHalf:
+      opcode = kX86PsrawRI;
+      break;
+    default:
+      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
+      break;
+  }
+  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
+}
+
+void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
+  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
+  int imm = mir->dalvikInsn.vC;
+  int opcode = 0;
+  switch (opsize) {
+    case k32:
+      opcode = kX86PsrldRI;
+      break;
+    case k64:
+      opcode = kX86PsrlqRI;
+      break;
+    case kSignedHalf:
+    case kUnsignedHalf:
+      opcode = kX86PsrlwRI;
+      break;
+    default:
+      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
+      break;
+  }
+  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
+}
+
+void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
+  // We only support 128 bit registers.
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
+  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
+  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
+}
+
+void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
+  // We only support 128 bit registers.
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
+  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
+  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
+}
+
+void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
+  // We only support 128 bit registers.
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
+  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
+  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
+}
+
+void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
+  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
+  int imm = mir->dalvikInsn.vC;
+  int opcode = 0;
+  switch (opsize) {
+    case k32:
+      opcode = kX86PhadddRR;
+      break;
+    case kSignedHalf:
+    case kUnsignedHalf:
+      opcode = kX86PhaddwRR;
+      break;
+    default:
+      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
+      break;
+  }
+  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
+}
+
+void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
+  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
+  int index = mir->dalvikInsn.arg[0];
+  int opcode = 0;
+  switch (opsize) {
+    case k32:
+      opcode = kX86PextrdRRI;
+      break;
+    case kSignedHalf:
+    case kUnsignedHalf:
+      opcode = kX86PextrwRRI;
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      opcode = kX86PextrbRRI;
+      break;
+    default:
+      LOG(FATAL) << "Unsupported vector reduce " << opsize;
+      break;
+  }
+  // We need to extract to a GPR.
+  RegStorage temp = AllocTemp();
+  NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index);
+
+  // Assume that the destination VR is in the def for the mir.
+  RegLocation rl_dest = mir_graph_->GetDest(mir);
+  RegLocation rl_temp =
+    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG};
+  StoreValue(rl_dest, rl_temp);
+}
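
The pextr family used here needs SSE4.1 for the dword form. A sketch of the same extraction with intrinsics (compile with -msse4.1); the constant index plays the role of the 'index' operand above:

#include <cstdio>
#include <smmintrin.h>  // SSE4.1: pextrd

int main() {
  __m128i v = _mm_set_epi32(40, 30, 20, 10);  // elements [3..0]
  int e = _mm_extract_epi32(v, 2);            // pextrd: element 2
  printf("%d\n", e);                          // 30
  return 0;
}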
+
+void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
+  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
+  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
+  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
+  int op_low = 0, op_high = 0;
+  switch (opsize) {
+    case k32:
+      op_low = kX86PshufdRRI;
+      break;
+    case kSignedHalf:
+    case kUnsignedHalf:
+      // Handles low quadword.
+      op_low = kX86PshuflwRRI;
+      // Handles upper quadword.
+      op_high = kX86PshufdRRI;
+      break;
+    default:
+      LOG(FATAL) << "Unsupported vector set " << opsize;
+      break;
+  }
+
+  // Load the value from the VR into a GPR.
+  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
+  rl_src = LoadValue(rl_src, kCoreReg);
+
+  // Load the value into the XMM register.
+  NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg());
+
+  // Now shuffle the value across the destination.
+  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0);
+
+  // And then repeat as needed.
+  if (op_high != 0) {
+    NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), 0);
+  }
+}
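
The movd + pshuflw + pshufd sequence above broadcasts a 16-bit value into all eight lanes. A standalone sketch of the same sequence with SSE2 intrinsics (not ART code):

#include <cstdint>
#include <cstdio>
#include <emmintrin.h>  // SSE2

int main() {
  __m128i x = _mm_cvtsi32_si128(0x1234);  // movd xmm, r32
  x = _mm_shufflelo_epi16(x, 0);          // replicate word 0 across the low quadword
  x = _mm_shuffle_epi32(x, 0);            // replicate dword 0 across the register
  uint16_t out[8];
  _mm_storeu_si128(reinterpret_cast<__m128i*>(out), x);
  for (int i = 0; i < 8; ++i) printf("%04x ", out[i]);  // 1234 x 8
  printf("\n");
  return 0;
}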
+
 LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
   int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
   for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index fed31c1..092e68e 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -89,7 +89,11 @@
     res = NewLIR2(kX86Xor32RR, r_dest.GetReg(), r_dest.GetReg());
   } else {
     // Note, there is no byte immediate form of a 32 bit immediate move.
-    res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
+    if (r_dest.Is64Bit()) {
+      res = NewLIR2(kX86Mov64RI, r_dest.GetReg(), value);
+    } else {
+      res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
+    }
   }
 
   if (r_dest_save.IsFloat()) {
@@ -181,7 +185,6 @@
         LOG(FATAL) << "Bad case in OpRegImm " << op;
     }
   }
-  CHECK(!r_dest_src1.Is64Bit() || X86Mir2Lir::EncodingMap[opcode].kind == kReg64Imm) << "OpRegImm(" << op << ")";
   return NewLIR2(opcode, r_dest_src1.GetReg(), value);
 }
 
@@ -559,7 +562,7 @@
         // We don't know the proper offset for the value, so pick one that will force
         // 4 byte offset.  We will fix this up in the assembler later to have the right
         // value.
-        res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::Solo64(low_reg_val),
+        res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
                            kDouble);
         res->target = data_target;
         res->flags.fixup = kFixupLoad;
@@ -866,7 +869,7 @@
 
   for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int opcode = mir->dalvikInsn.opcode;
-    if (opcode >= kMirOpFirst) {
+    if (MIRGraph::IsPseudoMirOp(opcode)) {
       AnalyzeExtendedMIR(opcode, bb, mir);
     } else {
       AnalyzeMIR(opcode, bb, mir);
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index adfed0c..bb8df89 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -117,41 +117,56 @@
 // FIXME: for 64-bit, perhaps add an X86_64NativeRegisterPool enum?
 enum X86NativeRegisterPool {
   r0             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
+  r0q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
   rAX            = r0,
   r1             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
+  r1q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
   rCX            = r1,
   r2             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
+  r2q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
   rDX            = r2,
   r3             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
+  r3q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
   rBX            = r3,
   r4sp_32        = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
   rX86_SP_32     = r4sp_32,
   r4sp_64        = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
   rX86_SP_64     = r4sp_64,
   r5             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
+  r5q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
   rBP            = r5,
   r5sib_no_base  = r5,
   r6             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
+  r6q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
   rSI            = r6,
   r7             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
+  r7q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
   rDI            = r7,
 #ifndef TARGET_REX_SUPPORT
   // fake return address register for core spill mask.
   rRET           = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
 #else
   r8             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
+  r8q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
   r9             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
+  r9q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
   r10            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
+  r10q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
   r11            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
+  r11q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
   r12            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
+  r12q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
   r13            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
+  r13q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
   r14            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
+  r14q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
   r15            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
+  r15q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
   // fake return address register for core spill mask.
   rRET           = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
 #endif
 
-  // xmm registers, single precision view
+  // xmm registers, single precision view.
   fr0  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
   fr1  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
   fr2  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 2,
@@ -160,8 +175,18 @@
   fr5  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 5,
   fr6  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 6,
   fr7  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 7,
+#ifdef TARGET_REX_SUPPORT
+  fr8  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 8,
+  fr9  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 9,
+  fr10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
+  fr11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
+  fr12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
+  fr13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
+  fr14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
+  fr15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
+#endif
 
-  // xmm registers, double precision alises
+  // xmm registers, double precision aliases.
   dr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
   dr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
   dr2  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
@@ -170,38 +195,83 @@
   dr5  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
   dr6  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
   dr7  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
+#ifdef TARGET_REX_SUPPORT
+  dr8  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
+  dr9  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
+  dr10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+  dr11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+  dr12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+  dr13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+  dr14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+  dr15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
+#endif
 
-  // xmm registers, quad precision alises
-  qr0  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 0,
-  qr1  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 1,
-  qr2  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 2,
-  qr3  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 3,
-  qr4  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 4,
-  qr5  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 5,
-  qr6  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 6,
-  qr7  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 7,
+  // xmm registers, quad precision aliases
+  xr0  = RegStorage::k128BitSolo | 0,
+  xr1  = RegStorage::k128BitSolo | 1,
+  xr2  = RegStorage::k128BitSolo | 2,
+  xr3  = RegStorage::k128BitSolo | 3,
+  xr4  = RegStorage::k128BitSolo | 4,
+  xr5  = RegStorage::k128BitSolo | 5,
+  xr6  = RegStorage::k128BitSolo | 6,
+  xr7  = RegStorage::k128BitSolo | 7,
+#ifdef TARGET_REX_SUPPORT
+  xr8  = RegStorage::k128BitSolo | 8,
+  xr9  = RegStorage::k128BitSolo | 9,
+  xr10 = RegStorage::k128BitSolo | 10,
+  xr11 = RegStorage::k128BitSolo | 11,
+  xr12 = RegStorage::k128BitSolo | 12,
+  xr13 = RegStorage::k128BitSolo | 13,
+  xr14 = RegStorage::k128BitSolo | 14,
+  xr15 = RegStorage::k128BitSolo | 15,
+#endif
 
   // TODO: as needed, add 256, 512 and 1024-bit xmm views.
 };
 
 constexpr RegStorage rs_r0(RegStorage::kValid | r0);
+constexpr RegStorage rs_r0q(RegStorage::kValid | r0q);
 constexpr RegStorage rs_rAX = rs_r0;
 constexpr RegStorage rs_r1(RegStorage::kValid | r1);
+constexpr RegStorage rs_r1q(RegStorage::kValid | r1q);
 constexpr RegStorage rs_rCX = rs_r1;
 constexpr RegStorage rs_r2(RegStorage::kValid | r2);
+constexpr RegStorage rs_r2q(RegStorage::kValid | r2q);
 constexpr RegStorage rs_rDX = rs_r2;
 constexpr RegStorage rs_r3(RegStorage::kValid | r3);
+constexpr RegStorage rs_r3q(RegStorage::kValid | r3q);
 constexpr RegStorage rs_rBX = rs_r3;
 constexpr RegStorage rs_rX86_SP_64(RegStorage::kValid | r4sp_64);
 constexpr RegStorage rs_rX86_SP_32(RegStorage::kValid | r4sp_32);
 extern RegStorage rs_rX86_SP;
 constexpr RegStorage rs_r5(RegStorage::kValid | r5);
+constexpr RegStorage rs_r5q(RegStorage::kValid | r5q);
 constexpr RegStorage rs_rBP = rs_r5;
 constexpr RegStorage rs_r6(RegStorage::kValid | r6);
+constexpr RegStorage rs_r6q(RegStorage::kValid | r6q);
 constexpr RegStorage rs_rSI = rs_r6;
 constexpr RegStorage rs_r7(RegStorage::kValid | r7);
+constexpr RegStorage rs_r7q(RegStorage::kValid | r7q);
 constexpr RegStorage rs_rDI = rs_r7;
 constexpr RegStorage rs_rRET(RegStorage::kValid | rRET);
+#ifdef TARGET_REX_SUPPORT
+constexpr RegStorage rs_r8(RegStorage::kValid | r8);
+constexpr RegStorage rs_r8q(RegStorage::kValid | r8q);
+constexpr RegStorage rs_r9(RegStorage::kValid | r9);
+constexpr RegStorage rs_r9q(RegStorage::kValid | r9q);
+constexpr RegStorage rs_r10(RegStorage::kValid | r10);
+constexpr RegStorage rs_r10q(RegStorage::kValid | r10q);
+constexpr RegStorage rs_r11(RegStorage::kValid | r11);
+constexpr RegStorage rs_r11q(RegStorage::kValid | r11q);
+constexpr RegStorage rs_r12(RegStorage::kValid | r12);
+constexpr RegStorage rs_r12q(RegStorage::kValid | r12q);
+constexpr RegStorage rs_r13(RegStorage::kValid | r13);
+constexpr RegStorage rs_r13q(RegStorage::kValid | r13q);
+constexpr RegStorage rs_r14(RegStorage::kValid | r14);
+constexpr RegStorage rs_r14q(RegStorage::kValid | r14q);
+constexpr RegStorage rs_r15(RegStorage::kValid | r15);
+constexpr RegStorage rs_r15q(RegStorage::kValid | r15q);
+#endif
 
 constexpr RegStorage rs_fr0(RegStorage::kValid | fr0);
 constexpr RegStorage rs_fr1(RegStorage::kValid | fr1);
@@ -211,6 +281,16 @@
 constexpr RegStorage rs_fr5(RegStorage::kValid | fr5);
 constexpr RegStorage rs_fr6(RegStorage::kValid | fr6);
 constexpr RegStorage rs_fr7(RegStorage::kValid | fr7);
+#ifdef TARGET_REX_SUPPORT
+constexpr RegStorage rs_fr8(RegStorage::kValid | fr8);
+constexpr RegStorage rs_fr9(RegStorage::kValid | fr9);
+constexpr RegStorage rs_fr10(RegStorage::kValid | fr10);
+constexpr RegStorage rs_fr11(RegStorage::kValid | fr11);
+constexpr RegStorage rs_fr12(RegStorage::kValid | fr12);
+constexpr RegStorage rs_fr13(RegStorage::kValid | fr13);
+constexpr RegStorage rs_fr14(RegStorage::kValid | fr14);
+constexpr RegStorage rs_fr15(RegStorage::kValid | fr15);
+#endif
 
 constexpr RegStorage rs_dr0(RegStorage::kValid | dr0);
 constexpr RegStorage rs_dr1(RegStorage::kValid | dr1);
@@ -220,15 +300,35 @@
 constexpr RegStorage rs_dr5(RegStorage::kValid | dr5);
 constexpr RegStorage rs_dr6(RegStorage::kValid | dr6);
 constexpr RegStorage rs_dr7(RegStorage::kValid | dr7);
+#ifdef TARGET_REX_SUPPORT
+constexpr RegStorage rs_dr8(RegStorage::kValid | dr8);
+constexpr RegStorage rs_dr9(RegStorage::kValid | dr9);
+constexpr RegStorage rs_dr10(RegStorage::kValid | dr10);
+constexpr RegStorage rs_dr11(RegStorage::kValid | dr11);
+constexpr RegStorage rs_dr12(RegStorage::kValid | dr12);
+constexpr RegStorage rs_dr13(RegStorage::kValid | dr13);
+constexpr RegStorage rs_dr14(RegStorage::kValid | dr14);
+constexpr RegStorage rs_dr15(RegStorage::kValid | dr15);
+#endif
 
-constexpr RegStorage rs_qr0(RegStorage::kValid | qr0);
-constexpr RegStorage rs_qr1(RegStorage::kValid | qr1);
-constexpr RegStorage rs_qr2(RegStorage::kValid | qr2);
-constexpr RegStorage rs_qr3(RegStorage::kValid | qr3);
-constexpr RegStorage rs_qr4(RegStorage::kValid | qr4);
-constexpr RegStorage rs_qr5(RegStorage::kValid | qr5);
-constexpr RegStorage rs_qr6(RegStorage::kValid | qr6);
-constexpr RegStorage rs_qr7(RegStorage::kValid | qr7);
+constexpr RegStorage rs_xr0(RegStorage::kValid | xr0);
+constexpr RegStorage rs_xr1(RegStorage::kValid | xr1);
+constexpr RegStorage rs_xr2(RegStorage::kValid | xr2);
+constexpr RegStorage rs_xr3(RegStorage::kValid | xr3);
+constexpr RegStorage rs_xr4(RegStorage::kValid | xr4);
+constexpr RegStorage rs_xr5(RegStorage::kValid | xr5);
+constexpr RegStorage rs_xr6(RegStorage::kValid | xr6);
+constexpr RegStorage rs_xr7(RegStorage::kValid | xr7);
+#ifdef TARGET_REX_SUPPORT
+constexpr RegStorage rs_xr8(RegStorage::kValid | xr8);
+constexpr RegStorage rs_xr9(RegStorage::kValid | xr9);
+constexpr RegStorage rs_xr10(RegStorage::kValid | xr10);
+constexpr RegStorage rs_xr11(RegStorage::kValid | xr11);
+constexpr RegStorage rs_xr12(RegStorage::kValid | xr12);
+constexpr RegStorage rs_xr13(RegStorage::kValid | xr13);
+constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
+constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);
+#endif
 
 extern X86NativeRegisterPool rX86_ARG0;
 extern X86NativeRegisterPool rX86_ARG1;
@@ -311,10 +411,14 @@
   opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
   opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
   opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
-  opcode ## 32MR, opcode ## 64MR, opcode ## 32AR, opcode ## 64AR, opcode ## 32TR,  \
-  opcode ## 32RR, opcode ## 32RM, opcode ## 64RM, opcode ## 32RA, opcode ## 64RA, opcode ## 32RT, opcode ## 64RT, \
-  opcode ## 32RI, opcode ## 64RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
-  opcode ## 32RI8, opcode ## 64RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8
+  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR,  \
+  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
+  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
+  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8, \
+  opcode ## 64MR, opcode ## 64AR, opcode ## 64TR,  \
+  opcode ## 64RR, opcode ## 64RM, opcode ## 64RA, opcode ## 64RT, \
+  opcode ## 64RI, opcode ## 64MI, opcode ## 64AI, opcode ## 64TI, \
+  opcode ## 64RI8, opcode ## 64MI8, opcode ## 64AI8, opcode ## 64TI8
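+  // A rough illustration of the token pasting above: BinaryOpCode(kX86Add)
+  // yields one enumerator per width/addressing form, e.g. kX86Add16RR,
+  // kX86Add32RI and kX86Add64RM.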
   BinaryOpCode(kX86Add),
   BinaryOpCode(kX86Or),
   BinaryOpCode(kX86Adc),
@@ -327,23 +431,32 @@
   kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
   kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
   kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
+  kX86Imul64RRI, kX86Imul64RMI, kX86Imul64RAI,
+  kX86Imul64RRI8, kX86Imul64RMI8, kX86Imul64RAI8,
   kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
   kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
   kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
   kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
   kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
   kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
-  kX86Mov32MR, kX86Mov64MR, kX86Mov32AR, kX86Mov64AR, kX86Mov32TR,
-  kX86Mov32RR, kX86Mov32RM, kX86Mov64RM, kX86Mov32RA, kX86Mov64RA, kX86Mov32RT, kX86Mov64RT,
-  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI, kX86Mov64TI,
+  kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
+  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
+  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
   kX86Lea32RM,
   kX86Lea32RA,
+  kX86Mov64MR, kX86Mov64AR, kX86Mov64TR,
+  kX86Mov64RR, kX86Mov64RM, kX86Mov64RA, kX86Mov64RT,
+  kX86Mov64RI, kX86Mov64MI, kX86Mov64AI, kX86Mov64TI,
+  kX86Lea64RM,
+  kX86Lea64RA,
   // RRC - Register Register ConditionCode - cond_opcode reg1, reg2
   //             - lir operands - 0: reg1, 1: reg2, 2: CC
   kX86Cmov32RRC,
+  kX86Cmov64RRC,
   // RMC - Register Memory ConditionCode - cond_opcode reg1, [base + disp]
   //             - lir operands - 0: reg1, 1: base, 2: disp 3: CC
   kX86Cmov32RMC,
+  kX86Cmov64RMC,
 
   // RC - Register CL - opcode reg, CL
   //          - lir operands - 0: reg, 1: CL
@@ -357,7 +470,9 @@
   opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
   opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
   opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
-  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
+  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC, \
+  opcode ## 64RI, opcode ## 64MI, opcode ## 64AI, \
+  opcode ## 64RC, opcode ## 64MC, opcode ## 64AC
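+  // e.g. BinaryShiftOpCode(kX86Rol) now also yields the 64-bit forms
+  // kX86Rol64RI through kX86Rol64AC alongside the 8/16/32-bit ones.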
   BinaryShiftOpCode(kX86Rol),
   BinaryShiftOpCode(kX86Ror),
   BinaryShiftOpCode(kX86Rcl),
@@ -371,12 +486,18 @@
   kX86Shld32MRI,
   kX86Shrd32RRI,
   kX86Shrd32MRI,
+  kX86Shld64RRI,
+  kX86Shld64MRI,
+  kX86Shrd64RRI,
+  kX86Shrd64MRI,
 #define UnaryOpcode(opcode, reg, mem, array) \
   opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
   opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
-  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
+  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array, \
+  opcode ## 64 ## reg, opcode ## 64 ## mem, opcode ## 64 ## array
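+  // e.g. UnaryOpcode(kX86Test, RI, MI, AI) expands to kX86Test8RI through
+  // kX86Test64AI, one enumerator per width and addressing form.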
   UnaryOpcode(kX86Test, RI, MI, AI),
   kX86Test32RR,
+  kX86Test64RR,
   UnaryOpcode(kX86Not, R, M, A),
   UnaryOpcode(kX86Neg, R, M, A),
   UnaryOpcode(kX86Mul,  DaR, DaM, DaA),
@@ -418,9 +539,39 @@
   Binary0fOpCode(kX86Divsd),    // double divide
   Binary0fOpCode(kX86Divss),    // float divide
   Binary0fOpCode(kX86Punpckldq),  // Interleave low-order double words
-  kX86PsrlqRI,                  // right shift of floating point registers
-  kX86PsllqRI,                  // left shift of floating point registers
-  kX86SqrtsdRR,                 // sqrt of floating point register
+  Binary0fOpCode(kX86Sqrtsd),   // square root
+  Binary0fOpCode(kX86Pmulld),   // parallel integer multiply 32 bits x 4
+  Binary0fOpCode(kX86Pmullw),   // parallel integer multiply 16 bits x 8
+  Binary0fOpCode(kX86Mulps),    // parallel FP multiply 32 bits x 4
+  Binary0fOpCode(kX86Mulpd),    // parallel FP multiply 64 bits x 2
+  Binary0fOpCode(kX86Paddb),    // parallel integer addition 8 bits x 16
+  Binary0fOpCode(kX86Paddw),    // parallel integer addition 16 bits x 8
+  Binary0fOpCode(kX86Paddd),    // parallel integer addition 32 bits x 4
+  Binary0fOpCode(kX86Addps),    // parallel FP addition 32 bits x 4
+  Binary0fOpCode(kX86Addpd),    // parallel FP addition 64 bits x 2
+  Binary0fOpCode(kX86Psubb),    // parallel integer subtraction 8 bits x 16
+  Binary0fOpCode(kX86Psubw),    // parallel integer subtraction 16 bits x 8
+  Binary0fOpCode(kX86Psubd),    // parallel integer subtraction 32 bits x 4
+  Binary0fOpCode(kX86Subps),    // parallel FP subtraction 32 bits x 4
+  Binary0fOpCode(kX86Subpd),    // parallel FP subtraction 64 bits x 2
+  Binary0fOpCode(kX86Pand),     // parallel AND 128 bits x 1
+  Binary0fOpCode(kX86Por),      // parallel OR 128 bits x 1
+  Binary0fOpCode(kX86Pxor),     // parallel XOR 128 bits x 1
+  Binary0fOpCode(kX86Phaddw),   // parallel horizontal addition 16 bits x 8
+  Binary0fOpCode(kX86Phaddd),   // parallel horizontal addition 32 bits x 4
+  kX86PextrbRRI,                // Extract 8 bits from XMM into GPR
+  kX86PextrwRRI,                // Extract 16 bits from XMM into GPR
+  kX86PextrdRRI,                // Extract 32 bits from XMM into GPR
+  kX86PshuflwRRI,               // Shuffle 16 bits in lower 64 bits of XMM.
+  kX86PshufdRRI,                // Shuffle 32 bits in XMM.
+  kX86PsrawRI,                  // signed right shift of floating point registers 16 bits x 8
+  kX86PsradRI,                  // signed right shift of floating point registers 32 bits x 4
+  kX86PsrlwRI,                  // logical right shift of floating point registers 16 bits x 8
+  kX86PsrldRI,                  // logical right shift of floating point registers 32 bits x 4
+  kX86PsrlqRI,                  // logical right shift of floating point registers 64 bits x 2
+  kX86PsllwRI,                  // left shift of floating point registers 16 bits x 8
+  kX86PslldRI,                  // left shift of floating point registers 32 bits x 4
+  kX86PsllqRI,                  // left shift of floating point registers 64 bits x 2
   kX86Fild32M,                  // push 32-bit integer on x87 stack
   kX86Fild64M,                  // push 64-bit integer on x87 stack
   kX86Fstp32M,                  // pop top x87 fp stack and do 32-bit store
@@ -474,20 +625,20 @@
 
 /* Instruction assembly field_loc kind */
 enum X86EncodingKind {
-  kData,                                   // Special case for raw data.
-  kNop,                                    // Special case for variable length nop.
-  kNullary,                                // Opcode that takes no arguments.
-  kPrefix2Nullary,                         // Opcode that takes no arguments, but 2 prefixes.
-  kRegOpcode,                              // Shorter form of R instruction kind (opcode+rd)
-  kReg, kReg64, kMem, kArray,              // R, M and A instruction kinds.
-  kMemReg, kMemReg64, kArrayReg, kArrayReg64, kThreadReg,          // MR, AR and TR instruction kinds.
-  kRegReg, kRegMem, kRegArray, kRegThread, kReg64Thread,  // RR, RM, RA and RT instruction kinds.
-  kRegRegStore,                            // RR following the store modrm reg-reg encoding rather than the load.
-  kRegImm, kReg64Imm, kMemImm, kArrayImm, kThreadImm,  // RI, MI, AI and TI instruction kinds.
-  kRegRegImm, kRegMemImm, kRegArrayImm,    // RRI, RMI and RAI instruction kinds.
-  kMovRegImm,                              // Shorter form move RI.
-  kRegRegImmRev,                           // RRI with first reg in r/m
-  kMemRegImm,                              // MRI instruction kinds.
+  kData,                                    // Special case for raw data.
+  kNop,                                     // Special case for variable length nop.
+  kNullary,                                 // Opcode that takes no arguments.
+  kPrefix2Nullary,                          // Opcode that takes no arguments, but 2 prefixes.
+  kRegOpcode,                               // Shorter form of R instruction kind (opcode+rd)
+  kReg, kMem, kArray,                       // R, M and A instruction kinds.
+  kMemReg, kArrayReg, kThreadReg,           // MR, AR and TR instruction kinds.
+  kRegReg, kRegMem, kRegArray, kRegThread,  // RR, RM, RA and RT instruction kinds.
+  kRegRegStore,                             // RR following the store modrm reg-reg encoding rather than the load.
+  kRegImm, kMemImm, kArrayImm, kThreadImm,  // RI, MI, AI and TI instruction kinds.
+  kRegRegImm, kRegMemImm, kRegArrayImm,     // RRI, RMI and RAI instruction kinds.
+  kMovRegImm,                               // Shorter form move RI.
+  kRegRegImmRev,                            // RRI with first reg in r/m
+  kMemRegImm,                               // MRI instruction kinds.
   kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
   kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
   kRegRegReg, kRegRegMem, kRegRegArray,    // RRR, RRM, RRA instruction kinds.
@@ -537,6 +688,15 @@
 // 64 Bit Operand Size
 #define REX_W 0x48
 // Extension of the ModR/M reg field
+#define REX_R 0x44
+// Extension of the SIB index field
+#define REX_X 0x42
+// Extension of the ModR/M r/m field, SIB base field, or Opcode reg field
+#define REX_B 0x41
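+// The REX_* values above all include the fixed 0x40 base of the REX prefix, so
+// they can be OR-ed together; e.g. (REX_W | REX_R | REX_B) == 0x4D requests a
+// 64-bit operand with both the reg and r/m fields extended to r8..r15.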
+// Mask extracting the least significant 3 bits of r0..r15
+#define kRegNumMask32 0x07
+// Value indicating that base or reg is not used
+#define NO_REG 0
 
 #define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
 #define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 3387c50..7e50c31 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -225,24 +225,6 @@
     return reg_ & kRegNumMask;
   }
 
-  // Aliased double to low single.
-  RegStorage DoubleToLowSingle() const {
-    DCHECK(IsDouble());
-    return FloatSolo32(GetRegNum() << 1);
-  }
-
-  // Aliased double to high single.
-  RegStorage DoubleToHighSingle() const {
-    DCHECK(IsDouble());
-    return FloatSolo32((GetRegNum() << 1) + 1);
-  }
-
-  // Single to aliased double.
-  RegStorage SingleToDouble() const {
-    DCHECK(IsSingle());
-    return FloatSolo64(GetRegNum() >> 1);
-  }
-
   // Is register number in 0..7?
   bool Low8() const {
     return GetRegNum() < 8;
@@ -280,6 +262,11 @@
     return RegStorage(k32BitSolo, (reg_num & kRegNumMask) | kFloatingPoint);
   }
 
+  // Create a 128-bit solo.
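+  // (e.g. Solo128(0) names the full 128-bit view of xmm0, matching the xr0
+  // value in the x86 register pool.)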
+  static RegStorage Solo128(int reg_num) {
+    return RegStorage(k128BitSolo, reg_num & kRegTypeMask);
+  }
+
   // Create a 64-bit solo.
   static RegStorage Solo64(int reg_num) {
     return RegStorage(k64BitSolo, reg_num & kRegTypeMask);
@@ -312,7 +299,7 @@
       case k256BitSolo: return 32;
       case k512BitSolo: return 64;
       case k1024BitSolo: return 128;
-      default: LOG(FATAL) << "Unexpected shap";
+      default: LOG(FATAL) << "Unexpected shape";
     }
     return 0;
   }
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 5aa093a..bd6bc225 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-#include "bit_vector_block_iterator.h"
 #include "compiler_internals.h"
 #include "dataflow_iterator-inl.h"
 
@@ -127,12 +126,7 @@
     return false;
   }
 
-  ArenaBitVector::Iterator iterator(bb->data_flow_info->def_v);
-  while (true) {
-    int idx = iterator.Next();
-    if (idx == -1) {
-      break;
-    }
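+  // Indexes() exposes the set bits as an iterable range, replacing the manual
+  // Next()/-1 iterator idiom.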
+  for (uint32_t idx : bb->data_flow_info->def_v->Indexes()) {
     /* Block bb defines register idx */
     def_block_matrix_[idx]->SetBit(bb->id);
   }
@@ -173,8 +167,8 @@
 }
 
 void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
-  if (dom_post_order_traversal_ == NULL) {
-    // First time - create the array.
+  if (dom_post_order_traversal_ == NULL || max_num_reachable_blocks_ < num_reachable_blocks_) {
+    // First time or too small - create the array.
     dom_post_order_traversal_ =
         new (arena_) GrowableArray<BasicBlockId>(arena_, num_reachable_blocks_,
                                         kGrowableArrayDomPostOrderTraversal);
@@ -182,22 +176,22 @@
     dom_post_order_traversal_->Reset();
   }
   ClearAllVisitedFlags();
-  std::vector<std::pair<BasicBlock*, ArenaBitVector::Iterator*>> work_stack;
+  std::vector<std::pair<BasicBlock*, ArenaBitVector::IndexIterator>> work_stack;
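+  // The explicit work stack keeps the dominator post-order walk iterative,
+  // presumably to avoid deep recursion on large CFGs.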
   bb->visited = true;
-  work_stack.push_back(std::make_pair(bb, bb->i_dominated->GetIterator()));
+  work_stack.push_back(std::make_pair(bb, bb->i_dominated->Indexes().begin()));
   while (!work_stack.empty()) {
-    const std::pair<BasicBlock*, ArenaBitVector::Iterator*>& curr = work_stack.back();
-    BasicBlock* curr_bb = curr.first;
-    ArenaBitVector::Iterator* curr_idom_iter = curr.second;
-    int bb_idx = curr_idom_iter->Next();
-    while ((bb_idx != -1) && (NeedsVisit(GetBasicBlock(bb_idx)) == NULL)) {
-      bb_idx = curr_idom_iter->Next();
+    std::pair<BasicBlock*, ArenaBitVector::IndexIterator>* curr = &work_stack.back();
+    BasicBlock* curr_bb = curr->first;
+    ArenaBitVector::IndexIterator* curr_idom_iter = &curr->second;
+    while (!curr_idom_iter->Done() && (NeedsVisit(GetBasicBlock(**curr_idom_iter)) == nullptr)) {
+      ++*curr_idom_iter;
     }
-    if (bb_idx != -1) {
-      BasicBlock* new_bb = GetBasicBlock(bb_idx);
+    // NOTE: work_stack.push_back()/pop_back() invalidate curr and curr_idom_iter.
+    if (!curr_idom_iter->Done()) {
+      BasicBlock* new_bb = GetBasicBlock(**curr_idom_iter);
+      ++*curr_idom_iter;
       new_bb->visited = true;
-      work_stack.push_back(
-          std::make_pair(new_bb, new_bb->i_dominated->GetIterator()));
+      work_stack.push_back(std::make_pair(new_bb, new_bb->i_dominated->Indexes().begin()));
     } else {
       // no successor/next
       if (curr_bb->id != NullBasicBlockId) {
@@ -249,11 +243,10 @@
   }
 
   /* Calculate DF_up */
-  BitVectorBlockIterator it(bb->i_dominated, cu_);
-  for (BasicBlock *dominated_bb = it.Next(); dominated_bb != nullptr; dominated_bb = it.Next()) {
-    BitVectorBlockIterator inner_it(dominated_bb->dom_frontier, cu_);
-    for (BasicBlock *df_up_block = inner_it.Next(); df_up_block != nullptr;
-         df_up_block = inner_it.Next()) {
+  for (uint32_t dominated_idx : bb->i_dominated->Indexes()) {
+    BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
+    for (uint32_t df_up_block_idx : dominated_bb->dom_frontier->Indexes()) {
+      BasicBlock* df_up_block = GetBasicBlock(df_up_block_idx);
       CheckForDominanceFrontier(bb, df_up_block);
     }
   }
@@ -380,8 +373,8 @@
     InitializeDominationInfo(bb);
   }
 
-  /* Initalize & Clear i_dom_list */
-  if (i_dom_list_ == NULL) {
+  /* Initialize & Clear i_dom_list */
+  if (max_num_reachable_blocks_ < num_reachable_blocks_) {
     i_dom_list_ = static_cast<int*>(arena_->Alloc(sizeof(int) * num_reachable_blocks,
                                                   kArenaAllocDFInfo));
   }
@@ -449,7 +442,8 @@
  * insert a phi node if the variable is live-in to the block.
  */
 bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
-  ArenaBitVector* temp_dalvik_register_v = temp_dalvik_register_v_;
+  DCHECK_EQ(temp_bit_vector_size_, cu_->num_dalvik_registers);
+  ArenaBitVector* temp_dalvik_register_v = temp_bit_vector_;
 
   if (bb->data_flow_info == NULL) {
     return false;
@@ -487,15 +481,10 @@
 /* Insert phi nodes for each variable into the dominance frontiers */
 void MIRGraph::InsertPhiNodes() {
   int dalvik_reg;
-  ArenaBitVector* phi_blocks =
-      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapPhi);
-  ArenaBitVector* tmp_blocks =
-      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapTmpBlocks);
-  ArenaBitVector* input_blocks =
-      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapInputBlocks);
-
-  temp_dalvik_register_v_ =
-      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapRegisterV);
+  ArenaBitVector* phi_blocks = new (temp_scoped_alloc_.get()) ArenaBitVector(
+      temp_scoped_alloc_.get(), GetNumBlocks(), false, kBitMapPhi);
+  ArenaBitVector* input_blocks = new (temp_scoped_alloc_.get()) ArenaBitVector(
+      temp_scoped_alloc_.get(), GetNumBlocks(), false, kBitMapInputBlocks);
 
   RepeatingPostOrderDfsIterator iter(this);
   bool change = false;
@@ -505,60 +494,29 @@
 
   /* Iterate through each Dalvik register */
   for (dalvik_reg = cu_->num_dalvik_registers - 1; dalvik_reg >= 0; dalvik_reg--) {
-    bool change;
-
     input_blocks->Copy(def_block_matrix_[dalvik_reg]);
     phi_blocks->ClearAllBits();
-
-    /* Calculate the phi blocks for each Dalvik register */
     do {
-      change = false;
-      tmp_blocks->ClearAllBits();
-      ArenaBitVector::Iterator iterator(input_blocks);
-
-      while (true) {
-        int idx = iterator.Next();
-        if (idx == -1) {
-          break;
-        }
+      // TUNING: When we repeat this, we could skip indexes from the previous pass.
+      for (uint32_t idx : input_blocks->Indexes()) {
         BasicBlock* def_bb = GetBasicBlock(idx);
-
-        /* Merge the dominance frontier to tmp_blocks */
-        // TUNING: hot call to Union().
-        if (def_bb->dom_frontier != NULL) {
-          tmp_blocks->Union(def_bb->dom_frontier);
+        if (def_bb->dom_frontier != nullptr) {
+          phi_blocks->Union(def_bb->dom_frontier);
         }
       }
-      if (!phi_blocks->Equal(tmp_blocks)) {
-        change = true;
-        phi_blocks->Copy(tmp_blocks);
-
-        /*
-         * Iterate through the original blocks plus the new ones in
-         * the dominance frontier.
-         */
-        input_blocks->Copy(phi_blocks);
-        input_blocks->Union(def_block_matrix_[dalvik_reg]);
-      }
-    } while (change);
+    } while (input_blocks->Union(phi_blocks));
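+    // Union() returns whether any new bits were set, so this loop iterates the
+    // dominance-frontier closure to a fixed point.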
 
     /*
      * Insert a phi node for dalvik_reg in the phi_blocks if the Dalvik
      * register is in the live-in set.
      */
-    ArenaBitVector::Iterator iterator(phi_blocks);
-    while (true) {
-      int idx = iterator.Next();
-      if (idx == -1) {
-        break;
-      }
+    for (uint32_t idx : phi_blocks->Indexes()) {
       BasicBlock* phi_bb = GetBasicBlock(idx);
       /* Variable will be clobbered before being used - no need for phi */
       if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) {
         continue;
       }
-      MIR *phi =
-          static_cast<MIR*>(arena_->Alloc(sizeof(MIR), kArenaAllocDFInfo));
+      MIR *phi = NewMIR();
       phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
       phi->dalvikInsn.vA = dalvik_reg;
       phi->offset = phi_bb->start_offset;
@@ -584,12 +542,8 @@
     /* Iterate through the predecessors */
     GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
     size_t num_uses = bb->predecessors->Size();
-    mir->ssa_rep->num_uses = num_uses;
-    int* uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
-                                                kArenaAllocDFInfo));
-    mir->ssa_rep->uses = uses;
-    mir->ssa_rep->fp_use =
-        static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, kArenaAllocDFInfo));
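+    // AllocateSSAUseData() now centralizes the allocation of the uses/fp_use
+    // arrays that was previously done inline here.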
+    AllocateSSAUseData(mir, num_uses);
+    int* uses = mir->ssa_rep->uses;
     BasicBlockId* incoming =
         static_cast<BasicBlockId*>(arena_->Alloc(sizeof(BasicBlockId) * num_uses,
                                                  kArenaAllocDFInfo));
@@ -598,9 +552,9 @@
     while (true) {
       BasicBlock* pred_bb = GetBasicBlock(iter.Next());
       if (!pred_bb) {
         break;
       }
-      int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
+      int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
       uses[idx] = ssa_reg;
       incoming[idx] = pred_bb->id;
       idx++;
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 95b3d86..c4af9cb 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -233,8 +233,7 @@
 
     // Special-case handling for format 35c/3rc invokes
     Instruction::Code opcode = mir->dalvikInsn.opcode;
-    int flags = (static_cast<int>(opcode) >= kNumPackedOpcodes)
-        ? 0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
+    int flags = IsPseudoMirOp(opcode) ? 0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
     if ((flags & Instruction::kInvoke) &&
         (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
       DCHECK_EQ(next, 0);
@@ -317,8 +316,7 @@
        * The Phi set will include all low words or all high
        * words, so we have to treat them specially.
        */
-      bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) ==
-                    kMirOpPhi);
+      bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi);
       RegLocation rl_temp = reg_location_[defs[0]];
       bool defined_fp = rl_temp.defined && rl_temp.fp;
       bool defined_core = rl_temp.defined && rl_temp.core;
@@ -425,6 +423,9 @@
     loc[ct->s_reg_low].defined = true;
   }
 
+  /* Treat Method* as a normal reference */
+  loc[GetMethodSReg()].ref = true;
+
   reg_location_ = loc;
 
   int num_regs = cu_->num_dalvik_registers;
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 08fd386..45abfcc 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -42,8 +42,8 @@
 }
 
 inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
-    ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
-    const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit) {
+    ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+    Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) {
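+  // Note: Handle<> wraps a single stack-reference pointer, so passing it by
+  // value is cheap and lets callers pass NullHandle<>() temporaries directly.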
   DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
   DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
   const DexFile::MethodId& referrer_method_id =
@@ -59,8 +59,8 @@
 }
 
 inline mirror::ArtField* CompilerDriver::ResolveField(
-    ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
-    const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+    ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+    Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
     uint32_t field_idx, bool is_static) {
   DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
   DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
@@ -165,13 +165,14 @@
 }
 
 inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
-    ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
-    const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+    ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+    Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
     uint32_t method_idx, InvokeType invoke_type) {
-  DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
-  DCHECK(class_loader.Get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+  DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
+  DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
   mirror::ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
-      *mUnit->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type);
+      *mUnit->GetDexFile(), method_idx, dex_cache, class_loader, NullHandle<mirror::ArtMethod>(),
+      invoke_type);
   DCHECK_EQ(resolved_method == nullptr, soa.Self()->IsExceptionPending());
   if (UNLIKELY(resolved_method == nullptr)) {
     // Clean up any exception left by type resolution.
@@ -206,8 +207,8 @@
 }
 
 inline int CompilerDriver::IsFastInvoke(
-    ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
-    const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+    ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+    Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
     mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
     MethodReference* target_method, const MethodReference* devirt_target,
     uintptr_t* direct_code, uintptr_t* direct_method) {
@@ -256,15 +257,17 @@
     ClassLinker* class_linker = mUnit->GetClassLinker();
     if (LIKELY(devirt_target->dex_file == mUnit->GetDexFile())) {
       called_method = class_linker->ResolveMethod(*devirt_target->dex_file,
-                                                  devirt_target->dex_method_index,
-                                                  dex_cache, class_loader, NULL, kVirtual);
+                                                  devirt_target->dex_method_index, dex_cache,
+                                                  class_loader, NullHandle<mirror::ArtMethod>(),
+                                                  kVirtual);
     } else {
       StackHandleScope<1> hs(soa.Self());
       Handle<mirror::DexCache> target_dex_cache(
           hs.NewHandle(class_linker->FindDexCache(*devirt_target->dex_file)));
       called_method = class_linker->ResolveMethod(*devirt_target->dex_file,
                                                   devirt_target->dex_method_index,
-                                                  target_dex_cache, class_loader, NULL, kVirtual);
+                                                  target_dex_cache, class_loader,
+                                                  NullHandle<mirror::ArtMethod>(), kVirtual);
     }
     CHECK(called_method != NULL);
     CHECK(!called_method->IsAbstract());
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index f8c75d1..8d4e283 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -511,7 +511,7 @@
 }
 
 static DexToDexCompilationLevel GetDexToDexCompilationlevel(
-    Thread* self, Handle<mirror::ClassLoader>& class_loader, const DexFile& dex_file,
+    Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file,
     const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   const char* descriptor = dex_file.GetClassDescriptor(class_def);
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -731,11 +731,11 @@
     for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
       uint16_t exception_type_idx = exception_type.first;
       const DexFile* dex_file = exception_type.second;
-      StackHandleScope<3> hs(self);
+      StackHandleScope<2> hs(self);
       Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(*dex_file)));
-      auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
       Handle<mirror::Class> klass(hs.NewHandle(
-          class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache, class_loader)));
+          class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
+                                    NullHandle<mirror::ClassLoader>())));
       if (klass.Get() == NULL) {
         const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
         const char* descriptor = dex_file->GetTypeDescriptor(type_id);
@@ -1152,28 +1152,22 @@
       *type = sharp_type;
     }
   } else {
-    if (compiling_boot) {
+    bool method_in_image = compiling_boot ||
+        Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
+    if (method_in_image) {
+      CHECK(!method->IsAbstract());
       *type = sharp_type;
-      *direct_method = -1;
-      *direct_code = -1;
+      *direct_method = compiling_boot ? -1 : reinterpret_cast<uintptr_t>(method);
+      *direct_code = compiling_boot ? -1 : compiler_->GetEntryPointOf(method);
+      target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+      target_method->dex_method_index = method->GetDexMethodIndex();
+    } else if (!must_use_direct_pointers) {
+      // Set the code and rely on the dex cache for the method.
+      *type = sharp_type;
+      *direct_code = compiler_->GetEntryPointOf(method);
     } else {
-      bool method_in_image =
-          Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
-      if (method_in_image) {
-        CHECK(!method->IsAbstract());
-        *type = sharp_type;
-        *direct_method = reinterpret_cast<uintptr_t>(method);
-        *direct_code = compiler_->GetEntryPointOf(method);
-        target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
-        target_method->dex_method_index = method->GetDexMethodIndex();
-      } else if (!must_use_direct_pointers) {
-        // Set the code and rely on the dex cache for the method.
-        *type = sharp_type;
-        *direct_code = compiler_->GetEntryPointOf(method);
-      } else {
-        // Direct pointers were required but none were available.
-        VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
-      }
+      // Direct pointers were required but none were available.
+      VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
     }
   }
 }
@@ -1547,7 +1541,8 @@
       if (resolve_fields_and_methods) {
         while (it.HasNextDirectMethod()) {
           mirror::ArtMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(),
-                                                                  dex_cache, class_loader, NULL,
+                                                                  dex_cache, class_loader,
+                                                                  NullHandle<mirror::ArtMethod>(),
                                                                   it.GetMethodInvokeType(class_def));
           if (method == NULL) {
             CHECK(soa.Self()->IsExceptionPending());
@@ -1557,7 +1552,8 @@
         }
         while (it.HasNextVirtualMethod()) {
           mirror::ArtMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(),
-                                                                  dex_cache, class_loader, NULL,
+                                                                  dex_cache, class_loader,
+                                                                  NullHandle<mirror::ArtMethod>(),
                                                                   it.GetMethodInvokeType(class_def));
           if (method == NULL) {
             CHECK(soa.Self()->IsExceptionPending());
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index abca659..14ccb50 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -221,15 +221,15 @@
 
   // Resolve compiling method's class. Returns nullptr on failure.
   mirror::Class* ResolveCompilingMethodsClass(
-      ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
-      const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit)
+      ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Resolve a field. Returns nullptr on failure, including incompatible class change.
   // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
   mirror::ArtField* ResolveField(
-      ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
-      const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+      ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
       uint32_t field_idx, bool is_static)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -258,8 +258,8 @@
 
   // Resolve a method. Returns nullptr on failure, including incompatible class change.
   mirror::ArtMethod* ResolveMethod(
-      ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
-      const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+      ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
       uint32_t method_idx, InvokeType invoke_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -277,8 +277,8 @@
   // Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
   // for ProcessedInvoke() and computes the necessary lowering info.
   int IsFastInvoke(
-      ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
-      const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+      ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
       mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
       MethodReference* target_method, const MethodReference* devirt_target,
       uintptr_t* direct_code, uintptr_t* direct_method)
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 4efd27d..ca956aa 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -152,10 +152,9 @@
   jobject class_loader;
   {
     ScopedObjectAccess soa(Thread::Current());
-    StackHandleScope<1> hs(soa.Self());
-    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
-    CompileVirtualMethod(null_loader, "java.lang.Class", "isFinalizable", "()Z");
-    CompileDirectMethod(null_loader, "java.lang.Object", "<init>", "()V");
+    CompileVirtualMethod(NullHandle<mirror::ClassLoader>(), "java.lang.Class", "isFinalizable",
+                         "()Z");
+    CompileDirectMethod(NullHandle<mirror::ClassLoader>(), "java.lang.Object", "<init>", "()V");
     class_loader = LoadDex("AbstractMethod");
   }
   ASSERT_TRUE(class_loader != NULL);
@@ -174,7 +173,10 @@
   env_->ExceptionClear();
   jclass jlame = env_->FindClass("java/lang/AbstractMethodError");
   EXPECT_TRUE(env_->IsInstanceOf(exception, jlame));
-  Thread::Current()->ClearException();
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    Thread::Current()->ClearException();
+  }
 }
 
 // TODO: need check-cast test (when stub complete & we can throw/catch
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index 0e27210..3dba426 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -361,10 +361,11 @@
       ClassLinker* linker = Runtime::Current()->GetClassLinker();
       // Unchecked as we hold mutator_lock_ on entry.
       ScopedObjectAccessUnchecked soa(Thread::Current());
-      StackHandleScope<2> hs(soa.Self());
+      StackHandleScope<1> hs(soa.Self());
       Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(dex_file)));
-      auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
-      method = linker->ResolveMethod(dex_file, method_idx, dex_cache, class_loader, NULL, invoke_type);
+      method = linker->ResolveMethod(dex_file, method_idx, dex_cache,
+                                     NullHandle<mirror::ClassLoader>(),
+                                     NullHandle<mirror::ArtMethod>(), invoke_type);
       CHECK(method != NULL);
     }
     const CompiledMethod* compiled_method =
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index e88ed42..09f2eae 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -739,8 +739,8 @@
                 << " for " << elf_file_->GetPath();
     return false;
   }
-  BufferedOutputStream output_stream(new FileOutputStream(elf_file_));
-  if (!oat_writer->Write(&output_stream)) {
+  std::unique_ptr<BufferedOutputStream> output_stream(
+      new BufferedOutputStream(new FileOutputStream(elf_file_)));
+  if (!oat_writer->Write(output_stream.get())) {
     PLOG(ERROR) << "Failed to write .rodata and .text for " << elf_file_->GetPath();
     return false;
   }
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 70144c8..e37f943 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -695,15 +695,14 @@
 static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* patch)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  StackHandleScope<2> hs(Thread::Current());
+  StackHandleScope<1> hs(Thread::Current());
   Handle<mirror::DexCache> dex_cache(
       hs.NewHandle(class_linker->FindDexCache(*patch->GetTargetDexFile())));
-  auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
   ArtMethod* method = class_linker->ResolveMethod(*patch->GetTargetDexFile(),
                                                   patch->GetTargetMethodIdx(),
                                                   dex_cache,
-                                                  class_loader,
-                                                  NULL,
+                                                  NullHandle<mirror::ClassLoader>(),
+                                                  NullHandle<mirror::ArtMethod>(),
                                                   patch->GetTargetInvokeType());
   CHECK(method != NULL)
     << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
@@ -721,11 +720,8 @@
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   StackHandleScope<2> hs(Thread::Current());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(patch->GetDexFile())));
-  auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
-  Class* klass = class_linker->ResolveType(patch->GetDexFile(),
-                                           patch->GetTargetTypeIdx(),
-                                           dex_cache,
-                                           class_loader);
+  Class* klass = class_linker->ResolveType(patch->GetDexFile(), patch->GetTargetTypeIdx(),
+                                           dex_cache, NullHandle<mirror::ClassLoader>());
   CHECK(klass != NULL)
     << patch->GetDexFile().GetLocation() << " " << patch->GetTargetTypeIdx();
   CHECK(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx()) == klass)
@@ -746,30 +742,42 @@
     const CompilerDriver::CallPatchInformation* patch = code_to_patch[i];
     ArtMethod* target = GetTargetMethod(patch);
     uintptr_t quick_code = reinterpret_cast<uintptr_t>(class_linker->GetQuickOatCodeFor(target));
+    DCHECK_NE(quick_code, 0U) << PrettyMethod(target);
     uintptr_t code_base = reinterpret_cast<uintptr_t>(&oat_file_->GetOatHeader());
     uintptr_t code_offset = quick_code - code_base;
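+    // Methods still pointing at the interpreter bridge or the generic JNI
+    // trampoline have no method-specific oat code, so the patch falls back to
+    // the shared trampoline offsets.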
+    bool is_quick_offset = false;
+    if (quick_code == reinterpret_cast<uintptr_t>(GetQuickToInterpreterBridge())) {
+      is_quick_offset = true;
+      code_offset = quick_to_interpreter_bridge_offset_;
+    } else if (quick_code ==
+        reinterpret_cast<uintptr_t>(class_linker->GetQuickGenericJniTrampoline())) {
+      CHECK(target->IsNative());
+      is_quick_offset = true;
+      code_offset = quick_generic_jni_trampoline_offset_;
+    }
+    uintptr_t value;
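+    // Both branches below compute 'value'; SetPatchLocation() then applies it
+    // once at the end of the loop body.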
     if (patch->IsRelative()) {
       // value to patch is relative to the location being patched
       const void* quick_oat_code =
         class_linker->GetQuickOatCodeFor(patch->GetDexFile(),
                                          patch->GetReferrerClassDefIdx(),
                                          patch->GetReferrerMethodIdx());
+      if (is_quick_offset) {
+        // If it's a quick offset it means that we are doing a relative patch from the class
+        // linker oat_file to the image writer oat_file, so we need to adjust the quick oat code
+        // to be the one in the image writer oat_file.
+        quick_code = PointerToLowMemUInt32(GetOatAddress(code_offset));
+        quick_oat_code =
+            reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(quick_oat_code) +
+                reinterpret_cast<uintptr_t>(oat_data_begin_) - code_base);
+      }
       uintptr_t base = reinterpret_cast<uintptr_t>(quick_oat_code);
       uintptr_t patch_location = base + patch->GetLiteralOffset();
-      uintptr_t value = quick_code - patch_location + patch->RelativeOffset();
-      SetPatchLocation(patch, value);
+      value = quick_code - patch_location + patch->RelativeOffset();
     } else {
-      if (quick_code == reinterpret_cast<uintptr_t>(GetQuickToInterpreterBridge()) ||
-          quick_code == reinterpret_cast<uintptr_t>(class_linker->GetQuickGenericJniTrampoline())) {
-        if (target->IsNative()) {
-          // generic JNI, not interpreter bridge from GetQuickOatCodeFor().
-          code_offset = quick_generic_jni_trampoline_offset_;
-        } else {
-          code_offset = quick_to_interpreter_bridge_offset_;
-        }
-      }
-      SetPatchLocation(patch, PointerToLowMemUInt32(GetOatAddress(code_offset)));
+      value = PointerToLowMemUInt32(GetOatAddress(code_offset));
     }
+    SetPatchLocation(patch, value);
   }
 
   const CallPatches& methods_to_patch = compiler_driver_.GetMethodsToPatch();
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 9927fe1..8f4eddb 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -60,7 +60,7 @@
     } else {
       method = c->FindVirtualMethod(method_name, method_sig);
     }
-    ASSERT_TRUE(method != NULL) << method_name << " " << method_sig;
+    ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
     if (method->GetEntryPointFromQuickCompiledCode() == nullptr) {
       ASSERT_TRUE(method->GetEntryPointFromPortableCompiledCode() == nullptr);
       CompileMethod(method);
@@ -88,16 +88,16 @@
     // JNI operations after runtime start.
     env_ = Thread::Current()->GetJniEnv();
     jklass_ = env_->FindClass("MyClassNatives");
-    ASSERT_TRUE(jklass_ != NULL) << method_name << " " << method_sig;
+    ASSERT_TRUE(jklass_ != nullptr) << method_name << " " << method_sig;
 
     if (direct) {
       jmethod_ = env_->GetStaticMethodID(jklass_, method_name, method_sig);
     } else {
       jmethod_ = env_->GetMethodID(jklass_, method_name, method_sig);
     }
-    ASSERT_TRUE(jmethod_ != NULL) << method_name << " " << method_sig;
+    ASSERT_TRUE(jmethod_ != nullptr) << method_name << " " << method_sig;
 
-    if (native_fnptr != NULL) {
+    if (native_fnptr != nullptr) {
       JNINativeMethod methods[] = { { method_name, method_sig, native_fnptr } };
       ASSERT_EQ(JNI_OK, env_->RegisterNatives(jklass_, methods, 1))
               << method_name << " " << method_sig;
@@ -107,7 +107,7 @@
 
     jmethodID constructor = env_->GetMethodID(jklass_, "<init>", "()V");
     jobj_ = env_->NewObject(jklass_, constructor);
-    ASSERT_TRUE(jobj_ != NULL) << method_name << " " << method_sig;
+    ASSERT_TRUE(jobj_ != nullptr) << method_name << " " << method_sig;
   }
 
  public:
@@ -125,13 +125,14 @@
 jobject JniCompilerTest::jobj_;
 jobject JniCompilerTest::class_loader_;
 
 int gJava_MyClassNatives_foo_calls = 0;
 void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) {
   // 1 = thisObj
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   Locks::mutator_lock_->AssertNotHeld(Thread::Current());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(thisObj != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
   gJava_MyClassNatives_foo_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -151,8 +152,8 @@
 
 TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) {
   TEST_DISABLED_FOR_PORTABLE();
-  SetUpForTest(false, "bar", "(I)I",
-               NULL /* calling through stub will link with &Java_MyClassNatives_bar */);
+  SetUpForTest(false, "bar", "(I)I", nullptr);
+  // Calling through the stub will link with &Java_MyClassNatives_bar.
 
   ScopedObjectAccess soa(Thread::Current());
   std::string reason;
@@ -168,8 +169,8 @@
 
 TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) {
   TEST_DISABLED_FOR_PORTABLE();
-  SetUpForTest(true, "sbar", "(I)I",
-               NULL /* calling through stub will link with &Java_MyClassNatives_sbar */);
+  SetUpForTest(true, "sbar", "(I)I", nullptr);
+  // Calling through the stub will link with &Java_MyClassNatives_sbar.
 
   ScopedObjectAccess soa(Thread::Current());
   std::string reason;
@@ -188,7 +189,7 @@
   // 1 = thisObj
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(thisObj != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
   gJava_MyClassNatives_fooI_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -215,7 +216,7 @@
   // 1 = thisObj
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(thisObj != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
   gJava_MyClassNatives_fooII_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -243,7 +244,7 @@
   // 1 = thisObj
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(thisObj != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
   gJava_MyClassNatives_fooJJ_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -272,7 +273,7 @@
   // 1 = thisObj
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(thisObj != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
   gJava_MyClassNatives_fooDD_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -302,7 +303,7 @@
   // 1 = thisObj
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(thisObj != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
   gJava_MyClassNatives_fooJJ_synchronized_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -329,7 +330,7 @@
   // 3 = this + y + z
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(thisObj != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
   gJava_MyClassNatives_fooIOO_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -353,28 +354,28 @@
                reinterpret_cast<void*>(&Java_MyClassNatives_fooIOO));
 
   EXPECT_EQ(0, gJava_MyClassNatives_fooIOO_calls);
-  jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, NULL, NULL);
+  jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, nullptr, nullptr);
   EXPECT_TRUE(env_->IsSameObject(jobj_, result));
   EXPECT_EQ(1, gJava_MyClassNatives_fooIOO_calls);
 
-  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, NULL, jklass_);
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, nullptr, jklass_);
   EXPECT_TRUE(env_->IsSameObject(jobj_, result));
   EXPECT_EQ(2, gJava_MyClassNatives_fooIOO_calls);
-  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, NULL, jklass_);
-  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, nullptr, jklass_);
+  EXPECT_TRUE(env_->IsSameObject(nullptr, result));
   EXPECT_EQ(3, gJava_MyClassNatives_fooIOO_calls);
-  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, NULL, jklass_);
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, nullptr, jklass_);
   EXPECT_TRUE(env_->IsSameObject(jklass_, result));
   EXPECT_EQ(4, gJava_MyClassNatives_fooIOO_calls);
 
-  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, jklass_, NULL);
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, jklass_, nullptr);
   EXPECT_TRUE(env_->IsSameObject(jobj_, result));
   EXPECT_EQ(5, gJava_MyClassNatives_fooIOO_calls);
-  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, jklass_, NULL);
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, jklass_, nullptr);
   EXPECT_TRUE(env_->IsSameObject(jklass_, result));
   EXPECT_EQ(6, gJava_MyClassNatives_fooIOO_calls);
-  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, jklass_, NULL);
-  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, jklass_, nullptr);
+  EXPECT_TRUE(env_->IsSameObject(nullptr, result));
   EXPECT_EQ(7, gJava_MyClassNatives_fooIOO_calls);
 }
 
@@ -383,7 +384,7 @@
   // 1 = klass
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(klass != NULL);
+  EXPECT_TRUE(klass != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
   gJava_MyClassNatives_fooSII_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -407,7 +408,7 @@
   // 1 = klass
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(klass != NULL);
+  EXPECT_TRUE(klass != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
   gJava_MyClassNatives_fooSDD_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -437,7 +438,7 @@
   // 3 = klass + y + z
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(klass != NULL);
+  EXPECT_TRUE(klass != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
   gJava_MyClassNatives_fooSIOO_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -462,28 +463,28 @@
                reinterpret_cast<void*>(&Java_MyClassNatives_fooSIOO));
 
   EXPECT_EQ(0, gJava_MyClassNatives_fooSIOO_calls);
-  jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, NULL);
+  jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr);
   EXPECT_TRUE(env_->IsSameObject(jklass_, result));
   EXPECT_EQ(1, gJava_MyClassNatives_fooSIOO_calls);
 
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, jobj_);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, jobj_);
   EXPECT_TRUE(env_->IsSameObject(jklass_, result));
   EXPECT_EQ(2, gJava_MyClassNatives_fooSIOO_calls);
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, NULL, jobj_);
-  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, nullptr, jobj_);
+  EXPECT_TRUE(env_->IsSameObject(nullptr, result));
   EXPECT_EQ(3, gJava_MyClassNatives_fooSIOO_calls);
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, NULL, jobj_);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, nullptr, jobj_);
   EXPECT_TRUE(env_->IsSameObject(jobj_, result));
   EXPECT_EQ(4, gJava_MyClassNatives_fooSIOO_calls);
 
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, NULL);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, nullptr);
   EXPECT_TRUE(env_->IsSameObject(jklass_, result));
   EXPECT_EQ(5, gJava_MyClassNatives_fooSIOO_calls);
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, NULL);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, nullptr);
   EXPECT_TRUE(env_->IsSameObject(jobj_, result));
   EXPECT_EQ(6, gJava_MyClassNatives_fooSIOO_calls);
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, NULL);
-  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr);
+  EXPECT_TRUE(env_->IsSameObject(nullptr, result));
   EXPECT_EQ(7, gJava_MyClassNatives_fooSIOO_calls);
 }
 
@@ -492,7 +493,7 @@
   // 3 = klass + y + z
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(klass != NULL);
+  EXPECT_TRUE(klass != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
   gJava_MyClassNatives_fooSSIOO_calls++;
   ScopedObjectAccess soa(Thread::Current());
@@ -516,28 +517,28 @@
                reinterpret_cast<void*>(&Java_MyClassNatives_fooSSIOO));
 
   EXPECT_EQ(0, gJava_MyClassNatives_fooSSIOO_calls);
-  jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, NULL);
+  jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr);
   EXPECT_TRUE(env_->IsSameObject(jklass_, result));
   EXPECT_EQ(1, gJava_MyClassNatives_fooSSIOO_calls);
 
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, jobj_);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, jobj_);
   EXPECT_TRUE(env_->IsSameObject(jklass_, result));
   EXPECT_EQ(2, gJava_MyClassNatives_fooSSIOO_calls);
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, NULL, jobj_);
-  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, nullptr, jobj_);
+  EXPECT_TRUE(env_->IsSameObject(nullptr, result));
   EXPECT_EQ(3, gJava_MyClassNatives_fooSSIOO_calls);
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, NULL, jobj_);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, nullptr, jobj_);
   EXPECT_TRUE(env_->IsSameObject(jobj_, result));
   EXPECT_EQ(4, gJava_MyClassNatives_fooSSIOO_calls);
 
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, NULL);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, nullptr);
   EXPECT_TRUE(env_->IsSameObject(jklass_, result));
   EXPECT_EQ(5, gJava_MyClassNatives_fooSSIOO_calls);
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, NULL);
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, nullptr);
   EXPECT_TRUE(env_->IsSameObject(jobj_, result));
   EXPECT_EQ(6, gJava_MyClassNatives_fooSSIOO_calls);
-  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, NULL);
-  EXPECT_TRUE(env_->IsSameObject(NULL, result));
+  result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr);
+  EXPECT_TRUE(env_->IsSameObject(nullptr, result));
   EXPECT_EQ(7, gJava_MyClassNatives_fooSSIOO_calls);
 }
 
@@ -591,7 +592,7 @@
 
 jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
   if (i <= 0) {
-    // We want to check raw Object*/Array* below
+    // We want to check raw Object* / Array* below
     ScopedObjectAccess soa(env);
 
     // Build stack trace
@@ -599,7 +600,7 @@
     jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
     mirror::ObjectArray<mirror::StackTraceElement>* trace_array =
         soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array);
-    EXPECT_TRUE(trace_array != NULL);
+    EXPECT_TRUE(trace_array != nullptr);
     EXPECT_EQ(11, trace_array->GetLength());
 
     // Check stack trace entries have expected values
@@ -615,9 +616,9 @@
     return 0;
   } else {
     jclass jklass = env->FindClass("MyClassNatives");
-    EXPECT_TRUE(jklass != NULL);
+    EXPECT_TRUE(jklass != nullptr);
     jmethodID jmethod = env->GetMethodID(jklass, "fooI", "(I)I");
-    EXPECT_TRUE(jmethod != NULL);
+    EXPECT_TRUE(jmethod != nullptr);
 
     // Recurse with i - 1
     jint result = env->CallNonvirtualIntMethod(thisObj, jklass, jmethod, i - 1);
@@ -721,7 +722,7 @@
 
 TEST_F(JniCompilerTest, GetSinkPropertiesNative) {
   TEST_DISABLED_FOR_PORTABLE();
-  SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;", NULL);
+  SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;", nullptr);
   // This space intentionally left blank. Just testing compilation succeeds.
 }
 
@@ -804,7 +805,7 @@
 jfloat Java_MyClassNatives_checkFloats(JNIEnv* env, jobject thisObj, jfloat f1, jfloat f2) {
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(thisObj != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
   ScopedObjectAccess soa(Thread::Current());
   EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
@@ -826,12 +827,12 @@
 }
 
 void Java_MyClassNatives_checkParameterAlign(JNIEnv* env, jobject thisObj, jint i1, jlong l1) {
-  /*EXPECT_EQ(kNative, Thread::Current()->GetState());
-  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(thisObj != NULL);
-  EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
-  ScopedObjectAccess soa(Thread::Current());
-  EXPECT_EQ(1U, Thread::Current()->NumStackReferences());*/
+//  EXPECT_EQ(kNative, Thread::Current()->GetState());
+//  EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+//  EXPECT_TRUE(thisObj != nullptr);
+//  EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+//  ScopedObjectAccess soa(Thread::Current());
+//  EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
   EXPECT_EQ(i1, 1234);
   EXPECT_EQ(l1, INT64_C(0x12345678ABCDEF0));
 }
@@ -879,7 +880,7 @@
     jobject o248, jobject o249, jobject o250, jobject o251, jobject o252, jobject o253) {
   EXPECT_EQ(kNative, Thread::Current()->GetState());
   EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
-  EXPECT_TRUE(thisObj != NULL);
+  EXPECT_TRUE(thisObj != nullptr);
   EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
   ScopedObjectAccess soa(Thread::Current());
   EXPECT_GE(255U, Thread::Current()->NumStackReferences());
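
The nullptr sweep above leans on JNI's guarantee that IsSameObject accepts a null reference on either side, so a null return value needs no special-case comparison. A minimal sketch of the idiom (ReturnedNull is a hypothetical helper, not part of this change):

#include <jni.h>

// Hypothetical helper: IsSameObject(x, nullptr) is JNI_TRUE exactly when x
// refers to null, which is what the EXPECT_TRUE(IsSameObject(nullptr, result))
// assertions above rely on.
static bool ReturnedNull(JNIEnv* env, jobject result) {
  return env->IsSameObject(result, nullptr) == JNI_TRUE;
}
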
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 649a80f..f0c0ed7 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -143,9 +143,10 @@
 
 size_t ArmJniCallingConvention::FrameSize() {
   // Method*, LR and callee save area size, local reference segment state
-  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
+  size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+      (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
   // References plus 2 words for HandleScope header
-  size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
+  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
   // Plus return value spill area size
   return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
 }
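
The reshaped sum above bills the method slot as a StackReference (a 32-bit compressed reference) instead of a full pointer-sized word; on 32-bit ARM the two sizes coincide, so the change only matters on 64-bit targets. A worked example under assumed sizes (illustrative numbers, not taken from the headers):

// Assume a 64-bit target: kFramePointerSize == 8 and
// sizeof(StackReference<mirror::ArtMethod>) == 4.
constexpr size_t OldFrameData(size_t callee_saves) {
  return (3 + callee_saves) * 8;      // Method* counted as a full word
}
constexpr size_t NewFrameData(size_t callee_saves) {
  return 4 + (2 + callee_saves) * 8;  // Method* counted as a 4-byte reference
}
static_assert(OldFrameData(11) == 112 && NewFrameData(11) == 108,
              "the method slot shrinks by half a word before alignment");
// The same reshaping recurs in the arm64, mips, x86 and x86_64 conventions below.
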
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index ffd27ee..0a00d7d 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -95,7 +95,7 @@
   CHECK(IsCurrentParamOnStack());
   FrameOffset result =
       FrameOffset(displacement_.Int32Value() +   // displacement
-                  kFramePointerSize +                 // Method*
+                  sizeof(StackReference<mirror::ArtMethod>) +  // Method ref
                   (itr_slots_ * sizeof(uint32_t)));  // offset into in args
   return result;
 }
@@ -196,9 +196,10 @@
 
 size_t Arm64JniCallingConvention::FrameSize() {
   // Method*, callee save area size, local reference segment state
-  size_t frame_data_size = ((1 + CalleeSaveRegisters().size()) * kFramePointerSize) + sizeof(uint32_t);
+  size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+      CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
   // References plus 2 words for HandleScope header
-  size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
+  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
   // Plus return value spill area size
   return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
 }
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 2a6e7d9..efc0b42 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -319,7 +319,8 @@
 
   // Position of handle scope and interior fields
   FrameOffset HandleScopeOffset() const {
-    return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_);  // above Method*
+    // Above the Method* reference.
+    return FrameOffset(this->displacement_.Int32Value() + sizeof(StackReference<mirror::ArtMethod>));
   }
 
   FrameOffset HandleScopeLinkOffset() const {
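
The offset change above encodes the same layout assumption: the handle scope sits directly above the method reference at the bottom of the frame. A sketch of the assumed layout, with offsets growing upward from displacement_ (sizes illustrative):

// displacement_ + 0
//   StackReference<mirror::ArtMethod>   (4 bytes, the callee's method slot)
// displacement_ + sizeof(StackReference<mirror::ArtMethod>)
//   HandleScope header (link_, number_of_references_) followed by the
//   references -- this is what HandleScopeOffset() now returns.
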
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 0402fe6..f7a7be7 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -147,9 +147,10 @@
 
 size_t MipsJniCallingConvention::FrameSize() {
   // Method*, LR and callee save area size, local reference segment state
-  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
+  size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+      (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
   // References plus 2 words for HandleScope header
-  size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
+  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
   // Plus return value spill area size
   return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
 }
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 97b4cdf..9bf7d0f 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -124,9 +124,10 @@
 
 size_t X86JniCallingConvention::FrameSize() {
   // Method*, return address and callee save area size, local reference segment state
-  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
+  size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+      (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
   // References plus 2 words for HandleScope header
-  size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
+  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
   // Plus return value spill area size
   return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
 }
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 4871c87..5febed2 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -96,7 +96,7 @@
 
 FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
   return FrameOffset(displacement_.Int32Value() +   // displacement
-                     kFramePointerSize +                 // Method*
+                     sizeof(StackReference<mirror::ArtMethod>) +  // Method ref
                      (itr_slots_ * sizeof(uint32_t)));  // offset into in args
 }
 
@@ -139,9 +139,10 @@
 
 size_t X86_64JniCallingConvention::FrameSize() {
   // Method*, return address and callee save area size, local reference segment state
-  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
+  size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+      (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
   // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
-  size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
+  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
   // Plus return value spill area size
   return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
 }
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index a7ee82e..49cf71b 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -154,8 +154,8 @@
     }
     const char* descriptor = dex_file->GetClassDescriptor(class_def);
     StackHandleScope<1> hs(soa.Self());
-    auto loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
-    mirror::Class* klass = class_linker->FindClass(soa.Self(), descriptor, loader);
+    mirror::Class* klass = class_linker->FindClass(soa.Self(), descriptor,
+                                                   NullHandle<mirror::ClassLoader>());
 
     const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(i);
     CHECK_EQ(mirror::Class::Status::kStatusNotReady, oat_class.GetStatus()) << descriptor;
@@ -180,7 +180,7 @@
   EXPECT_EQ(80U, sizeof(OatHeader));
   EXPECT_EQ(8U, sizeof(OatMethodOffsets));
   EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
-  EXPECT_EQ(80 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+  EXPECT_EQ(79 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
 }
 
 TEST_F(OatTest, OatHeaderIsValid) {
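
The 80 -> 79 constant presumably tracks the entrypoint table shrinking by one slot elsewhere in this merge; the EXPECT works because QuickEntryPoints is a plain struct of function pointers, so its size is the slot count times the pointer size. A hedged restatement (FakeEntryPoints is an illustrative stand-in, not the real type):

struct FakeEntryPoints {
  void* entrypoints[79];  // one slot per quick entrypoint, no padding
};
static_assert(sizeof(FakeEntryPoints) == 79 * sizeof(void*),
              "a packed pointer array has no hidden padding");
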
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index bace25c..5d532ab 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -513,9 +513,10 @@
     ScopedObjectAccessUnchecked soa(Thread::Current());
     StackHandleScope<2> hs(soa.Self());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file_)));
-    auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
     mirror::ArtMethod* method = linker->ResolveMethod(*dex_file_, it.GetMemberIndex(), dex_cache,
-                                                      class_loader, nullptr, invoke_type);
+                                                      NullHandle<mirror::ClassLoader>(),
+                                                      NullHandle<mirror::ArtMethod>(),
+                                                      invoke_type);
     CHECK(method != NULL);
     // Portable code offsets are set by ElfWriterMclinker::FixupCompiledCodeOffset after linking.
     method->SetQuickOatCodeOffset(offsets.code_offset_);
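
Both hunks in this file swap a StackHandleScope slot holding null for NullHandle<T>(), which models a known-null argument without consuming a handle. A minimal sketch of the pattern (Consume is a hypothetical callee with a ResolveMethod-shaped parameter; declaration only):

// Hypothetical callee taking a Handle parameter, as ResolveMethod does.
void Consume(Handle<mirror::ClassLoader> class_loader);

void Caller() {
  // Before: a scope slot was burned on a null value.
  //   StackHandleScope<1> hs(Thread::Current());
  //   Consume(hs.NewHandle<mirror::ClassLoader>(nullptr));
  // After: a NullHandle needs no scope at all.
  Consume(NullHandle<mirror::ClassLoader>());
}
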
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index aafd801..e197ccd 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -20,6 +20,7 @@
 #include "base/bit_field.h"
 #include "globals.h"
 #include "instruction_set.h"
+#include "locations.h"
 #include "memory_region.h"
 #include "nodes.h"
 #include "utils/assembler.h"
@@ -46,267 +47,6 @@
   uintptr_t native_pc;
 };
 
-/**
- * A Location is an abstraction over the potential location
- * of an instruction. It could be in register or stack.
- */
-class Location : public ValueObject {
- public:
-  enum Kind {
-    kInvalid = 0,
-    kStackSlot = 1,  // Word size slot.
-    kDoubleStackSlot = 2,  // 64bit stack slot.
-    kRegister = 3,
-    // On 32bits architectures, quick can pass a long where the
-    // low bits are in the last parameter register, and the high
-    // bits are in a stack slot. The kQuickParameter kind is for
-    // handling this special case.
-    kQuickParameter = 4,
-
-    // Unallocated location represents a location that is not fixed and can be
-    // allocated by a register allocator.  Each unallocated location has
-    // a policy that specifies what kind of location is suitable. Payload
-    // contains register allocation policy.
-    kUnallocated = 5,
-  };
-
-  Location() : value_(kInvalid) {
-    DCHECK(!IsValid());
-  }
-
-  Location(const Location& other) : ValueObject(), value_(other.value_) {}
-
-  Location& operator=(const Location& other) {
-    value_ = other.value_;
-    return *this;
-  }
-
-  bool IsValid() const {
-    return value_ != kInvalid;
-  }
-
-  // Register locations.
-  static Location RegisterLocation(ManagedRegister reg) {
-    return Location(kRegister, reg.RegId());
-  }
-
-  bool IsRegister() const {
-    return GetKind() == kRegister;
-  }
-
-  ManagedRegister reg() const {
-    DCHECK(IsRegister());
-    return static_cast<ManagedRegister>(GetPayload());
-  }
-
-  static uword EncodeStackIndex(intptr_t stack_index) {
-    DCHECK(-kStackIndexBias <= stack_index);
-    DCHECK(stack_index < kStackIndexBias);
-    return static_cast<uword>(kStackIndexBias + stack_index);
-  }
-
-  static Location StackSlot(intptr_t stack_index) {
-    uword payload = EncodeStackIndex(stack_index);
-    Location loc(kStackSlot, payload);
-    // Ensure that sign is preserved.
-    DCHECK_EQ(loc.GetStackIndex(), stack_index);
-    return loc;
-  }
-
-  bool IsStackSlot() const {
-    return GetKind() == kStackSlot;
-  }
-
-  static Location DoubleStackSlot(intptr_t stack_index) {
-    uword payload = EncodeStackIndex(stack_index);
-    Location loc(kDoubleStackSlot, payload);
-    // Ensure that sign is preserved.
-    DCHECK_EQ(loc.GetStackIndex(), stack_index);
-    return loc;
-  }
-
-  bool IsDoubleStackSlot() const {
-    return GetKind() == kDoubleStackSlot;
-  }
-
-  intptr_t GetStackIndex() const {
-    DCHECK(IsStackSlot() || IsDoubleStackSlot());
-    // Decode stack index manually to preserve sign.
-    return GetPayload() - kStackIndexBias;
-  }
-
-  intptr_t GetHighStackIndex(uintptr_t word_size) const {
-    DCHECK(IsDoubleStackSlot());
-    // Decode stack index manually to preserve sign.
-    return GetPayload() - kStackIndexBias + word_size;
-  }
-
-  static Location QuickParameter(uint32_t parameter_index) {
-    return Location(kQuickParameter, parameter_index);
-  }
-
-  uint32_t GetQuickParameterIndex() const {
-    DCHECK(IsQuickParameter());
-    return GetPayload();
-  }
-
-  bool IsQuickParameter() const {
-    return GetKind() == kQuickParameter;
-  }
-
-  arm::ArmManagedRegister AsArm() const;
-  x86::X86ManagedRegister AsX86() const;
-
-  Kind GetKind() const {
-    return KindField::Decode(value_);
-  }
-
-  bool Equals(Location other) const {
-    return value_ == other.value_;
-  }
-
-  const char* DebugString() const {
-    switch (GetKind()) {
-      case kInvalid: return "?";
-      case kRegister: return "R";
-      case kStackSlot: return "S";
-      case kDoubleStackSlot: return "DS";
-      case kQuickParameter: return "Q";
-      case kUnallocated: return "U";
-    }
-    return "?";
-  }
-
-  // Unallocated locations.
-  enum Policy {
-    kAny,
-    kRequiresRegister,
-    kSameAsFirstInput,
-  };
-
-  bool IsUnallocated() const {
-    return GetKind() == kUnallocated;
-  }
-
-  static Location UnallocatedLocation(Policy policy) {
-    return Location(kUnallocated, PolicyField::Encode(policy));
-  }
-
-  // Any free register is suitable to replace this unallocated location.
-  static Location Any() {
-    return UnallocatedLocation(kAny);
-  }
-
-  static Location RequiresRegister() {
-    return UnallocatedLocation(kRequiresRegister);
-  }
-
-  // The location of the first input to the instruction will be
-  // used to replace this unallocated location.
-  static Location SameAsFirstInput() {
-    return UnallocatedLocation(kSameAsFirstInput);
-  }
-
-  Policy GetPolicy() const {
-    DCHECK(IsUnallocated());
-    return PolicyField::Decode(GetPayload());
-  }
-
-  uword GetEncoding() const {
-    return GetPayload();
-  }
-
- private:
-  // Number of bits required to encode Kind value.
-  static constexpr uint32_t kBitsForKind = 4;
-  static constexpr uint32_t kBitsForPayload = kWordSize * kBitsPerByte - kBitsForKind;
-
-  explicit Location(uword value) : value_(value) {}
-
-  Location(Kind kind, uword payload)
-      : value_(KindField::Encode(kind) | PayloadField::Encode(payload)) {}
-
-  uword GetPayload() const {
-    return PayloadField::Decode(value_);
-  }
-
-  typedef BitField<Kind, 0, kBitsForKind> KindField;
-  typedef BitField<uword, kBitsForKind, kBitsForPayload> PayloadField;
-
-  // Layout for kUnallocated locations payload.
-  typedef BitField<Policy, 0, 3> PolicyField;
-
-  // Layout for stack slots.
-  static const intptr_t kStackIndexBias =
-      static_cast<intptr_t>(1) << (kBitsForPayload - 1);
-
-  // Location either contains kind and payload fields or a tagged handle for
-  // a constant locations. Values of enumeration Kind are selected in such a
-  // way that none of them can be interpreted as a kConstant tag.
-  uword value_;
-};
-
-/**
- * The code generator computes LocationSummary for each instruction so that
- * the instruction itself knows what code to generate: where to find the inputs
- * and where to place the result.
- *
- * The intent is to have the code for generating the instruction independent of
- * register allocation. A register allocator just has to provide a LocationSummary.
- */
-class LocationSummary : public ArenaObject {
- public:
-  explicit LocationSummary(HInstruction* instruction)
-      : inputs_(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()),
-        temps_(instruction->GetBlock()->GetGraph()->GetArena(), 0) {
-    inputs_.SetSize(instruction->InputCount());
-    for (size_t i = 0; i < instruction->InputCount(); i++) {
-      inputs_.Put(i, Location());
-    }
-  }
-
-  void SetInAt(uint32_t at, Location location) {
-    inputs_.Put(at, location);
-  }
-
-  Location InAt(uint32_t at) const {
-    return inputs_.Get(at);
-  }
-
-  size_t GetInputCount() const {
-    return inputs_.Size();
-  }
-
-  void SetOut(Location location) {
-    output_ = Location(location);
-  }
-
-  void AddTemp(Location location) {
-    temps_.Add(location);
-  }
-
-  Location GetTemp(uint32_t at) const {
-    return temps_.Get(at);
-  }
-
-  void SetTempAt(uint32_t at, Location location) {
-    temps_.Put(at, location);
-  }
-
-  size_t GetTempCount() const {
-    return temps_.Size();
-  }
-
-  Location Out() const { return output_; }
-
- private:
-  GrowableArray<Location> inputs_;
-  GrowableArray<Location> temps_;
-  Location output_;
-
-  DISALLOW_COPY_AND_ASSIGN(LocationSummary);
-};
-
 class CodeGenerator : public ArenaObject {
  public:
   // Compiles the graph to executable instructions. Returns whether the compilation
@@ -334,6 +74,13 @@
   void SetFrameSize(uint32_t size) { frame_size_ = size; }
   uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
 
+  virtual size_t GetNumberOfCoreRegisters() const = 0;
+  virtual size_t GetNumberOfFloatingPointRegisters() const = 0;
+  virtual size_t GetNumberOfRegisters() const = 0;
+  virtual void SetupBlockedRegisters(bool* blocked_registers) const = 0;
+  virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
+  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
+
   void RecordPcInfo(uint32_t dex_pc) {
     struct PcInfo pc_info;
     pc_info.dex_pc = dex_pc;
@@ -352,8 +99,7 @@
         graph_(graph),
         block_labels_(graph->GetArena(), 0),
         pc_infos_(graph->GetArena(), 32),
-        blocked_registers_(static_cast<bool*>(
-            graph->GetArena()->Alloc(number_of_registers * sizeof(bool), kArenaAllocData))) {
+        blocked_registers_(graph->GetArena()->AllocArray<bool>(number_of_registers)) {
     block_labels_.SetSize(graph->GetBlocks().Size());
   }
   ~CodeGenerator() { }
@@ -369,9 +115,6 @@
   // the first available register.
   size_t AllocateFreeRegisterInternal(bool* blocked_registers, size_t number_of_registers) const;
 
-  virtual void SetupBlockedRegisters(bool* blocked_registers) const = 0;
-  virtual size_t GetNumberOfRegisters() const = 0;
-
   virtual Location GetStackLocation(HLoadLocal* load) const = 0;
 
   // Frame size required for this method.
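
The Location class moving out of this header packs a 4-bit Kind next to a biased payload; the bias is what lets a signed stack index live in an unsigned field. A worked decode under an assumed 32-bit uword, so that kBitsForPayload = 28 and kStackIndexBias = 1 << 27 (assumptions, not taken from a particular target):

#include <cstdint>

constexpr uint32_t kBias = 1u << 27;  // assumption: 32-bit uword, 4 kind bits
constexpr uint32_t EncodeStackIndex32(int32_t index) {
  return kBias + static_cast<uint32_t>(index);
}
// StackSlot(-1): payload 0x07FFFFFF, value_ 0x7FFFFFF1 (kind kStackSlot == 1
// in the low 4 bits), and decoding payload - kBias recovers -1 exactly.
static_assert(EncodeStackIndex32(-1) == 0x07FFFFFFu,
              "the bias keeps negative stack indexes representable");
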
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index be51232..ed3f43c 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -37,6 +37,14 @@
 static constexpr int kNumberOfPushedRegistersAtEntry = 1;
 static constexpr int kCurrentMethodStackOffset = 0;
 
+void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
+  stream << ArmManagedRegister::FromCoreRegister(Register(reg));
+}
+
+void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+  stream << ArmManagedRegister::FromDRegister(DRegister(reg));
+}
+
 CodeGeneratorARM::CodeGeneratorARM(HGraph* graph)
     : CodeGenerator(graph, kNumberOfRegIds),
       location_builder_(graph, this),
@@ -793,5 +801,13 @@
   LOG(FATAL) << "Unimplemented";
 }
 
+void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
+  LOG(FATAL) << "Unimplemented";
+}
+
+void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
+  LOG(FATAL) << "Unimplemented";
+}
+
 }  // namespace arm
 }  // namespace art
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 2405d4b..423b13e 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -133,6 +133,17 @@
   int32_t GetStackSlot(HLocal* local) const;
   virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
 
+  virtual size_t GetNumberOfCoreRegisters() const OVERRIDE {
+    return kNumberOfCoreRegisters;
+  }
+
+  virtual size_t GetNumberOfFloatingPointRegisters() const OVERRIDE {
+    return kNumberOfDRegisters;
+  }
+
+  virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
  private:
   // Helper method to move a 32bits value between two locations.
   void Move32(Location destination, Location source);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index e4f95c7..8bfd8d6 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -37,6 +37,14 @@
 static constexpr int kNumberOfPushedRegistersAtEntry = 1;
 static constexpr int kCurrentMethodStackOffset = 0;
 
+void CodeGeneratorX86::DumpCoreRegister(std::ostream& stream, int reg) const {
+  stream << X86ManagedRegister::FromCpuRegister(Register(reg));
+}
+
+void CodeGeneratorX86::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+  stream << X86ManagedRegister::FromXmmRegister(XmmRegister(reg));
+}
+
 CodeGeneratorX86::CodeGeneratorX86(HGraph* graph)
     : CodeGenerator(graph, kNumberOfRegIds),
       location_builder_(graph, this),
@@ -813,5 +821,13 @@
   LOG(FATAL) << "Unimplemented";
 }
 
+void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
+  LOG(FATAL) << "Unimplemented";
+}
+
+void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction) {
+  LOG(FATAL) << "Unimplemented";
+}
+
 }  // namespace x86
 }  // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 1ee11bf..4a70636 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -134,6 +134,17 @@
   int32_t GetStackSlot(HLocal* local) const;
   virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
 
+  virtual size_t GetNumberOfCoreRegisters() const OVERRIDE {
+    return kNumberOfCpuRegisters;
+  }
+
+  virtual size_t GetNumberOfFloatingPointRegisters() const OVERRIDE {
+    return kNumberOfXmmRegisters;
+  }
+
+  virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
  private:
   // Helper method to move a 32bits value between two locations.
   void Move32(Location destination, Location source);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 52e3e37..5c5042e 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -16,6 +16,7 @@
 
 #include "graph_visualizer.h"
 
+#include "code_generator.h"
 #include "driver/dex_compilation_unit.h"
 #include "nodes.h"
 #include "ssa_liveness_analysis.h"
@@ -27,8 +28,8 @@
  */
 class HGraphVisualizerPrinter : public HGraphVisitor {
  public:
-  HGraphVisualizerPrinter(HGraph* graph, std::ostream& output)
-      : HGraphVisitor(graph), output_(output), indent_(0) {}
+  HGraphVisualizerPrinter(HGraph* graph, std::ostream& output, const CodeGenerator& codegen)
+      : HGraphVisitor(graph), output_(output), codegen_(codegen), indent_(0) {}
 
   void StartTag(const char* name) {
     AddIndent();
@@ -107,17 +108,18 @@
       output_ << " (liveness: " << instruction->GetLifetimePosition();
       if (instruction->HasLiveInterval()) {
         output_ << " ";
-        const GrowableArray<LiveRange>& ranges = instruction->GetLiveInterval()->GetRanges();
-        size_t i = ranges.Size() - 1;
-        do {
-          output_ << "[" << ranges.Get(i).GetStart() << "," << ranges.Get(i).GetEnd() << "[";
-          if (i == 0) {
-            break;
+        const LiveInterval& interval = *instruction->GetLiveInterval();
+        interval.Dump(output_);
+        if (interval.HasRegister()) {
+          int reg = interval.GetRegister();
+          output_ << " ";
+          if (instruction->GetType() == Primitive::kPrimFloat
+              || instruction->GetType() == Primitive::kPrimDouble) {
+            codegen_.DumpFloatingPointRegister(output_, reg);
           } else {
-            --i;
-            output_ << ",";
+            codegen_.DumpCoreRegister(output_, reg);
           }
-        } while (true);
+        }
       }
       output_ << ")";
     }
@@ -186,6 +188,7 @@
 
  private:
   std::ostream& output_;
+  const CodeGenerator& codegen_;
   size_t indent_;
 
   DISALLOW_COPY_AND_ASSIGN(HGraphVisualizerPrinter);
@@ -194,8 +197,9 @@
 HGraphVisualizer::HGraphVisualizer(std::ostream* output,
                                    HGraph* graph,
                                    const char* string_filter,
+                                   const CodeGenerator& codegen,
                                    const DexCompilationUnit& cu)
-    : output_(output), graph_(graph), is_enabled_(false) {
+    : output_(output), graph_(graph), codegen_(codegen), is_enabled_(false) {
   if (output == nullptr) {
     return;
   }
@@ -205,7 +209,7 @@
   }
 
   is_enabled_ = true;
-  HGraphVisualizerPrinter printer(graph, *output_);
+  HGraphVisualizerPrinter printer(graph, *output_, codegen_);
   printer.StartTag("compilation");
   printer.PrintProperty("name", pretty_name.c_str());
   printer.PrintProperty("method", pretty_name.c_str());
@@ -215,14 +219,15 @@
 
 HGraphVisualizer::HGraphVisualizer(std::ostream* output,
                                    HGraph* graph,
+                                   const CodeGenerator& codegen,
                                    const char* name)
-    : output_(output), graph_(graph), is_enabled_(false) {
+    : output_(output), graph_(graph), codegen_(codegen), is_enabled_(false) {
   if (output == nullptr) {
     return;
   }
 
   is_enabled_ = true;
-  HGraphVisualizerPrinter printer(graph, *output_);
+  HGraphVisualizerPrinter printer(graph, *output_, codegen_);
   printer.StartTag("compilation");
   printer.PrintProperty("name", name);
   printer.PrintProperty("method", name);
@@ -234,7 +239,7 @@
   if (!is_enabled_) {
     return;
   }
-  HGraphVisualizerPrinter printer(graph_, *output_);
+  HGraphVisualizerPrinter printer(graph_, *output_, codegen_);
   printer.Run(pass_name);
 }
 
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 2b88e65..2638cf5 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -21,6 +21,7 @@
 
 namespace art {
 
+class CodeGenerator;
 class DexCompilationUnit;
 class HGraph;
 
@@ -39,13 +40,17 @@
   HGraphVisualizer(std::ostream* output,
                    HGraph* graph,
                    const char* string_filter,
+                   const CodeGenerator& codegen,
                    const DexCompilationUnit& cu);
 
   /**
    * Version of `HGraphVisualizer` for unit testing, that is when a
    * `DexCompilationUnit` is not available.
    */
-  HGraphVisualizer(std::ostream* output, HGraph* graph, const char* name);
+  HGraphVisualizer(std::ostream* output,
+                   HGraph* graph,
+                   const CodeGenerator& codegen,
+                   const char* name);
 
   /**
    * If this visualizer is enabled, emit the compilation information
@@ -56,6 +61,7 @@
  private:
   std::ostream* const output_;
   HGraph* const graph_;
+  const CodeGenerator& codegen_;
 
   // Is true when `output_` is not null, and the compiled method's name
   // contains the string_filter given in the constructor.
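
All construction sites of HGraphVisualizer now thread a CodeGenerator through so liveness dumps can name physical registers. A hedged sketch of the testing constructor in use, assuming an HGraph* is in scope and that the dump entry point is the method whose body appears in the .cc hunk above (stream and pass names illustrative):

#include <fstream>

void DumpForTest(HGraph* graph) {
  std::ofstream out("graph.cfg");        // illustrative destination
  x86::CodeGeneratorX86 codegen(graph);  // any concrete CodeGenerator works
  HGraphVisualizer visualizer(&out, graph, codegen, "test-method");
  visualizer.DumpGraph("liveness");      // assumed name of the dump method
}
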
diff --git a/compiler/optimizing/live_interval_test.cc b/compiler/optimizing/live_interval_test.cc
new file mode 100644
index 0000000..3e4b83b
--- /dev/null
+++ b/compiler/optimizing/live_interval_test.cc
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "optimizing_unit_test.h"
+#include "ssa_liveness_analysis.h"
+#include "utils/arena_allocator.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(LiveInterval, GetStart) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    static constexpr size_t ranges[][2] = {{0, 42}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_EQ(0u, interval->GetStart());
+  }
+
+  {
+    static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_EQ(4u, interval->GetStart());
+  }
+}
+
+TEST(LiveInterval, IsDeadAt) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    static constexpr size_t ranges[][2] = {{0, 42}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_TRUE(interval->IsDeadAt(42));
+    ASSERT_TRUE(interval->IsDeadAt(43));
+    ASSERT_FALSE(interval->IsDeadAt(41));
+    ASSERT_FALSE(interval->IsDeadAt(0));
+    ASSERT_FALSE(interval->IsDeadAt(22));
+  }
+
+  {
+    static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_TRUE(interval->IsDeadAt(16));
+    ASSERT_TRUE(interval->IsDeadAt(32));
+    ASSERT_FALSE(interval->IsDeadAt(0));
+    ASSERT_FALSE(interval->IsDeadAt(4));
+    ASSERT_FALSE(interval->IsDeadAt(12));
+    ASSERT_FALSE(interval->IsDeadAt(13));
+    ASSERT_FALSE(interval->IsDeadAt(14));
+    ASSERT_FALSE(interval->IsDeadAt(15));
+  }
+}
+
+TEST(LiveInterval, Covers) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    static constexpr size_t ranges[][2] = {{0, 42}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_TRUE(interval->Covers(0));
+    ASSERT_TRUE(interval->Covers(4));
+    ASSERT_TRUE(interval->Covers(41));
+    ASSERT_FALSE(interval->Covers(42));
+    ASSERT_FALSE(interval->Covers(54));
+  }
+
+  {
+    static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_TRUE(interval->Covers(4));
+    ASSERT_TRUE(interval->Covers(11));
+    ASSERT_TRUE(interval->Covers(14));
+    ASSERT_TRUE(interval->Covers(15));
+    ASSERT_FALSE(interval->Covers(0));
+    ASSERT_FALSE(interval->Covers(12));
+    ASSERT_FALSE(interval->Covers(13));
+    ASSERT_FALSE(interval->Covers(16));
+  }
+}
+
+TEST(LiveInterval, FirstIntersectionWith) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{5, 6}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{5, 42}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(8u, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {11, 12}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {9, 10}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(9u, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 1}, {2, 7}, {8, 10}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{1, 2}, {6, 7}, {9, 10}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(6u, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {55, 58}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{1, 2}, {11, 42}, {43, 48}, {54, 56}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(55u, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {15, 18}, {27, 32}, {41, 53}, {54, 60}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{1, 2}, {11, 12}, {19, 25}, {34, 42}, {52, 60}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(41u, interval1->FirstIntersectionWith(interval2));
+  }
+}
+
+static bool RangesEquals(LiveInterval* interval,
+                         const size_t expected[][2],
+                         size_t number_of_expected_ranges) {
+  LiveRange* current = interval->GetFirstRange();
+
+  size_t i = 0;
+  for (;
+       i < number_of_expected_ranges && current != nullptr;
+       ++i, current = current->GetNext()) {
+    if (expected[i][0] != current->GetStart()) {
+      return false;
+    }
+    if (expected[i][1] != current->GetEnd()) {
+      return false;
+    }
+  }
+
+  if (current != nullptr || i != number_of_expected_ranges) {
+    return false;
+  }
+
+  return true;
+}
+
+TEST(LiveInterval, SplitAt) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    // Test within one range.
+    static constexpr size_t ranges[][2] = {{0, 4}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(1);
+    static constexpr size_t expected[][2] = {{0, 1}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{1, 4}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test just before the end of one range.
+    static constexpr size_t ranges[][2] = {{0, 4}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(3);
+    static constexpr size_t expected[][2] = {{0, 3}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{3, 4}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test within the first range.
+    static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(1);
+    static constexpr size_t expected[][2] = {{0, 1}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{1, 4}, {8, 12}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test in a hole.
+    static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(5);
+    static constexpr size_t expected[][2] = {{0, 4}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{8, 12}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test within the second range.
+    static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(9);
+    static constexpr size_t expected[][2] = {{0, 4}, {8, 9}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{9, 12}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test at the beginning of the second range.
+    static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(6);
+    static constexpr size_t expected[][2] = {{0, 4}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{6, 10}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test at the end of the first range.
+    static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(4);
+    static constexpr size_t expected[][2] = {{0, 4}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{6, 10}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test that we get null if we split at a position where the interval is dead.
+    static constexpr size_t ranges[][2] = {{0, 4}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(5);
+    ASSERT_TRUE(split == nullptr);
+    ASSERT_TRUE(RangesEquals(interval, ranges, arraysize(ranges)));
+  }
+}
+
+}  // namespace art
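
Taken together, the tests in this new file pin down half-open [start, end) semantics for live ranges and hole-snapping for splits. A compact restatement using the same helpers as the tests (BuildInterval comes from optimizing_unit_test.h, as above):

static void HalfOpenSketch(ArenaAllocator* allocator) {
  static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
  LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
  CHECK(interval->Covers(3));    // 3 lies inside [0, 4)
  CHECK(!interval->Covers(4));   // the end position is exclusive
  LiveInterval* rest = interval->SplitAt(6);  // 6 falls in the hole [4, 8)
  CHECK_EQ(8u, rest->GetStart());             // the split snaps to the next range
}
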
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 9849388..c797497 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -43,11 +43,11 @@
    *
    * Which becomes the following graph (numbered by lifetime position):
    *       2: constant0
-   *       3: goto
+   *       4: goto
    *           |
-   *       6: return
+   *       8: return
    *           |
-   *       9: exit
+   *       12: exit
    */
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -60,14 +60,14 @@
   liveness.Analyze();
 
   LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  LiveRange range = interval->GetRanges().Get(0);
-  ASSERT_EQ(2u, range.GetStart());
+  LiveRange* range = interval->GetFirstRange();
+  ASSERT_EQ(2u, range->GetStart());
   // Last use is the return instruction.
-  ASSERT_EQ(6u, range.GetEnd());
+  ASSERT_EQ(8u, range->GetEnd());
   HBasicBlock* block = graph->GetBlocks().Get(1);
   ASSERT_TRUE(block->GetLastInstruction()->AsReturn() != nullptr);
-  ASSERT_EQ(6u, block->GetLastInstruction()->GetLifetimePosition());
+  ASSERT_EQ(8u, block->GetLastInstruction()->GetLifetimePosition());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 }
 
 TEST(LiveRangesTest, CFG2) {
@@ -81,16 +81,16 @@
    *
    * Which becomes the following graph (numbered by lifetime position):
    *       2: constant0
-   *       3: goto
+   *       4: goto
    *           |
-   *       6: equal
-   *       7: if
+   *       8: equal
+   *       10: if
    *       /       \
-   *   10: goto   13: goto
+   *   14: goto   18: goto
    *       \       /
-   *       16: return
+   *       22: return
    *         |
-   *       19: exit
+   *       26: exit
    */
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -105,14 +105,14 @@
   liveness.Analyze();
 
   LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  LiveRange range = interval->GetRanges().Get(0);
-  ASSERT_EQ(2u, range.GetStart());
+  LiveRange* range = interval->GetFirstRange();
+  ASSERT_EQ(2u, range->GetStart());
   // Last use is the return instruction.
-  ASSERT_EQ(16u, range.GetEnd());
+  ASSERT_EQ(22u, range->GetEnd());
   HBasicBlock* block = graph->GetBlocks().Get(3);
   ASSERT_TRUE(block->GetLastInstruction()->AsReturn() != nullptr);
-  ASSERT_EQ(16u, block->GetLastInstruction()->GetLifetimePosition());
+  ASSERT_EQ(22u, block->GetLastInstruction()->GetLifetimePosition());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 }
 
 TEST(LiveRangesTest, CFG3) {
@@ -127,18 +127,18 @@
    *
    * Which becomes the following graph (numbered by lifetime position):
    *       2: constant0
-   *       3: constant4
-   *       4: goto
+   *       4: constant4
+   *       6: goto
    *           |
-   *       7: equal
-   *       8: if
+   *       10: equal
+   *       12: if
    *       /       \
-   *   11: goto   14: goto
+   *   16: goto   20: goto
    *       \       /
-   *       16: phi
-   *       17: return
+   *       22: phi
+   *       24: return
    *         |
-   *       20: exit
+   *       28: exit
    */
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -154,34 +154,34 @@
 
   // Test for the 0 constant.
   LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  LiveRange range = interval->GetRanges().Get(0);
-  ASSERT_EQ(2u, range.GetStart());
+  LiveRange* range = interval->GetFirstRange();
+  ASSERT_EQ(2u, range->GetStart());
   // Last use is the phi at the return block so instruction is live until
   // the end of the then block.
-  ASSERT_EQ(12u, range.GetEnd());
+  ASSERT_EQ(18u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 
   // Test for the 4 constant.
   interval = liveness.GetInstructionFromSsaIndex(1)->GetLiveInterval();
   // The then branch is a hole for this constant, therefore its interval has 2 ranges.
-  ASSERT_EQ(2u, interval->GetRanges().Size());
-  // First range is the else block.
-  range = interval->GetRanges().Get(0);
-  ASSERT_EQ(13u, range.GetStart());
-  // Last use is the phi at the return block.
-  ASSERT_EQ(15u, range.GetEnd());
-  // Second range starts from the definition and ends at the if block.
-  range = interval->GetRanges().Get(1);
-  ASSERT_EQ(3u, range.GetStart());
+  // First range starts from the definition and ends at the if block.
+  range = interval->GetFirstRange();
+  ASSERT_EQ(4u, range->GetStart());
-  // 9 is the end of the if block.
-  ASSERT_EQ(9u, range.GetEnd());
+  // 14 is the end of the if block.
+  ASSERT_EQ(14u, range->GetEnd());
+  // Second range is the else block.
+  range = range->GetNext();
+  ASSERT_EQ(18u, range->GetStart());
+  // Last use is the phi at the return block.
+  ASSERT_EQ(22u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 
   // Test for the phi.
   interval = liveness.GetInstructionFromSsaIndex(3)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  range = interval->GetRanges().Get(0);
-  ASSERT_EQ(16u, range.GetStart());
-  ASSERT_EQ(17u, range.GetEnd());
+  range = interval->GetFirstRange();
+  ASSERT_EQ(22u, range->GetStart());
+  ASSERT_EQ(24u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 }
 
 TEST(LiveRangesTest, Loop) {
@@ -195,21 +195,21 @@
    *
    * Which becomes the following graph (numbered by lifetime position):
    *       2: constant0
-   *       3: constant4
-   *       4: constant5
-   *       5: goto
-   *           |
+   *       4: constant4
+   *       6: constant5
    *       8: goto
    *           |
-   *       10: phi
-   *       11: equal
-   *       12: if +++++
+   *       12: goto
+   *           |
+   *       14: phi
+   *       16: equal
+   *       18: if +++++
    *        |       \ +
-   *        |     15: goto
+   *        |     22: goto
    *        |
-   *       18: return
+   *       26: return
    *         |
-   *       21: exit
+   *       30: exit
    */
 
   const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
@@ -228,36 +228,36 @@
 
   // Test for the 0 constant.
   LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  LiveRange range = interval->GetRanges().Get(0);
-  ASSERT_EQ(2u, range.GetStart());
+  LiveRange* range = interval->GetFirstRange();
+  ASSERT_EQ(2u, range->GetStart());
   // Last use is the loop phi so instruction is live until
   // the end of the pre loop header.
-  ASSERT_EQ(9u, range.GetEnd());
+  ASSERT_EQ(14u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 
   // Test for the 4 constant.
   interval = liveness.GetInstructionFromSsaIndex(1)->GetLiveInterval();
+  range = interval->GetFirstRange();
   // The instruction is live until the end of the loop.
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  range = interval->GetRanges().Get(0);
-  ASSERT_EQ(3u, range.GetStart());
-  ASSERT_EQ(16u, range.GetEnd());
+  ASSERT_EQ(4u, range->GetStart());
+  ASSERT_EQ(24u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 
   // Test for the 5 constant.
   interval = liveness.GetInstructionFromSsaIndex(2)->GetLiveInterval();
-  // The instruction is live until the return instruction of the loop.
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  range = interval->GetRanges().Get(0);
-  ASSERT_EQ(4u, range.GetStart());
-  ASSERT_EQ(18u, range.GetEnd());
+  range = interval->GetFirstRange();
+  // The instruction is live until the return instruction after the loop.
+  ASSERT_EQ(6u, range->GetStart());
+  ASSERT_EQ(26u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 
   // Test for the phi.
   interval = liveness.GetInstructionFromSsaIndex(3)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  range = interval->GetRanges().Get(0);
+  range = interval->GetFirstRange();
   // Instruction is consumed by the if.
-  ASSERT_EQ(10u, range.GetStart());
-  ASSERT_EQ(11u, range.GetEnd());
+  ASSERT_EQ(14u, range->GetStart());
+  ASSERT_EQ(16u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 }
 
 }  // namespace art
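
A hedged reading of the doubled positions throughout these comments: instructions now occupy even lifetime positions, leaving odd positions free so a register allocator can split an interval and insert a move between two instructions. In sketch form (the names are illustrative, not from this change):

constexpr size_t kPositionsPerInstruction = 2;  // assumed spacing behind the new numbering
constexpr size_t MovePositionAfter(size_t instruction_position) {
  return instruction_position + 1;  // the odd slot between two instructions
}
static_assert(MovePositionAfter(2) == 3, "room for a move between positions 2 and 4");
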
diff --git a/compiler/dex/bit_vector_block_iterator.cc b/compiler/optimizing/locations.cc
similarity index 61%
copy from compiler/dex/bit_vector_block_iterator.cc
copy to compiler/optimizing/locations.cc
index 32d7d71..98766d2 100644
--- a/compiler/dex/bit_vector_block_iterator.cc
+++ b/compiler/optimizing/locations.cc
@@ -1,4 +1,4 @@
-/*
+/*
  * Copyright (C) 2014 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,19 +14,19 @@
  * limitations under the License.
  */
 
-#include "bit_vector_block_iterator.h"
-#include "mir_graph.h"
+#include "locations.h"
+
+#include "nodes.h"
 
 namespace art {
 
-BasicBlock* BitVectorBlockIterator::Next() {
-  int idx = internal_iterator_.Next();
-
-  if (idx == -1) {
-    return nullptr;
+LocationSummary::LocationSummary(HInstruction* instruction)
+    : inputs_(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()),
+      temps_(instruction->GetBlock()->GetGraph()->GetArena(), 0) {
+  inputs_.SetSize(instruction->InputCount());
+  for (size_t i = 0; i < instruction->InputCount(); i++) {
+    inputs_.Put(i, Location());
   }
-
-  return mir_graph_->GetBasicBlock(idx);
 }
 
 }  // namespace art
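
With LocationSummary's constructor now living here, a locations builder fills one in per instruction. A minimal sketch using the API as it appeared in the class removed from code_generator.h (the SetLocations setter is assumed from the HInstruction interface):

void BuildLocationsForAdd(HInstruction* add) {
  ArenaAllocator* arena = add->GetBlock()->GetGraph()->GetArena();
  LocationSummary* locations = new (arena) LocationSummary(add);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::Any());
  locations->SetOut(Location::SameAsFirstInput());
  add->SetLocations(locations);  // assumed setter on HInstruction
}
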
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
new file mode 100644
index 0000000..3c60d3c
--- /dev/null
+++ b/compiler/optimizing/locations.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_LOCATIONS_H_
+#define ART_COMPILER_OPTIMIZING_LOCATIONS_H_
+
+#include "base/bit_field.h"
+#include "utils/allocation.h"
+#include "utils/growable_array.h"
+#include "utils/managed_register.h"
+
+namespace art {
+
+class HInstruction;
+
+/**
+ * A Location is an abstraction over the potential location
+ * of an instruction. It can be in a register or on the stack.
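+ *
+ * A usage sketch (the register number and stack index are arbitrary here):
+ *   Location reg = Location::RegisterLocation(ManagedRegister(0));
+ *   Location slot = Location::StackSlot(4);
+ *   Location any = Location::Any();  // resolved later by the register allocator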
+ */
+class Location : public ValueObject {
+ public:
+  enum Kind {
+    kInvalid = 0,
+    kStackSlot = 1,  // Word size slot.
+    kDoubleStackSlot = 2,  // 64-bit stack slot.
+    kRegister = 3,
+    // On 32-bit architectures, quick can pass a long where the
+    // low bits are in the last parameter register, and the high
+    // bits are in a stack slot. The kQuickParameter kind is for
+    // handling this special case.
+    kQuickParameter = 4,
+
+    // Unallocated location represents a location that is not fixed and can be
+    // allocated by a register allocator.  Each unallocated location has
+    // a policy that specifies what kind of location is suitable. Payload
+    // contains register allocation policy.
+    kUnallocated = 5,
+  };
+
+  Location() : value_(kInvalid) {
+    DCHECK(!IsValid());
+  }
+
+  Location(const Location& other) : ValueObject(), value_(other.value_) {}
+
+  Location& operator=(const Location& other) {
+    value_ = other.value_;
+    return *this;
+  }
+
+  bool IsValid() const {
+    return value_ != kInvalid;
+  }
+
+  bool IsInvalid() const {
+    return !IsValid();
+  }
+
+  bool IsConstant() const {
+    // TODO: support constants.
+    return false;
+  }
+
+  // Empty location. Used if the location should be ignored.
+  static Location NoLocation() {
+    return Location();
+  }
+
+  // Register locations.
+  static Location RegisterLocation(ManagedRegister reg) {
+    return Location(kRegister, reg.RegId());
+  }
+
+  bool IsRegister() const {
+    return GetKind() == kRegister;
+  }
+
+  ManagedRegister reg() const {
+    DCHECK(IsRegister());
+    return static_cast<ManagedRegister>(GetPayload());
+  }
+
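+  // Stack slot payloads are biased by kStackIndexBias so that negative stack
+  // indices encode to a non-negative payload; for instance, index -1 encodes
+  // as kStackIndexBias - 1.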
+  static uword EncodeStackIndex(intptr_t stack_index) {
+    DCHECK(-kStackIndexBias <= stack_index);
+    DCHECK(stack_index < kStackIndexBias);
+    return static_cast<uword>(kStackIndexBias + stack_index);
+  }
+
+  static Location StackSlot(intptr_t stack_index) {
+    uword payload = EncodeStackIndex(stack_index);
+    Location loc(kStackSlot, payload);
+    // Ensure that sign is preserved.
+    DCHECK_EQ(loc.GetStackIndex(), stack_index);
+    return loc;
+  }
+
+  bool IsStackSlot() const {
+    return GetKind() == kStackSlot;
+  }
+
+  static Location DoubleStackSlot(intptr_t stack_index) {
+    uword payload = EncodeStackIndex(stack_index);
+    Location loc(kDoubleStackSlot, payload);
+    // Ensure that sign is preserved.
+    DCHECK_EQ(loc.GetStackIndex(), stack_index);
+    return loc;
+  }
+
+  bool IsDoubleStackSlot() const {
+    return GetKind() == kDoubleStackSlot;
+  }
+
+  intptr_t GetStackIndex() const {
+    DCHECK(IsStackSlot() || IsDoubleStackSlot());
+    // Decode stack index manually to preserve sign.
+    return GetPayload() - kStackIndexBias;
+  }
+
+  intptr_t GetHighStackIndex(uintptr_t word_size) const {
+    DCHECK(IsDoubleStackSlot());
+    // Decode stack index manually to preserve sign.
+    return GetPayload() - kStackIndexBias + word_size;
+  }
+
+  static Location QuickParameter(uint32_t parameter_index) {
+    return Location(kQuickParameter, parameter_index);
+  }
+
+  uint32_t GetQuickParameterIndex() const {
+    DCHECK(IsQuickParameter());
+    return GetPayload();
+  }
+
+  bool IsQuickParameter() const {
+    return GetKind() == kQuickParameter;
+  }
+
+  arm::ArmManagedRegister AsArm() const;
+  x86::X86ManagedRegister AsX86() const;
+
+  Kind GetKind() const {
+    return KindField::Decode(value_);
+  }
+
+  bool Equals(Location other) const {
+    return value_ == other.value_;
+  }
+
+  const char* DebugString() const {
+    switch (GetKind()) {
+      case kInvalid: return "?";
+      case kRegister: return "R";
+      case kStackSlot: return "S";
+      case kDoubleStackSlot: return "DS";
+      case kQuickParameter: return "Q";
+      case kUnallocated: return "U";
+    }
+    return "?";
+  }
+
+  // Unallocated locations.
+  enum Policy {
+    kAny,
+    kRequiresRegister,
+    kSameAsFirstInput,
+  };
+
+  bool IsUnallocated() const {
+    return GetKind() == kUnallocated;
+  }
+
+  static Location UnallocatedLocation(Policy policy) {
+    return Location(kUnallocated, PolicyField::Encode(policy));
+  }
+
+  // Any free register is suitable to replace this unallocated location.
+  static Location Any() {
+    return UnallocatedLocation(kAny);
+  }
+
+  static Location RequiresRegister() {
+    return UnallocatedLocation(kRequiresRegister);
+  }
+
+  // The location of the first input to the instruction will be
+  // used to replace this unallocated location.
+  static Location SameAsFirstInput() {
+    return UnallocatedLocation(kSameAsFirstInput);
+  }
+
+  Policy GetPolicy() const {
+    DCHECK(IsUnallocated());
+    return PolicyField::Decode(GetPayload());
+  }
+
+  uword GetEncoding() const {
+    return GetPayload();
+  }
+
+ private:
+  // Number of bits required to encode Kind value.
+  static constexpr uint32_t kBitsForKind = 4;
+  static constexpr uint32_t kBitsForPayload = kWordSize * kBitsPerByte - kBitsForKind;
+
+  explicit Location(uword value) : value_(value) {}
+
+  Location(Kind kind, uword payload)
+      : value_(KindField::Encode(kind) | PayloadField::Encode(payload)) {}
+
+  uword GetPayload() const {
+    return PayloadField::Decode(value_);
+  }
+
+  typedef BitField<Kind, 0, kBitsForKind> KindField;
+  typedef BitField<uword, kBitsForKind, kBitsForPayload> PayloadField;
+
+  // Layout for kUnallocated locations payload.
+  typedef BitField<Policy, 0, 3> PolicyField;
+
+  // Layout for stack slots.
+  static const intptr_t kStackIndexBias =
+      static_cast<intptr_t>(1) << (kBitsForPayload - 1);
+
+  // Location either contains kind and payload fields or a tagged handle for
+  // a constant location. Values of enumeration Kind are selected in such a
+  // way that none of them can be interpreted as a kConstant tag.
+  uword value_;
+};
+
+/**
+ * The code generator computes a LocationSummary for each instruction so that
+ * the instruction itself knows what code to generate: where to find the inputs
+ * and where to place the result.
+ *
+ * The intent is to have the code for generating the instruction independent of
+ * register allocation. A register allocator just has to provide a LocationSummary.
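+ *
+ * A sketch of what a code generator might do for a hypothetical binary
+ * operation `add` (the names `arena` and `add` are illustrative, not part
+ * of this change):
+ *   LocationSummary* locations = new (arena) LocationSummary(add);
+ *   locations->SetInAt(0, Location::RequiresRegister());
+ *   locations->SetInAt(1, Location::Any());
+ *   locations->SetOut(Location::SameAsFirstInput());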
+ */
+class LocationSummary : public ArenaObject {
+ public:
+  explicit LocationSummary(HInstruction* instruction);
+
+  void SetInAt(uint32_t at, Location location) {
+    inputs_.Put(at, location);
+  }
+
+  Location InAt(uint32_t at) const {
+    return inputs_.Get(at);
+  }
+
+  size_t GetInputCount() const {
+    return inputs_.Size();
+  }
+
+  void SetOut(Location location) {
+    output_ = Location(location);
+  }
+
+  void AddTemp(Location location) {
+    temps_.Add(location);
+  }
+
+  Location GetTemp(uint32_t at) const {
+    return temps_.Get(at);
+  }
+
+  void SetTempAt(uint32_t at, Location location) {
+    temps_.Put(at, location);
+  }
+
+  size_t GetTempCount() const {
+    return temps_.Size();
+  }
+
+  Location Out() const { return output_; }
+
+ private:
+  GrowableArray<Location> inputs_;
+  GrowableArray<Location> temps_;
+  Location output_;
+
+  DISALLOW_COPY_AND_ASSIGN(LocationSummary);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_LOCATIONS_H_
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index afaedd7..752466b 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -291,6 +291,17 @@
   return false;
 }
 
+void HBasicBlock::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) {
+  DCHECK(cursor->AsPhi() == nullptr);
+  DCHECK(instruction->AsPhi() == nullptr);
+  instruction->next_ = cursor;
+  instruction->previous_ = cursor->previous_;
+  cursor->previous_ = instruction;
+  if (GetFirstInstruction() == cursor) {
+    instructions_.first_instruction_ = instruction;
+  } else {
+    // Link the previous instruction forward to `instruction`; otherwise
+    // forward iteration would skip it.
+    instruction->previous_->next_ = instruction;
+  }
+}
+
 static void Add(HInstructionList* instruction_list,
                 HBasicBlock* block,
                 HInstruction* instruction) {
@@ -377,6 +388,7 @@
 }
 
 void HInstruction::ReplaceWith(HInstruction* other) {
+  DCHECK(other != nullptr);
   for (HUseIterator<HInstruction> it(GetUses()); !it.Done(); it.Advance()) {
     HUseListNode<HInstruction>* current = it.Current();
     HInstruction* user = current->GetUser();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index a2cb1c4..b1c8016 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_NODES_H_
 #define ART_COMPILER_OPTIMIZING_NODES_H_
 
+#include "locations.h"
 #include "utils/allocation.h"
 #include "utils/arena_bit_vector.h"
 #include "utils/growable_array.h"
@@ -275,6 +276,7 @@
   HInstruction* GetLastInstruction() const { return instructions_.last_instruction_; }
   const HInstructionList& GetInstructions() const { return instructions_; }
   const HInstructionList& GetPhis() const { return phis_; }
+  HInstruction* GetFirstPhi() const { return phis_.first_instruction_; }
 
   void AddSuccessor(HBasicBlock* block) {
     successors_.Add(block);
@@ -315,6 +317,7 @@
 
   void AddInstruction(HInstruction* instruction);
   void RemoveInstruction(HInstruction* instruction);
+  void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
   void AddPhi(HPhi* phi);
   void RemovePhi(HPhi* phi);
 
@@ -383,6 +386,7 @@
   M(NewInstance)                                           \
   M(Not)                                                   \
   M(ParameterValue)                                        \
+  M(ParallelMove)                                          \
   M(Phi)                                                   \
   M(Return)                                                \
   M(ReturnVoid)                                            \
@@ -394,9 +398,9 @@
 #undef FORWARD_DECLARATION
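+// Note: `Accept` is declared last and without a trailing semicolon so that
+// call sites terminate the macro themselves, e.g. DECLARE_INSTRUCTION(Phi);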
 
 #define DECLARE_INSTRUCTION(type)                          \
-  virtual void Accept(HGraphVisitor* visitor);             \
   virtual const char* DebugName() const { return #type; }  \
   virtual H##type* As##type() { return this; }             \
+  virtual void Accept(HGraphVisitor* visitor)              \
 
 template <typename T>
 class HUseListNode : public ArenaObject {
@@ -731,7 +735,7 @@
  public:
   HReturnVoid() { }
 
-  DECLARE_INSTRUCTION(ReturnVoid)
+  DECLARE_INSTRUCTION(ReturnVoid);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HReturnVoid);
@@ -745,7 +749,7 @@
     SetRawInputAt(0, value);
   }
 
-  DECLARE_INSTRUCTION(Return)
+  DECLARE_INSTRUCTION(Return);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HReturn);
@@ -758,7 +762,7 @@
  public:
   HExit() { }
 
-  DECLARE_INSTRUCTION(Exit)
+  DECLARE_INSTRUCTION(Exit);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HExit);
@@ -773,7 +777,7 @@
     return GetBlock()->GetSuccessors().Get(0);
   }
 
-  DECLARE_INSTRUCTION(Goto)
+  DECLARE_INSTRUCTION(Goto);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HGoto);
@@ -795,7 +799,7 @@
     return GetBlock()->GetSuccessors().Get(1);
   }
 
-  DECLARE_INSTRUCTION(If)
+  DECLARE_INSTRUCTION(If);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HIf);
@@ -834,7 +838,7 @@
 
   virtual Primitive::Type GetType() const { return Primitive::kPrimBoolean; }
 
-  DECLARE_INSTRUCTION(Equal)
+  DECLARE_INSTRUCTION(Equal);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HEqual);
@@ -845,7 +849,7 @@
  public:
   explicit HLocal(uint16_t reg_number) : reg_number_(reg_number) { }
 
-  DECLARE_INSTRUCTION(Local)
+  DECLARE_INSTRUCTION(Local);
 
   uint16_t GetRegNumber() const { return reg_number_; }
 
@@ -867,7 +871,7 @@
 
   HLocal* GetLocal() const { return reinterpret_cast<HLocal*>(InputAt(0)); }
 
-  DECLARE_INSTRUCTION(LoadLocal)
+  DECLARE_INSTRUCTION(LoadLocal);
 
  private:
   const Primitive::Type type_;
@@ -886,7 +890,7 @@
 
   HLocal* GetLocal() const { return reinterpret_cast<HLocal*>(InputAt(0)); }
 
-  DECLARE_INSTRUCTION(StoreLocal)
+  DECLARE_INSTRUCTION(StoreLocal);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HStoreLocal);
@@ -901,7 +905,7 @@
   int32_t GetValue() const { return value_; }
   virtual Primitive::Type GetType() const { return Primitive::kPrimInt; }
 
-  DECLARE_INSTRUCTION(IntConstant)
+  DECLARE_INSTRUCTION(IntConstant);
 
  private:
   const int32_t value_;
@@ -917,7 +921,7 @@
 
   virtual Primitive::Type GetType() const { return Primitive::kPrimLong; }
 
-  DECLARE_INSTRUCTION(LongConstant)
+  DECLARE_INSTRUCTION(LongConstant);
 
  private:
   const int64_t value_;
@@ -977,7 +981,7 @@
 
   uint32_t GetIndexInDexCache() const { return index_in_dex_cache_; }
 
-  DECLARE_INSTRUCTION(InvokeStatic)
+  DECLARE_INSTRUCTION(InvokeStatic);
 
  private:
   const uint32_t index_in_dex_cache_;
@@ -997,7 +1001,7 @@
   // Calls runtime so needs an environment.
   virtual bool NeedsEnvironment() const { return true; }
 
-  DECLARE_INSTRUCTION(NewInstance)
+  DECLARE_INSTRUCTION(NewInstance);
 
  private:
   const uint32_t dex_pc_;
@@ -1088,20 +1092,103 @@
   void AddInput(HInstruction* input);
 
   virtual Primitive::Type GetType() const { return type_; }
+  void SetType(Primitive::Type type) { type_ = type; }
 
   uint32_t GetRegNumber() const { return reg_number_; }
 
-  DECLARE_INSTRUCTION(Phi)
+  DECLARE_INSTRUCTION(Phi);
 
  protected:
   GrowableArray<HInstruction*> inputs_;
   const uint32_t reg_number_;
-  const Primitive::Type type_;
+  Primitive::Type type_;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HPhi);
 };
 
+class MoveOperands : public ArenaObject {
+ public:
+  MoveOperands(Location source, Location destination)
+      : source_(source), destination_(destination) {}
+
+  Location GetSource() const { return source_; }
+  Location GetDestination() const { return destination_; }
+
+  void SetSource(Location value) { source_ = value; }
+  void SetDestination(Location value) { destination_ = value; }
+
+  // The parallel move resolver marks moves as "in-progress" by clearing the
+  // destination (but not the source).
+  Location MarkPending() {
+    DCHECK(!IsPending());
+    Location dest = destination_;
+    destination_ = Location::NoLocation();
+    return dest;
+  }
+
+  void ClearPending(Location dest) {
+    DCHECK(IsPending());
+    destination_ = dest;
+  }
+
+  bool IsPending() const {
+    DCHECK(!source_.IsInvalid() || destination_.IsInvalid());
+    return destination_.IsInvalid() && !source_.IsInvalid();
+  }
+
+  // True if this blocks a move from the given location.
+  bool Blocks(Location loc) const {
+    return !IsEliminated() && source_.Equals(loc);
+  }
+
+  // A move is redundant if it's been eliminated, if its source and
+  // destination are the same, or if its destination is unneeded.
+  bool IsRedundant() const {
+    return IsEliminated() || destination_.IsInvalid() || source_.Equals(destination_);
+  }
+
+  // We clear both operands to indicate a move that has been eliminated.
+  void Eliminate() {
+    source_ = destination_ = Location::NoLocation();
+  }
+
+  bool IsEliminated() const {
+    DCHECK(!source_.IsInvalid() || destination_.IsInvalid());
+    return source_.IsInvalid();
+  }
+
+ private:
+  Location source_;
+  Location destination_;
+
+  DISALLOW_COPY_AND_ASSIGN(MoveOperands);
+};
+
+static constexpr size_t kDefaultNumberOfMoves = 4;
+
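+// A parallel move groups moves that must appear to happen simultaneously,
+// for instance when resolving values at block boundaries. A building sketch,
+// assuming an ArenaAllocator* `arena` is in scope:
+//   HParallelMove* move = new (arena) HParallelMove(arena);
+//   move->AddMove(new (arena) MoveOperands(
+//       Location::RegisterLocation(ManagedRegister(0)),
+//       Location::RegisterLocation(ManagedRegister(1))));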
+class HParallelMove : public HTemplateInstruction<0> {
+ public:
+  explicit HParallelMove(ArenaAllocator* arena) : moves_(arena, kDefaultNumberOfMoves) {}
+
+  void AddMove(MoveOperands* move) {
+    moves_.Add(move);
+  }
+
+  MoveOperands* MoveOperandsAt(size_t index) const {
+    return moves_.Get(index);
+  }
+
+  size_t NumMoves() const { return moves_.Size(); }
+
+  DECLARE_INSTRUCTION(ParallelMove);
+
+ private:
+  GrowableArray<MoveOperands*> moves_;
+
+  DISALLOW_COPY_AND_ASSIGN(HParallelMove);
+};
+
 class HGraphVisitor : public ValueObject {
  public:
   explicit HGraphVisitor(HGraph* graph) : graph_(graph) { }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 286f48a..dfbb488 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -24,6 +24,7 @@
 #include "driver/dex_compilation_unit.h"
 #include "graph_visualizer.h"
 #include "nodes.h"
+#include "register_allocator.h"
 #include "ssa_liveness_analysis.h"
 #include "utils/arena_allocator.h"
 
@@ -96,8 +97,6 @@
     }
     return nullptr;
   }
-  HGraphVisualizer visualizer(visualizer_output_.get(), graph, kStringFilter, dex_compilation_unit);
-  visualizer.DumpGraph("builder");
 
   InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
   // The optimizing compiler currently does not have a Thumb2 assembler.
@@ -112,6 +111,10 @@
     return nullptr;
   }
 
+  HGraphVisualizer visualizer(
+      visualizer_output_.get(), graph, kStringFilter, *codegen, dex_compilation_unit);
+  visualizer.DumpGraph("builder");
+
   CodeVectorAllocator allocator;
   codegen->Compile(&allocator);
 
@@ -128,9 +131,13 @@
   visualizer.DumpGraph("ssa");
 
   graph->FindNaturalLoops();
-  SsaLivenessAnalysis(*graph).Analyze();
+  SsaLivenessAnalysis liveness(*graph);
+  liveness.Analyze();
   visualizer.DumpGraph("liveness");
 
+  RegisterAllocator(graph->GetArena(), *codegen).AllocateRegisters(liveness);
+  visualizer.DumpGraph("register");
+
   return new CompiledMethod(GetCompilerDriver(),
                             instruction_set,
                             allocator.GetMemory(),
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 67c4850..36a6a21 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -17,6 +17,10 @@
 #ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
 #define ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
 
+#include "ssa_liveness_analysis.h"
+
+namespace art {
+
 #define NUM_INSTRUCTIONS(...)  \
   (sizeof((uint16_t[]) {__VA_ARGS__}) /sizeof(uint16_t))
 
@@ -29,4 +33,21 @@
 #define TWO_REGISTERS_CODE_ITEM(...)                                       \
     { 2, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
 
+#define THREE_REGISTERS_CODE_ITEM(...)                                     \
+    { 3, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+
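+// Builds a live interval covering `ranges`, assumed sorted by increasing start
+// position. Ranges are added last-to-first so the interval's range list ends
+// up sorted.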
+LiveInterval* BuildInterval(const size_t ranges[][2],
+                            size_t number_of_ranges,
+                            ArenaAllocator* allocator,
+                            int reg = -1) {
+  LiveInterval* interval = new (allocator) LiveInterval(allocator, Primitive::kPrimInt);
+  for (size_t i = number_of_ranges; i > 0; --i) {
+    interval->AddRange(ranges[i - 1][0], ranges[i - 1][1]);
+  }
+  interval->SetRegister(reg);
+  return interval;
+}
+
+}  // namespace art
+
 #endif  // ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
new file mode 100644
index 0000000..3d2d136
--- /dev/null
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "parallel_move_resolver.h"
+#include "nodes.h"
+#include "locations.h"
+
+namespace art {
+
+void ParallelMoveResolver::EmitNativeCode(HParallelMove* parallel_move) {
+  DCHECK(moves_.IsEmpty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (size_t i = 0; i < moves_.Size(); ++i) {
+    const MoveOperands& move = *moves_.Get(i);
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.GetSource().IsConstant()) {
+      PerformMove(i);
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (size_t i = 0; i < moves_.Size(); ++i) {
+    const MoveOperands& move = *moves_.Get(i);
+    if (!move.IsEliminated()) {
+      DCHECK(move.GetSource().IsConstant());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Reset();
+}
+
+
+void ParallelMoveResolver::BuildInitialMoveList(HParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  for (size_t i = 0; i < parallel_move->NumMoves(); ++i) {
+    MoveOperands* move = parallel_move->MoveOperandsAt(i);
+    if (!move->IsRedundant()) {
+      moves_.Add(move);
+    }
+  }
+}
+
+
+void ParallelMoveResolver::PerformMove(size_t index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.  We use operand swaps to resolve cycles,
+  // which means that a call to PerformMove could change any source operand
+  // in the move graph.
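+  //
+  // For example, given the two moves (0 -> 1) and (1 -> 0), performing the
+  // first finds the second pending, and the cycle is resolved with a single
+  // swap (1 <-> 0); see ParallelMoveTest.Swap.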
+
+  DCHECK(!moves_.Get(index)->IsPending());
+  DCHECK(!moves_.Get(index)->IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack-allocated local.  Recursion may allow
+  // multiple moves to be pending.
+  DCHECK(!moves_.Get(index)->GetSource().IsInvalid());
+  Location destination = moves_.Get(index)->MarkPending();
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (size_t i = 0; i < moves_.Size(); ++i) {
+    const MoveOperands& other_move = *moves_.Get(i);
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      // Though PerformMove can change any source operand in the move graph,
+      // this call cannot create a blocking move via a swap (this loop does
+      // not miss any).  Assume there is a non-blocking move with source A
+      // and this move is blocked on source B and there is a swap of A and
+      // B.  Then A and B must be involved in the same cycle (or they would
+      // not be swapped).  Since this move's destination is B and there is
+      // only a single incoming edge to an operand, this move must also be
+      // involved in the same cycle.  In that case, the blocking move will
+      // be created but will be "pending" when we return from PerformMove.
+      PerformMove(i);
+    }
+  }
+  MoveOperands* move = moves_.Get(index);
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  move->ClearPending(destination);
+
+  // This move's source may have changed due to swaps to resolve cycles and
+  // so it may now be the last move in the cycle.  If so remove it.
+  if (move->GetSource().Equals(destination)) {
+    move->Eliminate();
+    return;
+  }
+
+  // The move may be blocked on (at most) one pending move, in which case
+  // we have a cycle.  Search for such a blocking move and perform a swap to
+  // resolve it.
+  bool do_swap = false;
+  for (size_t i = 0; i < moves_.Size(); ++i) {
+    const MoveOperands& other_move = *moves_.Get(i);
+    if (other_move.Blocks(destination)) {
+      DCHECK(other_move.IsPending());
+      do_swap = true;
+      break;
+    }
+  }
+
+  if (do_swap) {
+    EmitSwap(index);
+    // Any unperformed (including pending) move with a source of either
+    // this move's source or destination needs to have their source
+    // changed to reflect the state of affairs after the swap.
+    Location source = move->GetSource();
+    Location destination = move->GetDestination();
+    move->Eliminate();
+    for (size_t i = 0; i < moves_.Size(); ++i) {
+      const MoveOperands& other_move = *moves_.Get(i);
+      if (other_move.Blocks(source)) {
+        moves_.Get(i)->SetSource(destination);
+      } else if (other_move.Blocks(destination)) {
+        moves_.Get(i)->SetSource(source);
+      }
+    }
+  } else {
+    // This move is not blocked.
+    EmitMove(index);
+    move->Eliminate();
+  }
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
new file mode 100644
index 0000000..ff20cb0
--- /dev/null
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
+#define ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
+
+#include "utils/allocation.h"
+#include "utils/growable_array.h"
+
+namespace art {
+
+class HParallelMove;
+class MoveOperands;
+
+/**
+ * Helper class to resolve a set of parallel moves. Architecture-dependent code
+ * generators must have their own subclass that implements the `EmitMove` and `EmitSwap`
+ * operations.
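+ *
+ * A usage sketch, with `MyMoveResolver` standing in for a backend subclass
+ * that implements `EmitMove` and `EmitSwap`:
+ *   MyMoveResolver resolver(allocator);
+ *   resolver.EmitNativeCode(parallel_move);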
+ */
+class ParallelMoveResolver : public ValueObject {
+ public:
+  explicit ParallelMoveResolver(ArenaAllocator* allocator) : moves_(allocator, 32) {}
+  virtual ~ParallelMoveResolver() {}
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void EmitNativeCode(HParallelMove* parallel_move);
+
+ protected:
+  // Emit a move.
+  virtual void EmitMove(size_t index) = 0;
+
+  // Execute a move by emitting a swap of two operands.
+  virtual void EmitSwap(size_t index) = 0;
+
+  // List of moves not yet resolved.
+  GrowableArray<MoveOperands*> moves_;
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(HParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(size_t index);
+
+  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolver);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
new file mode 100644
index 0000000..88df24d
--- /dev/null
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nodes.h"
+#include "parallel_move_resolver.h"
+#include "utils/arena_allocator.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+class TestParallelMoveResolver : public ParallelMoveResolver {
+ public:
+  explicit TestParallelMoveResolver(ArenaAllocator* allocator) : ParallelMoveResolver(allocator) {}
+
+  virtual void EmitMove(size_t index) {
+    MoveOperands* move = moves_.Get(index);
+    if (!message_.str().empty()) {
+      message_ << " ";
+    }
+    message_ << "("
+             << move->GetSource().reg().RegId()
+             << " -> "
+             << move->GetDestination().reg().RegId()
+             << ")";
+  }
+
+  virtual void EmitSwap(size_t index) {
+    MoveOperands* move = moves_.Get(index);
+    if (!message_.str().empty()) {
+      message_ << " ";
+    }
+    message_ << "("
+             << move->GetSource().reg().RegId()
+             << " <-> "
+             << move->GetDestination().reg().RegId()
+             << ")";
+  }
+
+  std::string GetMessage() const {
+    return message_.str();
+  }
+
+ private:
+  std::ostringstream message_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestParallelMoveResolver);
+};
+
+static HParallelMove* BuildParallelMove(ArenaAllocator* allocator,
+                                        const size_t operands[][2],
+                                        size_t number_of_moves) {
+  HParallelMove* moves = new (allocator) HParallelMove(allocator);
+  for (size_t i = 0; i < number_of_moves; ++i) {
+    moves->AddMove(new (allocator) MoveOperands(
+        Location::RegisterLocation(ManagedRegister(operands[i][0])),
+        Location::RegisterLocation(ManagedRegister(operands[i][1]))));
+  }
+  return moves;
+}
+
+TEST(ParallelMoveTest, Dependency) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    TestParallelMoveResolver resolver(&allocator);
+    static constexpr size_t moves[][2] = {{0, 1}, {1, 2}};
+    resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
+    ASSERT_STREQ("(1 -> 2) (0 -> 1)", resolver.GetMessage().c_str());
+  }
+
+  {
+    TestParallelMoveResolver resolver(&allocator);
+    static constexpr size_t moves[][2] = {{0, 1}, {1, 2}, {2, 3}, {1, 4}};
+    resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
+    ASSERT_STREQ("(2 -> 3) (1 -> 2) (1 -> 4) (0 -> 1)", resolver.GetMessage().c_str());
+  }
+}
+
+TEST(ParallelMoveTest, Swap) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    TestParallelMoveResolver resolver(&allocator);
+    static constexpr size_t moves[][2] = {{0, 1}, {1, 0}};
+    resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
+    ASSERT_STREQ("(1 <-> 0)", resolver.GetMessage().c_str());
+  }
+
+  {
+    TestParallelMoveResolver resolver(&allocator);
+    static constexpr size_t moves[][2] = {{0, 1}, {1, 2}, {1, 0}};
+    resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
+    ASSERT_STREQ("(1 -> 2) (1 <-> 0)", resolver.GetMessage().c_str());
+  }
+
+  {
+    TestParallelMoveResolver resolver(&allocator);
+    static constexpr size_t moves[][2] = {{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 1}};
+    resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
+    ASSERT_STREQ("(4 <-> 1) (3 <-> 4) (2 <-> 3) (0 -> 1)", resolver.GetMessage().c_str());
+  }
+
+  {
+    TestParallelMoveResolver resolver(&allocator);
+    static constexpr size_t moves[][2] = {{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 1}, {5, 4}};
+    resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
+    ASSERT_STREQ("(4 <-> 1) (3 <-> 4) (2 <-> 3) (0 -> 1) (5 -> 4)", resolver.GetMessage().c_str());
+  }
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
new file mode 100644
index 0000000..dd175d2
--- /dev/null
+++ b/compiler/optimizing/register_allocator.cc
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "register_allocator.h"
+
+#include "code_generator.h"
+#include "ssa_liveness_analysis.h"
+
+namespace art {
+
+// Note: -1 deliberately wraps around to the maximum value a size_t lifetime
+// position can take.
+static constexpr size_t kMaxLifetimePosition = -1;
+
+RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator, const CodeGenerator& codegen)
+      : allocator_(allocator),
+        codegen_(codegen),
+        unhandled_(allocator, 0),
+        handled_(allocator, 0),
+        active_(allocator, 0),
+        inactive_(allocator, 0),
+        processing_core_registers_(false),
+        number_of_registers_(-1),
+        registers_array_(nullptr),
+        blocked_registers_(allocator->AllocArray<bool>(codegen.GetNumberOfRegisters())) {
+  codegen.SetupBlockedRegisters(blocked_registers_);
+}
+
+static bool ShouldProcess(bool processing_core_registers, HInstruction* instruction) {
+  bool is_core_register = (instruction->GetType() != Primitive::kPrimDouble)
+      && (instruction->GetType() != Primitive::kPrimFloat);
+  return processing_core_registers == is_core_register;
+}
+
+void RegisterAllocator::AllocateRegistersInternal(const SsaLivenessAnalysis& liveness) {
+  number_of_registers_ = processing_core_registers_
+      ? codegen_.GetNumberOfCoreRegisters()
+      : codegen_.GetNumberOfFloatingPointRegisters();
+
+  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
+
+  // Iterate post-order so that the list is sorted and the last added interval
+  // is the one with the lowest start position.
+  for (size_t i = liveness.GetNumberOfSsaValues(); i > 0; --i) {
+    HInstruction* instruction = liveness.GetInstructionFromSsaIndex(i - 1);
+    if (ShouldProcess(processing_core_registers_, instruction)) {
+      LiveInterval* current = instruction->GetLiveInterval();
+      DCHECK(unhandled_.IsEmpty() || current->StartsBefore(unhandled_.Peek()));
+      unhandled_.Add(current);
+    }
+  }
+
+  LinearScan();
+  if (kIsDebugBuild) {
+    ValidateInternal(liveness, true);
+  }
+}
+
+bool RegisterAllocator::ValidateInternal(const SsaLivenessAnalysis& liveness,
+                                         bool log_fatal_on_failure) const {
+  // To simplify unit testing, we eagerly create the array of intervals, and
+  // call the helper method.
+  GrowableArray<LiveInterval*> intervals(allocator_, 0);
+  for (size_t i = 0; i < liveness.GetNumberOfSsaValues(); ++i) {
+    HInstruction* instruction = liveness.GetInstructionFromSsaIndex(i);
+    if (ShouldProcess(processing_core_registers_, instruction)) {
+      intervals.Add(instruction->GetLiveInterval());
+    }
+  }
+  return ValidateIntervals(intervals, codegen_, allocator_, processing_core_registers_,
+                           log_fatal_on_failure);
+}
+
+bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& ranges,
+                                          const CodeGenerator& codegen,
+                                          ArenaAllocator* allocator,
+                                          bool processing_core_registers,
+                                          bool log_fatal_on_failure) {
+  size_t number_of_registers = processing_core_registers
+      ? codegen.GetNumberOfCoreRegisters()
+      : codegen.GetNumberOfFloatingPointRegisters();
+  GrowableArray<ArenaBitVector*> bit_vectors(allocator, number_of_registers);
+
+  // Allocate a bit vector per register. A live interval that has a register
+  // allocated will populate the associated bit vector based on its live ranges.
+  for (size_t i = 0; i < number_of_registers; i++) {
+    bit_vectors.Add(new (allocator) ArenaBitVector(allocator, 0, true));
+  }
+
+  for (size_t i = 0, e = ranges.Size(); i < e; ++i) {
+    LiveInterval* current = ranges.Get(i);
+    do {
+      if (!current->HasRegister()) {
+        continue;
+      }
+      BitVector* vector = bit_vectors.Get(current->GetRegister());
+      LiveRange* range = current->GetFirstRange();
+      do {
+        for (size_t j = range->GetStart(); j < range->GetEnd(); ++j) {
+          if (vector->IsBitSet(j)) {
+            if (log_fatal_on_failure) {
+              std::ostringstream message;
+              message << "Register conflict at " << j << " for ";
+              if (processing_core_registers) {
+                codegen.DumpCoreRegister(message, current->GetRegister());
+              } else {
+                codegen.DumpFloatingPointRegister(message, current->GetRegister());
+              }
+              LOG(FATAL) << message.str();
+            } else {
+              return false;
+            }
+          } else {
+            vector->SetBit(j);
+          }
+        }
+      } while ((range = range->GetNext()) != nullptr);
+    } while ((current = current->GetNextSibling()) != nullptr);
+  }
+  return true;
+}
+
+void RegisterAllocator::DumpInterval(std::ostream& stream, LiveInterval* interval) {
+  interval->Dump(stream);
+  stream << ": ";
+  if (interval->HasRegister()) {
+    if (processing_core_registers_) {
+      codegen_.DumpCoreRegister(stream, interval->GetRegister());
+    } else {
+      codegen_.DumpFloatingPointRegister(stream, interval->GetRegister());
+    }
+  } else {
+    stream << "spilled";
+  }
+  stream << std::endl;
+}
+
+// By-the-book implementation of a linear scan register allocator.
+void RegisterAllocator::LinearScan() {
+  while (!unhandled_.IsEmpty()) {
+    // (1) Remove interval with the lowest start position from unhandled.
+    LiveInterval* current = unhandled_.Pop();
+    size_t position = current->GetStart();
+
+    // (2) Remove currently active intervals that are dead at this position.
+    //     Move active intervals that have a lifetime hole at this position
+    //     to inactive.
+    for (size_t i = 0; i < active_.Size(); ++i) {
+      LiveInterval* interval = active_.Get(i);
+      if (interval->IsDeadAt(position)) {
+        active_.Delete(interval);
+        --i;
+        handled_.Add(interval);
+      } else if (!interval->Covers(position)) {
+        active_.Delete(interval);
+        --i;
+        inactive_.Add(interval);
+      }
+    }
+
+    // (3) Remove currently inactive intervals that are dead at this position.
+    //     Move inactive intervals that cover this position to active.
+    for (size_t i = 0; i < inactive_.Size(); ++i) {
+      LiveInterval* interval = inactive_.Get(i);
+      if (interval->IsDeadAt(position)) {
+        inactive_.Delete(interval);
+        --i;
+        handled_.Add(interval);
+      } else if (interval->Covers(position)) {
+        inactive_.Delete(interval);
+        --i;
+        active_.Add(interval);
+      }
+    }
+
+    // (4) Try to find an available register.
+    bool success = TryAllocateFreeReg(current);
+
+    // (5) If no register could be found, we need to spill.
+    if (!success) {
+      success = AllocateBlockedReg(current);
+    }
+
+    // (6) If the interval had a register allocated, add it to the list of active
+    //     intervals.
+    if (success) {
+      active_.Add(current);
+    }
+  }
+}
+
+// Find a free register. If multiple are found, pick the register that
+// is free the longest.
+bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
+  size_t* free_until = registers_array_;
+
+  // First set all registers to be free.
+  for (size_t i = 0; i < number_of_registers_; ++i) {
+    free_until[i] = kMaxLifetimePosition;
+  }
+
+  // For each active interval, set its register to not free.
+  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
+    LiveInterval* interval = active_.Get(i);
+    DCHECK(interval->HasRegister());
+    free_until[interval->GetRegister()] = 0;
+  }
+
+  // For each inactive interval, set its register to be free until
+  // the next intersection with `current`.
+  // Thanks to SSA, this should only be needed for intervals
+  // that are the result of a split.
+  for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
+    LiveInterval* inactive = inactive_.Get(i);
+    DCHECK(inactive->HasRegister());
+    size_t next_intersection = inactive->FirstIntersectionWith(current);
+    if (next_intersection != kNoLifetime) {
+      free_until[inactive->GetRegister()] = next_intersection;
+    }
+  }
+
+  // Pick the register that is free the longest.
+  int reg = -1;
+  for (size_t i = 0; i < number_of_registers_; ++i) {
+    if (IsBlocked(i)) continue;
+    if (reg == -1 || free_until[i] > free_until[reg]) {
+      reg = i;
+      if (free_until[i] == kMaxLifetimePosition) break;
+    }
+  }
+
+  // If we could not find a register, we need to spill.
+  if (reg == -1 || free_until[reg] == 0) {
+    return false;
+  }
+
+  current->SetRegister(reg);
+  if (!current->IsDeadAt(free_until[reg])) {
+    // If the register is only available for a subset of live ranges
+    // covered by `current`, split `current` at the position where
+    // the register is not available anymore.
+    LiveInterval* split = Split(current, free_until[reg]);
+    DCHECK(split != nullptr);
+    AddToUnhandled(split);
+  }
+  return true;
+}
+
+bool RegisterAllocator::IsBlocked(int reg) const {
+  // TODO: This only works for core registers and needs to be adjusted for
+  // floating point registers.
+  DCHECK(processing_core_registers_);
+  return blocked_registers_[reg];
+}
+
+// Find the register that is used last (i.e. whose next use is farthest away)
+// and spill the interval that holds it. If the first register use of `current`
+// is after that next use, we spill `current` instead.
+bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
+  size_t first_register_use = current->FirstRegisterUse();
+  if (current->FirstRegisterUse() == kNoLifetime) {
+    // TODO: Allocate spill slot for `current`.
+    return false;
+  }
+
+  // First set all registers as not being used.
+  size_t* next_use = registers_array_;
+  for (size_t i = 0; i < number_of_registers_; ++i) {
+    next_use[i] = kMaxLifetimePosition;
+  }
+
+  // For each active interval, find the next use of its register after the
+  // start of current.
+  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
+    LiveInterval* active = active_.Get(i);
+    DCHECK(active->HasRegister());
+    size_t use = active->FirstRegisterUseAfter(current->GetStart());
+    if (use != kNoLifetime) {
+      next_use[active->GetRegister()] = use;
+    }
+  }
+
+  // For each inactive interval, find the next use of its register after the
+  // start of current.
+  // Thanks to SSA, this should only be needed for intervals
+  // that are the result of a split.
+  for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
+    LiveInterval* inactive = inactive_.Get(i);
+    DCHECK(inactive->HasRegister());
+    size_t use = inactive->FirstRegisterUseAfter(current->GetStart());
+    if (use != kNoLifetime) {
+      next_use[inactive->GetRegister()] = use;
+    }
+  }
+
+  // Pick the register that is used the last.
+  int reg = -1;
+  for (size_t i = 0; i < number_of_registers_; ++i) {
+    if (IsBlocked(i)) continue;
+    if (reg == -1 || next_use[i] > next_use[reg]) {
+      reg = i;
+      if (next_use[i] == kMaxLifetimePosition) break;
+    }
+  }
+
+  if (first_register_use >= next_use[reg]) {
+    // If the first use of that instruction is after the last use of the found
+    // register, we split this interval just before its first register use.
+    LiveInterval* split = Split(current, first_register_use - 1);
+    AddToUnhandled(split);
+    return false;
+  } else {
+    // Use this register and spill the active and inactive intervals that
+    // hold that register.
+    current->SetRegister(reg);
+
+    for (size_t i = 0, e = active_.Size(); i < e; ++i) {
+      LiveInterval* active = active_.Get(i);
+      if (active->GetRegister() == reg) {
+        LiveInterval* split = Split(active, current->GetStart());
+        active_.DeleteAt(i);
+        handled_.Add(active);
+        AddToUnhandled(split);
+        break;
+      }
+    }
+
+    for (size_t i = 0; i < inactive_.Size(); ++i) {
+      LiveInterval* inactive = inactive_.Get(i);
+      if (inactive->GetRegister() == reg) {
+        LiveInterval* split = Split(inactive, current->GetStart());
+        inactive_.DeleteAt(i);
+        handled_.Add(inactive);
+        AddToUnhandled(split);
+        --i;
+      }
+    }
+
+    return true;
+  }
+}
+
+void RegisterAllocator::AddToUnhandled(LiveInterval* interval) {
+  size_t insert_at = 0;
+  for (size_t i = unhandled_.Size(); i > 0; --i) {
+    LiveInterval* current = unhandled_.Get(i - 1);
+    if (current->StartsAfter(interval)) {
+      insert_at = i;
+      break;
+    }
+  }
+  // `insert_at` stays 0 when no unhandled interval starts after `interval`,
+  // in which case it belongs at the front of the list.
+  unhandled_.InsertAt(insert_at, interval);
+}
+
+LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) {
+  DCHECK(position >= interval->GetStart());
+  DCHECK(!interval->IsDeadAt(position));
+  if (position == interval->GetStart()) {
+    // Spill slot will be allocated when handling `interval` again.
+    interval->ClearRegister();
+    return interval;
+  } else {
+    LiveInterval* new_interval = interval->SplitAt(position);
+    // TODO: Allocate spill slot for `interval`.
+    return new_interval;
+  }
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
new file mode 100644
index 0000000..e575b96
--- /dev/null
+++ b/compiler/optimizing/register_allocator.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
+#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
+
+#include "base/macros.h"
+#include "utils/growable_array.h"
+
+namespace art {
+
+class CodeGenerator;
+class LiveInterval;
+class SsaLivenessAnalysis;
+
+/**
+ * An implementation of a linear scan register allocator on an `HGraph` with SSA form.
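+ *
+ * Typical use, mirroring optimizing_compiler.cc:
+ *   SsaLivenessAnalysis liveness(*graph);
+ *   liveness.Analyze();
+ *   RegisterAllocator(graph->GetArena(), *codegen).AllocateRegisters(liveness);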
+ */
+class RegisterAllocator {
+ public:
+  RegisterAllocator(ArenaAllocator* allocator, const CodeGenerator& codegen);
+
+  // Main entry point for the register allocator. Given the liveness analysis,
+  // allocates registers to live intervals.
+  void AllocateRegisters(const SsaLivenessAnalysis& liveness) {
+    processing_core_registers_ = true;
+    AllocateRegistersInternal(liveness);
+    processing_core_registers_ = false;
+    AllocateRegistersInternal(liveness);
+  }
+
+  // Validate that the register allocator did not allocate the same register to
+  // intervals that intersect each other. Returns false if the validation fails.
+  bool Validate(const SsaLivenessAnalysis& liveness, bool log_fatal_on_failure) {
+    processing_core_registers_ = true;
+    if (!ValidateInternal(liveness, log_fatal_on_failure)) {
+      return false;
+    }
+    processing_core_registers_ = false;
+    return ValidateInternal(liveness, log_fatal_on_failure);
+  }
+
+  // Helper method for validation. Used by unit testing.
+  static bool ValidateIntervals(const GrowableArray<LiveInterval*>& intervals,
+                                const CodeGenerator& codegen,
+                                ArenaAllocator* allocator,
+                                bool processing_core_registers,
+                                bool log_fatal_on_failure);
+
+ private:
+  // Main methods of the allocator.
+  void LinearScan();
+  bool TryAllocateFreeReg(LiveInterval* interval);
+  bool AllocateBlockedReg(LiveInterval* interval);
+
+  // Add `interval` in the sorted list of unhandled intervals.
+  void AddToUnhandled(LiveInterval* interval);
+
+  // Split `interval` at the position `at`. The new interval starts at `at`.
+  LiveInterval* Split(LiveInterval* interval, size_t at);
+
+  // Returns whether `reg` is blocked by the code generator.
+  bool IsBlocked(int reg) const;
+
+  // Helper methods.
+  void AllocateRegistersInternal(const SsaLivenessAnalysis& liveness);
+  bool ValidateInternal(const SsaLivenessAnalysis& liveness, bool log_fatal_on_failure) const;
+  void DumpInterval(std::ostream& stream, LiveInterval* interval);
+
+  ArenaAllocator* const allocator_;
+  const CodeGenerator& codegen_;
+
+  // List of intervals that must be processed, ordered by start position. Last entry
+  // is the interval that has the lowest start position.
+  GrowableArray<LiveInterval*> unhandled_;
+
+  // List of intervals that have been processed.
+  GrowableArray<LiveInterval*> handled_;
+
+  // List of intervals that are currently active when processing a new live interval.
+  // That is, they have a live range that spans the start of the new interval.
+  GrowableArray<LiveInterval*> active_;
+
+  // List of intervals that are currently inactive when processing a new live interval.
+  // That is, they have a lifetime hole that spans the start of the new interval.
+  GrowableArray<LiveInterval*> inactive_;
+
+  // True if processing core registers. False if processing floating
+  // point registers.
+  bool processing_core_registers_;
+
+  // Number of registers for the current register kind (core or floating point).
+  size_t number_of_registers_;
+
+  // Temporary array, allocated ahead of time for simplicity.
+  size_t* registers_array_;
+
+  // Blocked registers, as decided by the code generator.
+  bool* const blocked_registers_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
new file mode 100644
index 0000000..019d0f8
--- /dev/null
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "builder.h"
+#include "code_generator.h"
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "register_allocator.h"
+#include "ssa_liveness_analysis.h"
+#include "utils/arena_allocator.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+// Note: the register allocator tests rely on the fact that constants have live
+// intervals and registers get allocated to them.
+
+static bool Check(const uint16_t* data) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraphBuilder builder(&allocator);
+  const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+  HGraph* graph = builder.BuildGraph(*item);
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+  graph->FindNaturalLoops();
+  SsaLivenessAnalysis liveness(*graph);
+  liveness.Analyze();
+  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
+  RegisterAllocator register_allocator(&allocator, *codegen);
+  register_allocator.AllocateRegisters(liveness);
+  return register_allocator.Validate(liveness, false);
+}
+
+/**
+ * Unit testing of RegisterAllocator::ValidateIntervals. Register allocator
+ * tests are based on this validation method.
+ */
+TEST(RegisterAllocatorTest, ValidateIntervals) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = new (&allocator) HGraph(&allocator);
+  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
+  GrowableArray<LiveInterval*> intervals(&allocator, 0);
+
+  // Test with two intervals of the same range.
+  {
+    static constexpr size_t ranges[][2] = {{0, 42}};
+    intervals.Add(BuildInterval(ranges, arraysize(ranges), &allocator, 0));
+    intervals.Add(BuildInterval(ranges, arraysize(ranges), &allocator, 1));
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(1)->SetRegister(0);
+    ASSERT_FALSE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+    intervals.Reset();
+  }
+
+  // Test with two non-intersecting intervals.
+  {
+    static constexpr size_t ranges1[][2] = {{0, 42}};
+    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    static constexpr size_t ranges2[][2] = {{42, 43}};
+    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(1)->SetRegister(0);
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+    intervals.Reset();
+  }
+
+  // Test with two non-intersecting intervals, with one with a lifetime hole.
+  {
+    static constexpr size_t ranges1[][2] = {{0, 42}, {45, 48}};
+    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    static constexpr size_t ranges2[][2] = {{42, 43}};
+    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(1)->SetRegister(0);
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+    intervals.Reset();
+  }
+
+  // Test with intersecting intervals.
+  {
+    static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
+    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    static constexpr size_t ranges2[][2] = {{42, 47}};
+    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(1)->SetRegister(0);
+    ASSERT_FALSE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+    intervals.Reset();
+  }
+
+  // Test with siblings.
+  {
+    static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
+    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    intervals.Get(0)->SplitAt(43);
+    static constexpr size_t ranges2[][2] = {{42, 47}};
+    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(1)->SetRegister(0);
+    // Sibling of the first interval has no register allocated to it.
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(0)->GetNextSibling()->SetRegister(0);
+    ASSERT_FALSE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+  }
+}
+
+TEST(RegisterAllocatorTest, CFG1) {
+  /*
+   * Test the following snippet:
+   *  return 0;
+   *
+   * Which becomes the following graph:
+   *       constant0
+   *       goto
+   *        |
+   *       return
+   *        |
+   *       exit
+   */
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::RETURN);
+
+  ASSERT_TRUE(Check(data));
+}
+
+TEST(RegisterAllocatorTest, Loop1) {
+  /*
+   * Test the following snippet:
+   *  int a = 0;
+   *  while (a == a) {
+   *    a = 4;
+   *  }
+   *  return 5;
+   *
+   * Which becomes the following graph:
+   *       constant0
+   *       constant4
+   *       constant5
+   *       goto
+   *        |
+   *       goto
+   *        |
+   *       phi
+   *       equal
+   *       if +++++
+   *        |       \ +
+   *        |     goto
+   *        |
+   *       return
+   *        |
+   *       exit
+   */
+
+  const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 4,
+    Instruction::CONST_4 | 4 << 12 | 0,
+    Instruction::GOTO | 0xFD00,
+    Instruction::CONST_4 | 5 << 12 | 1 << 8,
+    Instruction::RETURN | 1 << 8);
+
+  ASSERT_TRUE(Check(data));
+}
+
+TEST(RegisterAllocatorTest, Loop2) {
+  /*
+   * Test the following snippet:
+   *  int a = 0;
+   *  while (a == 8) {
+   *    a = 4 + 5;
+   *  }
+   *  return 6 + 7;
+   *
+   * Which becomes the following graph:
+   *       constant0
+   *       constant4
+   *       constant5
+   *       constant6
+   *       constant7
+   *       constant8
+   *       goto
+   *        |
+   *       goto
+   *        |
+   *       phi
+   *       equal
+   *       if +++++
+   *        |       \ +
+   *        |      4 + 5
+   *        |      goto
+   *        |
+   *       6 + 7
+   *       return
+   *        |
+   *       exit
+   */
+
+  const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::CONST_4 | 8 << 12 | 1 << 8,
+    Instruction::IF_EQ | 1 << 8, 7,
+    Instruction::CONST_4 | 4 << 12 | 0 << 8,
+    Instruction::CONST_4 | 5 << 12 | 1 << 8,
+    Instruction::ADD_INT, 1 << 8 | 0,
+    Instruction::GOTO | 0xFA00,
+    Instruction::CONST_4 | 6 << 12 | 1 << 8,
+    Instruction::CONST_4 | 7 << 12 | 1 << 8,
+    Instruction::ADD_INT, 1 << 8 | 0,
+    Instruction::RETURN | 1 << 8);
+
+  ASSERT_TRUE(Check(data));
+}
+
+static HGraph* BuildSSAGraph(const uint16_t* data, ArenaAllocator* allocator) {
+  HGraphBuilder builder(allocator);
+  const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+  HGraph* graph = builder.BuildGraph(*item);
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+  graph->FindNaturalLoops();
+  return graph;
+}
+
+TEST(RegisterAllocatorTest, Loop3) {
+  /*
+   * Test the following snippet:
+   *  int a = 0;
+   *  do {
+   *    b = a;
+   *    a++;
+   *  } while (a != 5);
+   *  return b;
+   *
+   * Which becomes the following graph:
+   *       constant0
+   *       constant1
+   *       constant5
+   *       goto
+   *        |
+   *       goto
+   *        |++++++++++++
+   *       phi          +
+   *       a++          +
+   *       equals       +
+   *       if           +
+   *        |++++++++++++
+   *       return
+   *        |
+   *       exit
+   */
+
+  const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::ADD_INT_LIT8 | 1 << 8, 1 << 8,
+    Instruction::CONST_4 | 5 << 12 | 2 << 8,
+    Instruction::IF_NE | 1 << 8 | 2 << 12, 3,
+    Instruction::RETURN | 0 << 8,
+    Instruction::MOVE | 1 << 12 | 0 << 8,
+    Instruction::GOTO | 0xF900);
+
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = BuildSSAGraph(data, &allocator);
+  SsaLivenessAnalysis liveness(*graph);
+  liveness.Analyze();
+  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
+  RegisterAllocator register_allocator(&allocator, *codegen);
+  register_allocator.AllocateRegisters(liveness);
+  ASSERT_TRUE(register_allocator.Validate(liveness, false));
+
+  HBasicBlock* loop_header = graph->GetBlocks().Get(2);
+  HPhi* phi = loop_header->GetFirstPhi()->AsPhi();
+
+  LiveInterval* phi_interval = phi->GetLiveInterval();
+  LiveInterval* loop_update = phi->InputAt(1)->GetLiveInterval();
+  ASSERT_TRUE(phi_interval->HasRegister());
+  ASSERT_TRUE(loop_update->HasRegister());
+  ASSERT_NE(phi_interval->GetRegister(), loop_update->GetRegister());
+
+  HBasicBlock* return_block = graph->GetBlocks().Get(3);
+  HReturn* ret = return_block->GetFirstInstruction()->AsReturn();
+  ASSERT_EQ(phi_interval->GetRegister(), ret->InputAt(0)->GetLiveInterval()->GetRegister());
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 50e3254..33084df 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -19,6 +19,18 @@
 
 namespace art {
 
+static Primitive::Type MergeTypes(Primitive::Type existing, Primitive::Type new_type) {
+  // We trust the verifier has already done the necessary checking.
+  switch (existing) {
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+    case Primitive::kPrimNot:
+      return existing;
+    default:
+      return new_type;
+  }
+}
+
 void SsaBuilder::BuildSsa() {
   // 1) Visit in reverse post order. We need to have all predecessors of a block visited
   // (with the exception of loops) in order to create the right environment for that
@@ -32,11 +44,16 @@
     HBasicBlock* block = loop_headers_.Get(i);
     for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
       HPhi* phi = it.Current()->AsPhi();
+      Primitive::Type type = Primitive::kPrimVoid;
       for (size_t pred = 0; pred < block->GetPredecessors().Size(); pred++) {
-        phi->AddInput(ValueOfLocal(block->GetPredecessors().Get(pred), phi->GetRegNumber()));
+        HInstruction* input = ValueOfLocal(block->GetPredecessors().Get(pred), phi->GetRegNumber());
+        phi->AddInput(input);
+        type = MergeTypes(type, input->GetType());
       }
+      phi->SetType(type);
     }
   }
+  // TODO: Now that the type of loop phis is set, we need a type propagation phase.
 
   // 3) Clear locals.
   // TODO: Move this to a dead code eliminator phase.
@@ -65,7 +82,6 @@
     for (size_t local = 0; local < current_locals_->Size(); local++) {
       HInstruction* incoming = ValueOfLocal(block->GetLoopInformation()->GetPreHeader(), local);
       if (incoming != nullptr) {
-        // TODO: Compute union type.
         HPhi* phi = new (GetGraph()->GetArena()) HPhi(
             GetGraph()->GetArena(), local, 0, Primitive::kPrimVoid);
         block->AddPhi(phi);
@@ -88,12 +104,18 @@
         }
       }
       if (is_different) {
-        // TODO: Compute union type.
         HPhi* phi = new (GetGraph()->GetArena()) HPhi(
             GetGraph()->GetArena(), local, block->GetPredecessors().Size(), Primitive::kPrimVoid);
+        Primitive::Type type = Primitive::kPrimVoid;
         for (size_t i = 0; i < block->GetPredecessors().Size(); i++) {
-          phi->SetRawInputAt(i, ValueOfLocal(block->GetPredecessors().Get(i), local));
+          HInstruction* value = ValueOfLocal(block->GetPredecessors().Get(i), local);
+          // We need to merge the incoming types, as the Dex format does not
+          // guarantee the inputs have the same type. In particular the 0 constant is
+          // used for all types, but the graph builder treats it as an int.
+          type = MergeTypes(type, value->GetType());
+          phi->SetRawInputAt(i, value);
         }
+        phi->SetType(type);
         block->AddPhi(phi);
         value = phi;
       }
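A note on the MergeTypes change above: the merge is a one-step join in which an already-seen float, double, or reference type wins over the int that the graph builder assigns to the shared 0 constant. A minimal standalone sketch of the same rule (the Primitive enum is stubbed out here; the real one lives in runtime/primitive.h):

#include <cassert>

// Stand-in for art::Primitive::Type; only the values used below.
enum Type { kPrimVoid, kPrimInt, kPrimFloat, kPrimDouble, kPrimNot };

// Mirrors MergeTypes in ssa_builder.cc: a float/double/reference type that was
// already seen is kept; otherwise the new type wins (kPrimVoid is the identity).
static Type MergeTypes(Type existing, Type new_type) {
  switch (existing) {
    case kPrimFloat:
    case kPrimDouble:
    case kPrimNot:
      return existing;
    default:
      return new_type;
  }
}

int main() {
  // A phi merging a reference with the untyped 0 constant stays a reference.
  assert(MergeTypes(kPrimVoid, kPrimNot) == kPrimNot);
  assert(MergeTypes(kPrimNot, kPrimInt) == kPrimNot);
  // Two int inputs stay int.
  assert(MergeTypes(kPrimInt, kPrimInt) == kPrimInt);
  return 0;
}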
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 0f16ad2..dc4b2e5 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -122,20 +122,27 @@
 void SsaLivenessAnalysis::NumberInstructions() {
   int ssa_index = 0;
   size_t lifetime_position = 0;
-  // Each instruction gets an individual lifetime position, and a block gets a lifetime
+  // Each instruction gets a lifetime position, and a block gets a lifetime
   // start and end position. Non-phi instructions have a lifetime position different from that of
   // the block they are in. Phi instructions have the lifetime start of their block as
-  // lifetime position
+  // lifetime position.
+  //
+  // Because the register allocator will insert moves in the graph, we need
+  // to differentiate between the start and end of an instruction. Adding 2 to
+  // the lifetime position for each instruction ensures the start of an
+  // instruction is different from the end of the previous instruction.
   for (HLinearOrderIterator it(linear_post_order_); !it.Done(); it.Advance()) {
     HBasicBlock* block = it.Current();
-    block->SetLifetimeStart(++lifetime_position);
+    block->SetLifetimeStart(lifetime_position);
+    lifetime_position += 2;
 
     for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
       if (current->HasUses()) {
         instructions_from_ssa_index_.Add(current);
         current->SetSsaIndex(ssa_index++);
-        current->SetLiveInterval(new (graph_.GetArena()) LiveInterval(graph_.GetArena()));
+        current->SetLiveInterval(
+            new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType()));
       }
       current->SetLifetimePosition(lifetime_position);
     }
@@ -145,12 +152,14 @@
       if (current->HasUses()) {
         instructions_from_ssa_index_.Add(current);
         current->SetSsaIndex(ssa_index++);
-        current->SetLiveInterval(new (graph_.GetArena()) LiveInterval(graph_.GetArena()));
+        current->SetLiveInterval(
+            new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType()));
       }
-      current->SetLifetimePosition(++lifetime_position);
+      current->SetLifetimePosition(lifetime_position);
+      lifetime_position += 2;
     }
 
-    block->SetLifetimeEnd(++lifetime_position);
+    block->SetLifetimeEnd(lifetime_position);
   }
   number_of_ssa_values_ = ssa_index;
 }
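To make the stride-of-2 numbering concrete: successive instructions land on successive even positions, so every odd position in between stays free for a parallel move the register allocator may later insert. A toy model of the per-block numbering (hypothetical helper, not the real SsaLivenessAnalysis API):

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy model: assign lifetime positions within one block the way
// NumberInstructions does, advancing by 2 per slot so that the start of an
// instruction never collides with the end of the previous one.
std::vector<size_t> NumberBlock(size_t block_start, size_t num_instructions) {
  size_t lifetime_position = block_start;  // the block's lifetime start
  lifetime_position += 2;
  std::vector<size_t> positions;
  for (size_t i = 0; i < num_instructions; ++i) {
    positions.push_back(lifetime_position);
    lifetime_position += 2;
  }
  return positions;  // the block's lifetime end is `lifetime_position`
}

int main() {
  for (size_t pos : NumberBlock(0, 3)) {
    std::printf("%zu ", pos);  // prints "2 4 6": the odd slots stay free
  }
  std::printf("\n");
  return 0;
}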
@@ -174,28 +183,6 @@
   ComputeLiveInAndLiveOutSets();
 }
 
-class InstructionBitVectorIterator : public ValueObject {
- public:
-  InstructionBitVectorIterator(const BitVector& vector,
-                               const GrowableArray<HInstruction*>& instructions)
-        : instructions_(instructions),
-          iterator_(BitVector::Iterator(&vector)),
-          current_bit_index_(iterator_.Next()) {}
-
-  bool Done() const { return current_bit_index_ == -1; }
-  HInstruction* Current() const { return instructions_.Get(current_bit_index_); }
-  void Advance() {
-    current_bit_index_ = iterator_.Next();
-  }
-
- private:
-  const GrowableArray<HInstruction*> instructions_;
-  BitVector::Iterator iterator_;
-  int32_t current_bit_index_;
-
-  DISALLOW_COPY_AND_ASSIGN(InstructionBitVectorIterator);
-};
-
 void SsaLivenessAnalysis::ComputeLiveRanges() {
   // Do a post order visit, adding inputs of instructions live in the block where
   // that instruction is defined, and killing instructions that are being visited.
@@ -212,16 +199,19 @@
       live_in->Union(GetLiveInSet(*successor));
       size_t phi_input_index = successor->GetPredecessorIndexOf(block);
       for (HInstructionIterator it(successor->GetPhis()); !it.Done(); it.Advance()) {
-        HInstruction* input = it.Current()->InputAt(phi_input_index);
+        HInstruction* phi = it.Current();
+        HInstruction* input = phi->InputAt(phi_input_index);
+        input->GetLiveInterval()->AddPhiUse(phi, block);
+        // A phi input whose last user is the phi dies at the end of the predecessor block,
+        // and not at the phi's lifetime position.
         live_in->SetBit(input->GetSsaIndex());
       }
     }
 
     // Add a range that covers this block to all instructions live_in because of successors.
-    for (InstructionBitVectorIterator it(*live_in, instructions_from_ssa_index_);
-         !it.Done();
-         it.Advance()) {
-      it.Current()->GetLiveInterval()->AddRange(block->GetLifetimeStart(), block->GetLifetimeEnd());
+    for (uint32_t idx : live_in->Indexes()) {
+      HInstruction* current = instructions_from_ssa_index_.Get(idx);
+      current->GetLiveInterval()->AddRange(block->GetLifetimeStart(), block->GetLifetimeEnd());
     }
 
     for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
@@ -268,11 +258,10 @@
       HBasicBlock* back_edge = block->GetLoopInformation()->GetBackEdges().Get(0);
       // For all live_in instructions at the loop header, we need to create a range
       // that covers the full loop.
-      for (InstructionBitVectorIterator it(*live_in, instructions_from_ssa_index_);
-           !it.Done();
-           it.Advance()) {
-        it.Current()->GetLiveInterval()->AddLoopRange(block->GetLifetimeStart(),
-                                                      back_edge->GetLifetimeEnd());
+      for (uint32_t idx : live_in->Indexes()) {
+        HInstruction* current = instructions_from_ssa_index_.Get(idx);
+        current->GetLiveInterval()->AddLoopRange(block->GetLifetimeStart(),
+                                                 back_edge->GetLifetimeEnd());
       }
     }
   }
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 2d91436..4d56e1f 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -48,21 +48,68 @@
  * A live range contains the start and end of a range where an instruction
  * is live.
  */
-class LiveRange : public ValueObject {
+class LiveRange : public ArenaObject {
  public:
-  LiveRange(size_t start, size_t end) : start_(start), end_(end) {
+  LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
     DCHECK_LT(start, end);
+    DCHECK(next_ == nullptr || next_->GetStart() > GetEnd());
   }
 
   size_t GetStart() const { return start_; }
   size_t GetEnd() const { return end_; }
+  LiveRange* GetNext() const { return next_; }
+
+  bool IntersectsWith(const LiveRange& other) {
+    return (start_ >= other.start_ && start_ < other.end_)
+        || (other.start_ >= start_ && other.start_ < end_);
+  }
+
+  bool IsBefore(const LiveRange& other) {
+    return end_ <= other.start_;
+  }
+
+  void Dump(std::ostream& stream) {
+    stream << "[" << start_ << ", " << end_ << ")";
+  }
 
  private:
   size_t start_;
   size_t end_;
+  LiveRange* next_;
+
+  friend class LiveInterval;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRange);
 };
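Both predicates above rely on ranges being half-open, so two ranges that merely touch (one ends exactly where the other starts) do not intersect. A quick standalone check of the intersection predicate, with LiveRange reduced to a plain struct for the sketch:

#include <cassert>
#include <cstddef>

struct Range { size_t start, end; };  // half-open [start, end), as above

bool IntersectsWith(const Range& a, const Range& b) {
  return (a.start >= b.start && a.start < b.end)
      || (b.start >= a.start && b.start < a.end);
}

int main() {
  assert(!IntersectsWith({0, 42}, {42, 43}));  // touching ranges: no overlap
  assert(IntersectsWith({0, 42}, {41, 43}));   // one shared position: overlap
  return 0;
}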
 
-static constexpr int kDefaultNumberOfRanges = 3;
+/**
+ * A use position represents a live interval use at a given position.
+ */
+class UsePosition : public ArenaObject {
+ public:
+  UsePosition(HInstruction* user, size_t position, UsePosition* next)
+      : user_(user), position_(position), next_(next) {
+    DCHECK(user->AsPhi() != nullptr || GetPosition() == user->GetLifetimePosition());
+    DCHECK(next_ == nullptr || next->GetPosition() >= GetPosition());
+  }
+
+  size_t GetPosition() const { return position_; }
+
+  UsePosition* GetNext() const { return next_; }
+
+  HInstruction* GetUser() const { return user_; }
+
+  void Dump(std::ostream& stream) {
+    stream << position_;
+  }
+
+ private:
+  HInstruction* const user_;
+  const size_t position_;
+  UsePosition* const next_;
+
+  DISALLOW_COPY_AND_ASSIGN(UsePosition);
+};
 
 /**
  * An interval is a list of disjoint live ranges where an instruction is live.
@@ -70,67 +117,276 @@
  */
 class LiveInterval : public ArenaObject {
  public:
-  explicit LiveInterval(ArenaAllocator* allocator) : ranges_(allocator, kDefaultNumberOfRanges) {}
+  LiveInterval(ArenaAllocator* allocator, Primitive::Type type)
+      : allocator_(allocator),
+        first_range_(nullptr),
+        last_range_(nullptr),
+        first_use_(nullptr),
+        type_(type),
+        next_sibling_(nullptr),
+        register_(kNoRegister) {}
 
   void AddUse(HInstruction* instruction) {
     size_t position = instruction->GetLifetimePosition();
     size_t start_block_position = instruction->GetBlock()->GetLifetimeStart();
     size_t end_block_position = instruction->GetBlock()->GetLifetimeEnd();
-    if (ranges_.IsEmpty()) {
+    if (first_range_ == nullptr) {
       // First time we see a use of that interval.
-      ranges_.Add(LiveRange(start_block_position, position));
-    } else if (ranges_.Peek().GetStart() == start_block_position) {
+      first_range_ = last_range_ =
+          new (allocator_) LiveRange(start_block_position, position, nullptr);
+    } else if (first_range_->GetStart() == start_block_position) {
       // There is a use later in the same block.
-      DCHECK_LE(position, ranges_.Peek().GetEnd());
-    } else if (ranges_.Peek().GetStart() == end_block_position + 1) {
-      // Last use is in a following block.
-      LiveRange existing = ranges_.Pop();
-      ranges_.Add(LiveRange(start_block_position, existing.GetEnd()));
+      DCHECK_LE(position, first_range_->GetEnd());
+    } else if (first_range_->GetStart() == end_block_position) {
+      // Last use is in the following block.
+      first_range_->start_ = start_block_position;
     } else {
       // There is a hole in the interval. Create a new range.
-      ranges_.Add(LiveRange(start_block_position, position));
+      first_range_ = new (allocator_) LiveRange(start_block_position, position, first_range_);
     }
+    first_use_ = new (allocator_) UsePosition(instruction, position, first_use_);
+  }
+
+  void AddPhiUse(HInstruction* instruction, HBasicBlock* block) {
+    DCHECK(instruction->AsPhi() != nullptr);
+    first_use_ = new (allocator_) UsePosition(instruction, block->GetLifetimeEnd(), first_use_);
   }
 
   void AddRange(size_t start, size_t end) {
-    if (ranges_.IsEmpty()) {
-      ranges_.Add(LiveRange(start, end));
-    } else if (ranges_.Peek().GetStart() == end + 1) {
+    if (first_range_ == nullptr) {
+      first_range_ = last_range_ = new (allocator_) LiveRange(start, end, nullptr);
+    } else if (first_range_->GetStart() == end) {
       // There is a use in the following block.
-      LiveRange existing = ranges_.Pop();
-      ranges_.Add(LiveRange(start, existing.GetEnd()));
+      first_range_->start_ = start;
     } else {
       // There is a hole in the interval. Create a new range.
-      ranges_.Add(LiveRange(start, end));
+      first_range_ = new (allocator_) LiveRange(start, end, first_range_);
     }
   }
 
   void AddLoopRange(size_t start, size_t end) {
-    DCHECK(!ranges_.IsEmpty());
-    while (!ranges_.IsEmpty() && ranges_.Peek().GetEnd() < end) {
-      DCHECK_LE(start, ranges_.Peek().GetStart());
-      ranges_.Pop();
+    DCHECK(first_range_ != nullptr);
+    while (first_range_ != nullptr && first_range_->GetEnd() < end) {
+      DCHECK_LE(start, first_range_->GetStart());
+      first_range_ = first_range_->GetNext();
     }
-    if (ranges_.IsEmpty()) {
+    if (first_range_ == nullptr) {
       // Uses are only in the loop.
-      ranges_.Add(LiveRange(start, end));
+      first_range_ = last_range_ = new (allocator_) LiveRange(start, end, nullptr);
     } else {
       // There are uses after the loop.
-      LiveRange range = ranges_.Pop();
-      ranges_.Add(LiveRange(start, range.GetEnd()));
+      first_range_->start_ = start;
     }
   }
 
   void SetFrom(size_t from) {
-    DCHECK(!ranges_.IsEmpty());
-    LiveRange existing = ranges_.Pop();
-    ranges_.Add(LiveRange(from, existing.GetEnd()));
+    DCHECK(first_range_ != nullptr);
+    first_range_->start_ = from;
   }
 
-  const GrowableArray<LiveRange>& GetRanges() const { return ranges_; }
+  LiveRange* GetFirstRange() const { return first_range_; }
+
+  int GetRegister() const { return register_; }
+  void SetRegister(int reg) { register_ = reg; }
+  void ClearRegister() { register_ = kNoRegister; }
+  bool HasRegister() const { return register_ != kNoRegister; }
+
+  bool IsDeadAt(size_t position) {
+    return last_range_->GetEnd() <= position;
+  }
+
+  bool Covers(size_t position) {
+    LiveRange* current = first_range_;
+    while (current != nullptr) {
+      if (position >= current->GetStart() && position < current->GetEnd()) {
+        return true;
+      }
+      current = current->GetNext();
+    }
+    return false;
+  }
+
+  /**
+   * Returns the first intersection of this interval with `other`.
+   */
+  size_t FirstIntersectionWith(LiveInterval* other) {
+    // We only call this method if there is a lifetime hole in this interval
+    // at the start of `other`.
+    DCHECK(!Covers(other->GetStart()));
+    DCHECK_LE(GetStart(), other->GetStart());
+    // Move to the first range in this interval that starts at or after the start of `other`.
+    size_t other_start = other->GetStart();
+    LiveRange* my_range = first_range_;
+    while (my_range != nullptr) {
+      if (my_range->GetStart() >= other_start) {
+        break;
+      } else {
+        my_range = my_range->GetNext();
+      }
+    }
+    if (my_range == nullptr) {
+      return kNoLifetime;
+    }
+
+    // Walk both intervals' range lists in lockstep and return the first
+    // overlapping position.
+    LiveRange* other_range = other->first_range_;
+    do {
+      if (my_range->IntersectsWith(*other_range)) {
+        return std::max(my_range->GetStart(), other_range->GetStart());
+      } else if (my_range->IsBefore(*other_range)) {
+        my_range = my_range->GetNext();
+        if (my_range == nullptr) {
+          return kNoLifetime;
+        }
+      } else {
+        DCHECK(other_range->IsBefore(*my_range));
+        other_range = other_range->GetNext();
+        if (other_range == nullptr) {
+          return kNoLifetime;
+        }
+      }
+    } while (true);
+  }
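The loop above is a merge-style, two-pointer walk over two sorted lists of disjoint ranges: whichever range lies entirely before the other is advanced, and the first overlapping pair yields the later of the two starts. The same walk over plain vectors (a sketch of the algorithm, not the real linked-list representation):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct Range { size_t start, end; };  // half-open [start, end)

static constexpr size_t kNoLifetime = static_cast<size_t>(-1);

size_t FirstIntersection(const std::vector<Range>& a, const std::vector<Range>& b) {
  size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    if (a[i].end <= b[j].start) {
      ++i;  // a's range is entirely before b's: advance a
    } else if (b[j].end <= a[i].start) {
      ++j;  // b's range is entirely before a's: advance b
    } else {
      return std::max(a[i].start, b[j].start);  // first overlap
    }
  }
  return kNoLifetime;
}

int main() {
  // {[0, 42), [44, 48)} and {[42, 47)} first overlap at position 44.
  std::printf("%zu\n", FirstIntersection({{0, 42}, {44, 48}}, {{42, 47}}));
  return 0;
}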
+
+  size_t GetStart() const {
+    return first_range_->GetStart();
+  }
+
+  size_t FirstRegisterUseAfter(size_t position) const {
+    UsePosition* use = first_use_;
+    while (use != nullptr) {
+      size_t use_position = use->GetPosition();
+      // TODO: Once we plug the Locations builder of the code generator
+      // to the register allocator, this method must be adjusted. We
+      // test if there is an environment, because these are currently the only
+      // instructions that could have more uses than the number of registers.
+      if (use_position >= position && !use->GetUser()->NeedsEnvironment()) {
+        return use_position;
+      }
+      use = use->GetNext();
+    }
+    return kNoLifetime;
+  }
+
+  size_t FirstRegisterUse() const {
+    return FirstRegisterUseAfter(GetStart());
+  }
+
+  Primitive::Type GetType() const {
+    return type_;
+  }
+
+  /**
+   * Split this interval at `position`. This interval is changed to:
+   * [start ... position).
+   *
+   * The new interval covers:
+   * [position ... end)
+   */
+  LiveInterval* SplitAt(size_t position) {
+    DCHECK(next_sibling_ == nullptr);
+    DCHECK_GT(position, GetStart());
+
+    if (last_range_->GetEnd() <= position) {
+      // This interval ends at or before `position`, so there is nothing to split.
+      return nullptr;
+    }
+
+    LiveInterval* new_interval = new (allocator_) LiveInterval(allocator_, type_);
+    next_sibling_ = new_interval;
+
+    new_interval->first_use_ = first_use_;
+    LiveRange* current = first_range_;
+    LiveRange* previous = nullptr;
+    // Iterate over the ranges, and either find a range that covers this position, or
+    // two consecutive ranges with this position in between (that is, the
+    // position is in a lifetime hole).
+    do {
+      if (position >= current->GetEnd()) {
+        // Move to next range.
+        previous = current;
+        current = current->next_;
+      } else if (position <= current->GetStart()) {
+        // If the previous range did not cover this position, we know position is in
+        // a lifetime hole. We can just break the first_range_ and last_range_ links
+        // and return the new interval.
+        DCHECK(previous != nullptr);
+        DCHECK(current != first_range_);
+        new_interval->last_range_ = last_range_;
+        last_range_ = previous;
+        previous->next_ = nullptr;
+        new_interval->first_range_ = current;
+        return new_interval;
+      } else {
+        // This range covers position. We create a new last_range_ for this interval,
+        // covering current->GetStart() up to position. We also shorten the current
+        // range and make it the first range of the new interval.
+        DCHECK(position < current->GetEnd() && position > current->GetStart());
+        new_interval->last_range_ = last_range_;
+        last_range_ = new (allocator_) LiveRange(current->start_, position, nullptr);
+        if (previous != nullptr) {
+          previous->next_ = last_range_;
+        } else {
+          first_range_ = last_range_;
+        }
+        new_interval->first_range_ = current;
+        current->start_ = position;
+        return new_interval;
+      }
+    } while (current != nullptr);
+
+    LOG(FATAL) << "Unreachable";
+    return nullptr;
+  }
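Worked through on the data from the test above: splitting {[0, 42), [44, 48)} at position 43 falls in the lifetime hole, so the original interval keeps [0, 42) and the sibling takes [44, 48); splitting at 45 would instead cut [44, 48) into [44, 45) and [45, 48). A vector-based model of the range bookkeeping (it ignores the shared use list, the sibling link, and the corner cases SplitAt DCHECKs):

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

using Range = std::pair<size_t, size_t>;  // half-open [first, second)

// Model of LiveInterval::SplitAt over a sorted range list: ranges ending at or
// before `position` stay, ranges starting at or after it move to the sibling,
// and a range that straddles `position` is cut in two.
std::vector<Range> SplitAt(std::vector<Range>& ranges, size_t position) {
  std::vector<Range> kept;
  std::vector<Range> sibling;
  for (const Range& r : ranges) {
    if (r.second <= position) {
      kept.push_back(r);                    // dies before the split position
    } else if (r.first >= position) {
      sibling.push_back(r);                 // the split falls in a hole
    } else {
      kept.push_back({r.first, position});  // cut the covering range
      sibling.push_back({position, r.second});
    }
  }
  ranges = std::move(kept);
  return sibling;
}

int main() {
  std::vector<Range> ranges = {{0, 42}, {44, 48}};
  std::vector<Range> sibling = SplitAt(ranges, 43);  // position in the hole
  assert(ranges == (std::vector<Range>{{0, 42}}));
  assert(sibling == (std::vector<Range>{{44, 48}}));
  return 0;
}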
+
+  bool StartsBefore(LiveInterval* other) const {
+    return GetStart() <= other->GetStart();
+  }
+
+  bool StartsAfter(LiveInterval* other) const {
+    return GetStart() >= other->GetStart();
+  }
+
+  void Dump(std::ostream& stream) const {
+    stream << "ranges: { ";
+    LiveRange* current = first_range_;
+    do {
+      current->Dump(stream);
+      stream << " ";
+    } while ((current = current->GetNext()) != nullptr);
+    stream << "}, uses: { ";
+    UsePosition* use = first_use_;
+    if (use != nullptr) {
+      do {
+        use->Dump(stream);
+        stream << " ";
+      } while ((use = use->GetNext()) != nullptr);
+    }
+    stream << "}";
+  }
+
+  LiveInterval* GetNextSibling() const { return next_sibling_; }
 
  private:
-  GrowableArray<LiveRange> ranges_;
+  ArenaAllocator* const allocator_;
+
+  // Ranges of this interval. We need a quick access to the last range to test
+  // for liveness (see `IsDeadAt`).
+  LiveRange* first_range_;
+  LiveRange* last_range_;
+
+  // Uses of this interval. Note that this linked list is shared amongst siblings.
+  UsePosition* first_use_;
+
+  // The instruction type this interval corresponds to.
+  const Primitive::Type type_;
+
+  // Live interval that is the result of a split.
+  LiveInterval* next_sibling_;
+
+  // The register allocated to this interval.
+  int register_;
+
+  static constexpr int kNoRegister = -1;
 
   DISALLOW_COPY_AND_ASSIGN(LiveInterval);
 };
@@ -164,10 +420,14 @@
     return linear_post_order_;
   }
 
-  HInstruction* GetInstructionFromSsaIndex(size_t index) {
+  HInstruction* GetInstructionFromSsaIndex(size_t index) const {
     return instructions_from_ssa_index_.Get(index);
   }
 
+  size_t GetNumberOfSsaValues() const {
+    return number_of_ssa_values_;
+  }
+
  private:
   // Linearize the graph so that:
   // (1): a block is always after its dominator,
diff --git a/compiler/output_stream.h b/compiler/output_stream.h
index 478a854..97ccc2c 100644
--- a/compiler/output_stream.h
+++ b/compiler/output_stream.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_OUTPUT_STREAM_H_
 
 #include <stdint.h>
+#include <sys/types.h>
 
 #include <string>
 
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index 032eabc..dbe482d 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -170,6 +170,10 @@
     return ret;
   }
 
+  template <typename T> T* AllocArray(size_t length) {
+    return static_cast<T*>(Alloc(length * sizeof(T), kArenaAllocMisc));
+  }
+
   void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
   void ObtainNewArenaForAllocation(size_t allocation_size);
   size_t BytesAllocated() const;
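AllocArray above is a thin typed wrapper over Alloc; a hypothetical call site, assuming `allocator` is an ArenaAllocator*:

// Carve a typed array out of the arena. There is no per-element free; the
// memory is reclaimed with the arena as a whole.
size_t* positions = allocator->AllocArray<size_t>(16);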
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 27188b2..009b227 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -530,7 +530,7 @@
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsCoreRegister()) << scratch;
   // Call *(*(SP + base) + offset)
-  LoadFromOffset(scratch.AsCoreRegister(), SP, base.Int32Value());
+  LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP, base.Int32Value());
   LoadFromOffset(scratch.AsCoreRegister(), scratch.AsCoreRegister(), offs.Int32Value());
   ___ Blr(reg_x(scratch.AsCoreRegister()));
 }
@@ -656,16 +656,17 @@
   // trashed by native code.
   ___ Mov(reg_x(ETR), reg_x(TR));
 
-  // Increate frame to required size - must be at least space to push Method*.
+  // Increase frame to required size - must be at least space to push StackReference<Method>.
   CHECK_GT(frame_size, kCalleeSavedRegsSize * kFramePointerSize);
   size_t adjust = frame_size - (kCalleeSavedRegsSize * kFramePointerSize);
   IncreaseFrameSize(adjust);
 
-  // Write Method*.
-  StoreToOffset(X0, SP, 0);
+  // Write StackReference<Method>.
+  DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
+  StoreWToOffset(StoreOperandType::kStoreWord, W0, SP, 0);
 
   // Write out entry spills
-  int32_t offset = frame_size + kFramePointerSize;
+  int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
   for (size_t i = 0; i < entry_spills.size(); ++i) {
     Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
     if (reg.IsNoRegister()) {
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index e703d8e..a1a3312 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -169,6 +169,13 @@
       num_used_--;
     };
 
+    void DeleteAt(size_t index) {
+      for (size_t i = index; i < num_used_ - 1; i++) {
+        elem_list_[i] = elem_list_[i + 1];
+      }
+      num_used_--;
+    };
+
     size_t GetNumAllocated() const { return num_allocated_; }
 
     size_t Size() const { return num_used_; }
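DeleteAt removes the element at `index` by shifting the tail left one slot, so unlike a swap-with-last removal it preserves element order, at O(n) cost. A hypothetical call site:

GrowableArray<int> worklist(&allocator, 8);
worklist.Insert(1);
worklist.Insert(2);
worklist.Insert(3);
worklist.DeleteAt(0);  // worklist is now {2, 3}, order preserved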
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 0791c63..56c6536 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1411,10 +1411,12 @@
   }
   // return address then method on stack
   addl(ESP, Immediate(-frame_size + (spill_regs.size() * kFramePointerSize) +
-                      kFramePointerSize /*method*/ + kFramePointerSize /*return address*/));
+                      sizeof(StackReference<mirror::ArtMethod>) /*method*/ +
+                      kFramePointerSize /*return address*/));
   pushl(method_reg.AsX86().AsCpuRegister());
   for (size_t i = 0; i < entry_spills.size(); ++i) {
-    movl(Address(ESP, frame_size + kFramePointerSize + (i * kFramePointerSize)),
+    movl(Address(ESP, frame_size + sizeof(StackReference<mirror::ArtMethod>) +
+                 (i * kFramePointerSize)),
          entry_spills.at(i).AsX86().AsCpuRegister());
   }
 }
@@ -1422,7 +1424,8 @@
 void X86Assembler::RemoveFrame(size_t frame_size,
                             const std::vector<ManagedRegister>& spill_regs) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
-  addl(ESP, Immediate(frame_size - (spill_regs.size() * kFramePointerSize) - kFramePointerSize));
+  addl(ESP, Immediate(frame_size - (spill_regs.size() * kFramePointerSize) -
+                      sizeof(StackReference<mirror::ArtMethod>)));
   for (size_t i = 0; i < spill_regs.size(); ++i) {
     popl(spill_regs.at(i).AsX86().AsCpuRegister());
   }
diff --git a/compiler/utils/x86/managed_register_x86.cc b/compiler/utils/x86/managed_register_x86.cc
index 034a795..021fe88 100644
--- a/compiler/utils/x86/managed_register_x86.cc
+++ b/compiler/utils/x86/managed_register_x86.cc
@@ -95,11 +95,11 @@
   if (!IsValidManagedRegister()) {
     os << "No Register";
   } else if (IsXmmRegister()) {
-    os << "XMM: " << static_cast<int>(AsXmmRegister());
+    os << "XMM: " << AsXmmRegister();
   } else if (IsX87Register()) {
-    os << "X87: " << static_cast<int>(AsX87Register());
+    os << "X87: " << AsX87Register();
   } else if (IsCpuRegister()) {
-    os << "CPU: " << static_cast<int>(AsCpuRegister());
+    os << "CPU: " << AsCpuRegister();
   } else if (IsRegisterPair()) {
     os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh();
   } else {
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 0ede875..a14551c 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -59,7 +59,6 @@
   EmitLabel(label, kSize);
 }
 
-
 void X86_64Assembler::pushq(CpuRegister reg) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitOptionalRex32(reg);
@@ -1652,8 +1651,12 @@
   }
   // return address then method on stack
   addq(CpuRegister(RSP), Immediate(-frame_size + (spill_regs.size() * kFramePointerSize) +
-                                   kFramePointerSize /*method*/ + kFramePointerSize /*return address*/));
-  pushq(method_reg.AsX86_64().AsCpuRegister());
+                                   sizeof(StackReference<mirror::ArtMethod>) /*method*/ +
+                                   kFramePointerSize /*return address*/));
+
+  DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
+  subq(CpuRegister(RSP), Immediate(4));
+  movl(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
 
   for (size_t i = 0; i < entry_spills.size(); ++i) {
     ManagedRegisterSpill spill = entry_spills.at(i);
@@ -1732,7 +1735,7 @@
 void X86_64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
   X86_64ManagedRegister src = msrc.AsX86_64();
   CHECK(src.IsCpuRegister());
-  movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
+  movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
 }
 
 void X86_64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
@@ -2070,7 +2073,7 @@
 
 void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
   CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
-  movq(scratch, Address(CpuRegister(RSP), base));
+  movl(scratch, Address(CpuRegister(RSP), base));
   call(Address(scratch, offset));
 }
 
diff --git a/dalvikvm/Android.mk b/dalvikvm/Android.mk
index 0ded2d8..03d32f0 100644
--- a/dalvikvm/Android.mk
+++ b/dalvikvm/Android.mk
@@ -51,6 +51,7 @@
 LOCAL_LDFLAGS := -ldl -lpthread
 LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
 LOCAL_IS_HOST_MODULE := true
+include external/libcxx/libcxx.mk
 include $(BUILD_HOST_EXECUTABLE)
 ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE)
 endif
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 9914875..c6b1aa5 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -33,7 +33,7 @@
 #include "compiler.h"
 #include "compiler_callbacks.h"
 #include "dex_file-inl.h"
-#include "dex/pass_driver.h"
+#include "dex/pass_driver_me_opts.h"
 #include "dex/verification_results.h"
 #include "driver/compiler_callbacks_impl.h"
 #include "driver/compiler_driver.h"
@@ -295,8 +295,9 @@
                                 zip_filename, error_msg->c_str());
       return nullptr;
     }
-    std::unique_ptr<MemMap> image_classes_file(zip_entry->ExtractToMemMap(image_classes_filename,
-                                                                    error_msg));
+    std::unique_ptr<MemMap> image_classes_file(zip_entry->ExtractToMemMap(zip_filename,
+                                                                          image_classes_filename,
+                                                                          error_msg));
     if (image_classes_file.get() == nullptr) {
       *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", image_classes_filename,
                                 zip_filename, error_msg->c_str());
@@ -918,10 +919,18 @@
     } else if (option == "--no-profile-file") {
       // No profile
     } else if (option == "--print-pass-names") {
-      PassDriver::PrintPassNames();
+      PassDriverMEOpts::PrintPassNames();
     } else if (option.starts_with("--disable-passes=")) {
       std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
-      PassDriver::CreateDefaultPassList(disable_passes);
+      PassDriverMEOpts::CreateDefaultPassList(disable_passes);
+    } else if (option.starts_with("--print-passes=")) {
+      std::string print_passes = option.substr(strlen("--print-passes=")).data();
+      PassDriverMEOpts::SetPrintPassList(print_passes);
+    } else if (option == "--print-all-passes") {
+      PassDriverMEOpts::SetPrintAllPasses();
+    } else if (option.starts_with("--dump-cfg-passes=")) {
+      std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data();
+      PassDriverMEOpts::SetDumpPassList(dump_passes);
     } else {
       Usage("Unknown argument %s", option.data());
     }
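The three new flags above expose the pass driver's debugging hooks on the dex2oat command line: --print-passes= and --dump-cfg-passes= take a list of pass names (as printed by --print-pass-names), while --print-all-passes takes no argument. A hypothetical invocation, with the usual companion flags elided:

  dex2oat --dex-file=app.apk --oat-file=app.oat --print-all-passes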
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index dd4e9d5..814323c 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -87,8 +87,8 @@
 
   LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
   LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+  include external/libcxx/libcxx.mk
   ifeq ($$(art_target_or_host),target)
-    include external/libcxx/libcxx.mk
     LOCAL_SHARED_LIBRARIES += libcutils libvixl
     include $(BUILD_SHARED_LIBRARY)
   else # host
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 5cc6acf..614eca1 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -363,10 +363,49 @@
         src_reg_file = dst_reg_file = SSE;
         break;
       case 0x38:  // 3 byte extended opcode
-        opcode << StringPrintf("unknown opcode '0F 38 %02X'", *instr);
+        instr++;
+        if (prefix[2] == 0x66) {
+          switch (*instr) {
+            case 0x40:
+              opcode << "pmulld";
+              prefix[2] = 0;
+              has_modrm = true;
+              load = true;
+              src_reg_file = dst_reg_file = SSE;
+              break;
+            default:
+              opcode << StringPrintf("unknown opcode '0F 38 %02X'", *instr);
+          }
+        } else {
+          opcode << StringPrintf("unknown opcode '0F 38 %02X'", *instr);
+        }
         break;
       case 0x3A:  // 3 byte extended opcode
-        opcode << StringPrintf("unknown opcode '0F 3A %02X'", *instr);
+        instr++;
+        if (prefix[2] == 0x66) {
+          switch (*instr) {
+            case 0x14:
+              opcode << "pextrb";
+              prefix[2] = 0;
+              has_modrm = true;
+              store = true;
+              dst_reg_file = SSE;
+              immediate_bytes = 1;
+              break;
+            case 0x16:
+              opcode << "pextrd";
+              prefix[2] = 0;
+              has_modrm = true;
+              store = true;
+              dst_reg_file = SSE;
+              immediate_bytes = 1;
+              break;
+            default:
+              opcode << StringPrintf("unknown opcode '0F 3A %02X'", *instr);
+          }
+        } else {
+          opcode << StringPrintf("unknown opcode '0F 3A %02X'", *instr);
+        }
         break;
       case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
       case 0x48: case 0x49: case 0x4A: case 0x4B: case 0x4C: case 0x4D: case 0x4E: case 0x4F:
@@ -467,11 +506,11 @@
         break;
       case 0x6F:
         if (prefix[2] == 0x66) {
-          dst_reg_file = SSE;
+          src_reg_file = dst_reg_file = SSE;
           opcode << "movdqa";
           prefix[2] = 0;  // clear prefix now it's served its purpose as part of the opcode
         } else if (prefix[0] == 0xF3) {
-          dst_reg_file = SSE;
+          src_reg_file = dst_reg_file = SSE;
           opcode << "movdqu";
           prefix[0] = 0;  // clear prefix now it's served its purpose as part of the opcode
         } else {
@@ -481,6 +520,25 @@
         load = true;
         has_modrm = true;
         break;
+      case 0x70:
+        if (prefix[2] == 0x66) {
+          opcode << "pshufd";
+          prefix[2] = 0;
+          has_modrm = true;
+          store = true;
+          src_reg_file = dst_reg_file = SSE;
+          immediate_bytes = 1;
+        } else if (prefix[0] == 0xF2) {
+          opcode << "pshuflw";
+          prefix[0] = 0;
+          has_modrm = true;
+          store = true;
+          src_reg_file = dst_reg_file = SSE;
+          immediate_bytes = 1;
+        } else {
+          opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
+        }
+        break;
       case 0x71:
         if (prefix[2] == 0x66) {
           dst_reg_file = SSE;
@@ -603,6 +661,18 @@
       case 0xB7: opcode << "movzxw"; has_modrm = true; load = true; break;
       case 0xBE: opcode << "movsxb"; has_modrm = true; load = true; break;
       case 0xBF: opcode << "movsxw"; has_modrm = true; load = true; break;
+      case 0xC5:
+        if (prefix[2] == 0x66) {
+          opcode << "pextrw";
+          prefix[2] = 0;
+          has_modrm = true;
+          store = true;
+          src_reg_file = dst_reg_file = SSE;
+          immediate_bytes = 1;
+        } else {
+          opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
+        }
+        break;
       case 0xC7:
         static const char* x0FxC7_opcodes[] = { "unknown-0f-c7", "cmpxchg8b", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7" };
         modrm_opcodes = x0FxC7_opcodes;
@@ -614,6 +684,125 @@
         opcode << "bswap";
         reg_in_opcode = true;
         break;
+      case 0xD5:
+        if (prefix[2] == 0x66) {
+          opcode << "pmullw";
+          prefix[2] = 0;
+          has_modrm = true;
+          load = true;
+          src_reg_file = dst_reg_file = SSE;
+        } else {
+          opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
+        }
+        break;
+      case 0xDB:
+        if (prefix[2] == 0x66) {
+          src_reg_file = dst_reg_file = SSE;
+          prefix[2] = 0;  // clear prefix now it's served its purpose as part of the opcode
+        } else {
+          src_reg_file = dst_reg_file = MMX;
+        }
+        opcode << "pand";
+        has_modrm = true;
+        load = true;
+        break;
+      case 0xEB:
+        if (prefix[2] == 0x66) {
+          src_reg_file = dst_reg_file = SSE;
+          prefix[2] = 0;  // clear prefix now it's served its purpose as part of the opcode
+        } else {
+          src_reg_file = dst_reg_file = MMX;
+        }
+        opcode << "por";
+        has_modrm = true;
+        load = true;
+        break;
+      case 0xEF:
+        if (prefix[2] == 0x66) {
+          src_reg_file = dst_reg_file = SSE;
+          prefix[2] = 0;  // clear prefix now it's served its purpose as part of the opcode
+        } else {
+          src_reg_file = dst_reg_file = MMX;
+        }
+        opcode << "pxor";
+        has_modrm = true;
+        load = true;
+        break;
+      case 0xF8:
+        if (prefix[2] == 0x66) {
+          src_reg_file = dst_reg_file = SSE;
+          prefix[2] = 0;  // clear prefix now it's served its purpose as part of the opcode
+        } else {
+          src_reg_file = dst_reg_file = MMX;
+        }
+        opcode << "psubb";
+        has_modrm = true;
+        load = true;
+        break;
+      case 0xF9:
+        if (prefix[2] == 0x66) {
+          src_reg_file = dst_reg_file = SSE;
+          prefix[2] = 0;  // clear prefix now it's served its purpose as part of the opcode
+        } else {
+          src_reg_file = dst_reg_file = MMX;
+        }
+        opcode << "psubw";
+        has_modrm = true;
+        load = true;
+        break;
+      case 0xFA:
+        if (prefix[2] == 0x66) {
+          src_reg_file = dst_reg_file = SSE;
+          prefix[2] = 0;  // clear prefix now it's served its purpose as part of the opcode
+        } else {
+          src_reg_file = dst_reg_file = MMX;
+        }
+        opcode << "psubd";
+        has_modrm = true;
+        load = true;
+        break;
+      case 0xFC:
+        if (prefix[2] == 0x66) {
+          src_reg_file = dst_reg_file = SSE;
+          prefix[2] = 0;  // clear prefix now it's served its purpose as part of the opcode
+        } else {
+          src_reg_file = dst_reg_file = MMX;
+        }
+        opcode << "paddb";
+        has_modrm = true;
+        load = true;
+        break;
+      case 0xFD:
+        if (prefix[2] == 0x66) {
+          src_reg_file = dst_reg_file = SSE;
+          prefix[2] = 0;  // clear prefix now it's served its purpose as part of the opcode
+        } else {
+          src_reg_file = dst_reg_file = MMX;
+        }
+        opcode << "paddw";
+        has_modrm = true;
+        load = true;
+        break;
+      case 0xFE:
+        if (prefix[2] == 0x66) {
+          src_reg_file = dst_reg_file = SSE;
+          prefix[2] = 0;  // clear prefix now it's served its purpose as part of the opcode
+        } else {
+          src_reg_file = dst_reg_file = MMX;
+        }
+        opcode << "paddd";
+        has_modrm = true;
+        load = true;
+        break;
       default:
         opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
         break;
@@ -851,8 +1040,13 @@
       instr++;
     } else {
       CHECK_EQ(immediate_bytes, 4u);
-      args << StringPrintf("%d", *reinterpret_cast<const int32_t*>(instr));
-      instr += 4;
+      if (prefix[2] == 0x66) {  // Operand size override from 32-bit to 16-bit.
+        args << StringPrintf("%d", *reinterpret_cast<const int16_t*>(instr));
+        instr += 2;
+      } else {
+        args << StringPrintf("%d", *reinterpret_cast<const int32_t*>(instr));
+        instr += 4;
+      }
     }
   } else if (branch_bytes > 0) {
     DCHECK(!has_modrm);
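For context on the immediate fix above: under the 0x66 operand-size override, `66 81 C0 34 12` decodes as `add ax, 0x1234` with a 2-byte immediate, whereas the unprefixed `81 C0` takes a 4-byte one; always reading 4 bytes would swallow the start of the next instruction. A sketch of the decode step in isolation (hypothetical helper, not the disassembler's real control flow):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Immediates shrink from 4 to 2 bytes when the 0x66 operand-size-override
// prefix is still active at immediate-decoding time.
int32_t ReadImmediate(const uint8_t* instr, bool has_66_prefix, size_t* length) {
  if (has_66_prefix) {
    int16_t imm16;
    std::memcpy(&imm16, instr, sizeof(imm16));
    *length = 2;
    return imm16;
  }
  int32_t imm32;
  std::memcpy(&imm32, instr, sizeof(imm32));
  *length = 4;
  return imm32;
}

int main() {
  const uint8_t bytes[] = {0x34, 0x12, 0x00, 0x00};
  size_t length;
  std::printf("%d\n", ReadImmediate(bytes, /*has_66_prefix=*/true, &length));  // prints 4660
  return 0;
}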
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index dcae502..7c76b3c 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -76,6 +76,13 @@
           "      Example: --boot-image=/system/framework/boot.art\n"
           "\n");
   fprintf(stderr,
+          "  --instruction-set=(arm|arm64|mips|x86|x86_64): for locating the image file based on the image location\n"
+          "      set.\n"
+          "      Example: --instruction-set=x86\n"
+          "      Default: %s\n"
+          "\n",
+          GetInstructionSetString(kRuntimeISA));
+  fprintf(stderr,
           "  --output=<file> may be used to send the output to a file.\n"
           "      Example: --output=/tmp/oatdump.txt\n"
           "\n");
@@ -417,13 +424,13 @@
       Runtime* runtime = Runtime::Current();
       if (runtime != nullptr) {
         ScopedObjectAccess soa(Thread::Current());
-        StackHandleScope<2> hs(soa.Self());
+        StackHandleScope<1> hs(soa.Self());
         Handle<mirror::DexCache> dex_cache(
             hs.NewHandle(runtime->GetClassLinker()->FindDexCache(dex_file)));
-        auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
+        NullHandle<mirror::ClassLoader> class_loader;
         verifier::MethodVerifier verifier(&dex_file, &dex_cache, &class_loader, &class_def,
                                           code_item, dex_method_idx, nullptr, method_access_flags,
-                                          true, true);
+                                          true, true, true);
         verifier.Verify();
         DumpCode(indent2_os, &verifier, oat_method, code_item);
       } else {
@@ -1461,8 +1468,9 @@
   }
 
   const char* oat_filename = NULL;
-  const char* image_filename = NULL;
-  const char* boot_image_filename = NULL;
+  const char* image_location = NULL;
+  const char* boot_image_location = NULL;
+  InstructionSet instruction_set = kRuntimeISA;
   std::string elf_filename_prefix;
   std::ostream* os = &std::cout;
   std::unique_ptr<std::ofstream> out;
@@ -1474,9 +1482,22 @@
     if (option.starts_with("--oat-file=")) {
       oat_filename = option.substr(strlen("--oat-file=")).data();
     } else if (option.starts_with("--image=")) {
-      image_filename = option.substr(strlen("--image=")).data();
+      image_location = option.substr(strlen("--image=")).data();
     } else if (option.starts_with("--boot-image=")) {
-      boot_image_filename = option.substr(strlen("--boot-image=")).data();
+      boot_image_location = option.substr(strlen("--boot-image=")).data();
+    } else if (option.starts_with("--instruction-set=")) {
+      StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
+      if (instruction_set_str == "arm") {
+        instruction_set = kThumb2;
+      } else if (instruction_set_str == "arm64") {
+        instruction_set = kArm64;
+      } else if (instruction_set_str == "mips") {
+        instruction_set = kMips;
+      } else if (instruction_set_str == "x86") {
+        instruction_set = kX86;
+      } else if (instruction_set_str == "x86_64") {
+        instruction_set = kX86_64;
+      }
     } else if (option.starts_with("--dump:")) {
         if (option == "--dump:raw_mapping_table") {
           dump_raw_mapping_table = true;
@@ -1500,12 +1521,12 @@
     }
   }
 
-  if (image_filename == NULL && oat_filename == NULL) {
+  if (image_location == NULL && oat_filename == NULL) {
     fprintf(stderr, "Either --image or --oat must be specified\n");
     return EXIT_FAILURE;
   }
 
-  if (image_filename != NULL && oat_filename != NULL) {
+  if (image_location != NULL && oat_filename != NULL) {
     fprintf(stderr, "Either --image or --oat must be specified but not both\n");
     return EXIT_FAILURE;
   }
@@ -1533,16 +1554,19 @@
   NoopCompilerCallbacks callbacks;
   options.push_back(std::make_pair("compilercallbacks", &callbacks));
 
-  if (boot_image_filename != NULL) {
+  if (boot_image_location != NULL) {
     boot_image_option += "-Ximage:";
-    boot_image_option += boot_image_filename;
+    boot_image_option += boot_image_location;
     options.push_back(std::make_pair(boot_image_option.c_str(), reinterpret_cast<void*>(NULL)));
   }
-  if (image_filename != NULL) {
+  if (image_location != NULL) {
     image_option += "-Ximage:";
-    image_option += image_filename;
+    image_option += image_location;
     options.push_back(std::make_pair(image_option.c_str(), reinterpret_cast<void*>(NULL)));
   }
+  options.push_back(
+      std::make_pair("imageinstructionset",
+                     reinterpret_cast<const void*>(GetInstructionSetString(instruction_set))));
 
   if (!Runtime::Create(options, false)) {
     fprintf(stderr, "Failed to create runtime\n");
@@ -1558,7 +1582,7 @@
   CHECK(image_space != NULL);
   const ImageHeader& image_header = image_space->GetImageHeader();
   if (!image_header.IsValid()) {
-    fprintf(stderr, "Invalid image header %s\n", image_filename);
+    fprintf(stderr, "Invalid image header %s\n", image_location);
     return EXIT_FAILURE;
   }
   ImageDumper image_dumper(os, *image_space, image_header,
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 1521caa..17f0493 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -35,11 +35,9 @@
 	base/unix_file/random_access_file_utils.cc \
 	base/unix_file/string_file.cc \
 	check_jni.cc \
-	catch_block_stack_visitor.cc \
 	class_linker.cc \
 	common_throws.cc \
 	debugger.cc \
-	deoptimize_stack_visitor.cc \
 	dex_file.cc \
 	dex_file_verifier.cc \
 	dex_instruction.cc \
@@ -308,6 +306,12 @@
   LIBART_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
 endif
 
+ifeq ($(MALLOC_IMPL),jemalloc)
+  LIBART_CFLAGS += -DUSE_JEMALLOC
+else
+  LIBART_CFLAGS += -DUSE_DLMALLOC
+endif
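The new define gives C++ code a compile-time switch on the allocator choice (MALLOC_IMPL is presumably set per device in the board configuration); a minimal sketch of a consumer, hypothetical since the hunk above only defines the macros:

// Hypothetical consumer of the new -DUSE_JEMALLOC / -DUSE_DLMALLOC flags.
#if defined(USE_JEMALLOC)
static constexpr const char* kMallocImplementation = "jemalloc";
#elif defined(USE_DLMALLOC)
static constexpr const char* kMallocImplementation = "dlmalloc";
#else
#error "No malloc implementation macro defined."
#endif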
+
 # $(1): target or host
 # $(2): ndebug or debug
 # $(3): true or false for LOCAL_CLANG
@@ -396,15 +400,13 @@
     endif
   endif
   LOCAL_C_INCLUDES += $(ART_C_INCLUDES)
+  LOCAL_C_INCLUDES += art/sigchainlib
+
   LOCAL_SHARED_LIBRARIES += liblog libnativehelper
+  include external/libcxx/libcxx.mk
+  LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
   ifeq ($$(art_target_or_host),target)
-    include external/libcxx/libcxx.mk
-    LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
-  else
-    LOCAL_SHARED_LIBRARIES += libbacktrace
-  endif
-  ifeq ($$(art_target_or_host),target)
-    LOCAL_SHARED_LIBRARIES += libcutils libdl libselinux libutils
+    LOCAL_SHARED_LIBRARIES += libcutils libdl libselinux libutils libsigchain
     LOCAL_STATIC_LIBRARIES := libziparchive libz
   else # host
     LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils
@@ -457,3 +459,4 @@
 ifeq ($(ART_BUILD_TARGET_DEBUG),true)
   $(eval $(call build-libart,target,debug,$(ART_TARGET_CLANG)))
 endif
+
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 45ff21f..5220dc3 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -337,30 +337,22 @@
 // The following tests are all for the running architecture. So we get away
 // with just including it and not undefining it every time.
 
-
 #if defined(__arm__)
 #include "arch/arm/asm_support_arm.h"
-#undef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
 #elif defined(__aarch64__)
 #include "arch/arm64/asm_support_arm64.h"
-#undef ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
 #elif defined(__mips__)
 #include "arch/mips/asm_support_mips.h"
-#undef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
 #elif defined(__i386__)
 #include "arch/x86/asm_support_x86.h"
-#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
 #elif defined(__x86_64__)
 #include "arch/x86_64/asm_support_x86_64.h"
-#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
 #else
   // This happens for the host test.
 #ifdef __LP64__
 #include "arch/x86_64/asm_support_x86_64.h"
-#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
 #else
 #include "arch/x86/asm_support_x86.h"
-#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
 #endif
 #endif
 
@@ -436,4 +428,13 @@
 #endif
 }
 
+TEST_F(ArchTest, StackReferenceSize) {
+#if defined(STACK_REFERENCE_SIZE)
+  EXPECT_EQ(sizeof(StackReference<mirror::Object>),
+            static_cast<size_t>(STACK_REFERENCE_SIZE));
+#else
+  LOG(INFO) << "No expected STACK_REFERENCE_SIZE #define found.";
+#endif
+}
+
 }  // namespace art
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 23e3433..340a83e 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -196,7 +196,6 @@
   qpoints->pCmplDouble = CmplDouble;
   qpoints->pCmplFloat = CmplFloat;
   qpoints->pFmod = fmod;
-  qpoints->pSqrt = sqrt;
   qpoints->pL2d = __aeabi_l2d;
   qpoints->pFmodf = fmodf;
   qpoints->pL2f = __aeabi_l2f;
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index 9614c29..b94375e 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -21,6 +21,9 @@
 
 // Define special registers.
 
+// Register holding suspend check count down.
+// 32-bit is enough for the suspend register.
+#define wSUSPEND w19
 // Register holding Thread::Current().
 #define xSELF x18
 // Frame Pointer
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index e55885f..422e20cf 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -43,5 +43,7 @@
 
 // Expected size of a heap reference
 #define HEAP_REFERENCE_SIZE 4
+// Expected size of a stack reference
+#define STACK_REFERENCE_SIZE 4
 
 #endif  // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 2a5c7d1..46e819e 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -84,12 +84,6 @@
 // Double-precision FP arithmetics.
 extern "C" double fmod(double a, double b);         // REM_DOUBLE[_2ADDR]
 
-// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
-extern "C" int64_t art_quick_mul_long(int64_t, int64_t);
-extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
-
 // Intrinsic entrypoints.
 extern "C" int32_t __memcmp16(void*, void*, int32_t);
 extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
@@ -188,7 +182,6 @@
   qpoints->pCmplDouble = CmplDouble;
   qpoints->pCmplFloat = CmplFloat;
   qpoints->pFmod = fmod;
-  qpoints->pSqrt = sqrt;
   qpoints->pL2d = NULL;
   qpoints->pFmodf = fmodf;
   qpoints->pL2f = NULL;
@@ -199,10 +192,10 @@
   qpoints->pF2l = NULL;
   qpoints->pLdiv = NULL;
   qpoints->pLmod = NULL;
-  qpoints->pLmul = art_quick_mul_long;
-  qpoints->pShlLong = art_quick_shl_long;
-  qpoints->pShrLong = art_quick_shr_long;
-  qpoints->pUshrLong = art_quick_ushr_long;
+  qpoints->pLmul = NULL;
+  qpoints->pShlLong = NULL;
+  qpoints->pShrLong = NULL;
+  qpoints->pUshrLong = NULL;
 
   // Intrinsics
   qpoints->pIndexOf = art_quick_indexof;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index ac922dd..28bf856 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -197,7 +197,8 @@
 .endm
 
 .macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
-    brk 0
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    ret
 .endm
 
 
@@ -558,35 +559,38 @@
 
 .macro INVOKE_STUB_CREATE_FRAME
 
-SAVE_SIZE=5*8   // x4, x5, SP, LR & FP saved.
-SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
+SAVE_SIZE=6*8   // x4, x5, x19(wSUSPEND), SP, LR & FP saved.
+SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
 
-    mov x9, sp                          // Save stack pointer.
+
+    mov x9, sp                             // Save stack pointer.
     .cfi_register sp,x9
 
-    add x10, x2, # SAVE_SIZE_AND_METHOD // calculate size of frame.
-    sub x10, sp, x10                    // Calculate SP position - saves + ArtMethod* +  args
-    and x10, x10, # ~0xf                // Enforce 16 byte stack alignment.
-    mov sp, x10                         // Set new SP.
+    add x10, x2, # SAVE_SIZE_AND_METHOD    // calculate size of frame.
+    sub x10, sp, x10                       // Calculate SP position - saves + ArtMethod* +  args
+    and x10, x10, # ~0xf                   // Enforce 16 byte stack alignment.
+    mov sp, x10                            // Set new SP.
 
-    sub x10, x9, #SAVE_SIZE             // Calculate new FP (later). Done here as we must move SP
-    .cfi_def_cfa_register x10           // before this.
+    sub x10, x9, #SAVE_SIZE                // Calculate new FP (later). Done here as we must move SP
+    .cfi_def_cfa_register x10              // before this.
     .cfi_adjust_cfa_offset SAVE_SIZE
 
-    str x9, [x10, #32]                  // Save old stack pointer.
+    stp x9, x19, [x10, #32]                // Save old stack pointer and x19(wSUSPEND)
     .cfi_rel_offset sp, 32
+    .cfi_rel_offset x19, 40
 
-    stp x4, x5, [x10, #16]              // Save result and shorty addresses.
+    stp x4, x5, [x10, #16]                 // Save result and shorty addresses.
     .cfi_rel_offset x4, 16
     .cfi_rel_offset x5, 24
 
-    stp xFP, xLR, [x10]                 // Store LR & FP.
+    stp xFP, xLR, [x10]                    // Store LR & FP.
     .cfi_rel_offset x29, 0
     .cfi_rel_offset x30, 8
 
-    mov xFP, x10                        // Use xFP now, as it's callee-saved.
+    mov xFP, x10                           // Use xFP now, as it's callee-saved.
     .cfi_def_cfa_register x29
-    mov xSELF, x3                       // Move thread pointer into SELF register.
+    mov xSELF, x3                          // Move thread pointer into SELF register.
+    mov wSUSPEND, #SUSPEND_CHECK_INTERVAL  // reset wSUSPEND to suspend check interval
 
     // Copy arguments into stack frame.
     // Use simple copy routine for now.
@@ -595,7 +599,7 @@
     // W2 - args length
     // X9 - destination address.
     // W10 - temporary
-    add x9, sp, #8     // Destination address is bottom of stack + NULL.
+    add x9, sp, #4                         // Destination address is bottom of stack + null StackReference<Method>.
 
     // Use \@ to differentiate between macro invocations.
 .LcopyParams\@:
@@ -609,9 +613,12 @@
 
 .LendCopyParams\@:
 
-    // Store NULL into Method* at bottom of frame.
-    str xzr, [sp]
+    // Store NULL into StackReference<Method>* at bottom of frame.
+    str wzr, [sp]
 
+#if (STACK_REFERENCE_SIZE != 4)
+#error "STACK_REFERENCE_SIZE(ARM64) size not as expected."
+#endif
 .endm
 
 .macro INVOKE_STUB_CALL_AND_RETURN
@@ -649,7 +656,8 @@
     str x0, [x4]
 
 .Lexit_art_quick_invoke_stub\@:
-    ldr x2, [x29, #32]   // Restore stack pointer.
+    ldp x2, x19, [x29, #32]   // Restore stack pointer and x19.
+    .cfi_restore x19
     mov sp, x2
     .cfi_restore sp
 
@@ -685,7 +693,7 @@
  *  | uint32_t out[n-1]    |
  *  |    :      :          |        Outs
  *  | uint32_t out[0]      |
- *  | ArtMethod* NULL      | <- SP
+ *  | StackRef<ArtMethod>  | <- SP  value=null
  *  +----------------------+
  *
  * Outgoing registers:
@@ -693,6 +701,7 @@
  *  x1-x7 - integer parameters.
  *  d0-d7 - Floating point parameters.
  *  xSELF = self
+ *  wSUSPEND = suspend count
  *  SP = & of ArtMethod*
  *  x1 = "this" pointer.
  *
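
With the Method* slot shrunk to a 32-bit StackReference, the frame sketched in the comment above can be modeled approximately as follows; the field names are illustrative, since the real frame is assembled by hand in INVOKE_STUB_CREATE_FRAME:

    #include <cstdint>

    // Approximate model of the quick invoke stub frame (grows downward).
    struct QuickInvokeStubFrame {
      uint32_t method_reference;  // StackReference<ArtMethod>, null; SP points here.
      uint32_t outs[1];           // uint32_t out[0] .. out[n-1], copied from the arg array.
      // ...then padding to 16-byte alignment, and the saves: FP/LR, x4/x5, old SP, x19.
    };

    static_assert(sizeof(uint32_t) == 4u, "matches STACK_REFERENCE_SIZE on arm64");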
@@ -1286,7 +1295,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
-    ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+    ldr    w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
     mov    x2, xSELF                  // pass Thread::Current
     mov    x3, sp                     // pass SP
     bl     \entrypoint                // (uint32_t type_idx, Method* method, Thread*, SP)
@@ -1300,7 +1309,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
-    ldr    x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+    ldr    w2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
     mov    x3, xSELF                  // pass Thread::Current
     mov    x4, sp                     // pass SP
     bl     \entrypoint
@@ -1314,7 +1323,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
-    ldr    x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+    ldr    w3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
     mov    x4, xSELF                  // pass Thread::Current
     mov    x5, sp                     // pass SP
     bl     \entrypoint
@@ -1353,7 +1362,7 @@
 ENTRY art_quick_set64_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
     mov    x3, x1                     // Store value
-    ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+    ldr    w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
     mov    x2, x3                     // Put value param
     mov    x3, xSELF                  // pass Thread::Current
     mov    x4, sp                     // pass SP
@@ -1373,7 +1382,22 @@
 // Generate the allocation entrypoints for each allocator.
 GENERATE_ALL_ALLOC_ENTRYPOINTS
 
-UNIMPLEMENTED art_quick_test_suspend
+    /*
+     * Called by managed code when the value in wSUSPEND has been decremented to 0.
+     */
+    .extern artTestSuspendFromCode
+ENTRY art_quick_test_suspend
+    ldrh   w0, [xSELF, #THREAD_FLAGS_OFFSET]  // get xSELF->state_and_flags.as_struct.flags
+    mov    wSUSPEND, #SUSPEND_CHECK_INTERVAL  // reset wSUSPEND to SUSPEND_CHECK_INTERVAL
+    cbnz   w0, .Lneed_suspend                 // check flags == 0
+    ret                                       // return if flags == 0
+.Lneed_suspend:
+    mov    x0, xSELF
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME          // save callee saves for stack crawl
+    mov    x1, sp
+    bl     artTestSuspendFromCode             // (Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_test_suspend
 
      /*
      * Called by managed code that is attempting to call a method on a proxy class. On entry
@@ -1402,7 +1426,7 @@
      * dex method index.
      */
 ENTRY art_quick_imt_conflict_trampoline
-    ldr    x0, [sp, #0]                                // load caller Method*
+    ldr    w0, [sp, #0]                                // load caller Method*
     ldr    w0, [x0, #METHOD_DEX_CACHE_METHODS_OFFSET]  // load dex_cache_resolved_methods
     add    x0, x0, #OBJECT_ARRAY_DATA_OFFSET           // get starting address of data
     ldr    w0, [x0, x12, lsl 2]                        // load the target method
@@ -1416,7 +1440,7 @@
     bl artQuickResolutionTrampoline  // (called, receiver, Thread*, SP)
     cbz x0, 1f
     mov x9, x0              // Remember returned code pointer in x9.
-    ldr x0, [sp, #0]        // artQuickResolutionTrampoline puts called method in *SP.
+    ldr w0, [sp, #0]        // artQuickResolutionTrampoline puts called method in *SP.
     RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
     br x9
 1:
@@ -1466,7 +1490,7 @@
  * | D2                |    float arg 3
  * | D1                |    float arg 2
  * | D0                |    float arg 1
- * | RDI/Method*       |  <- X0
+ * | Method*           | <- X0
  * #-------------------#
  * | local ref cookie  | // 4B
  * | handle scope size | // 4B
@@ -1611,10 +1635,6 @@
 UNIMPLEMENTED art_quick_instrumentation_entry
 UNIMPLEMENTED art_quick_instrumentation_exit
 UNIMPLEMENTED art_quick_deoptimize
-UNIMPLEMENTED art_quick_mul_long
-UNIMPLEMENTED art_quick_shl_long
-UNIMPLEMENTED art_quick_shr_long
-UNIMPLEMENTED art_quick_ushr_long
 UNIMPLEMENTED art_quick_indexof
 
    /*
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 95fcd73..96e0afd 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -298,7 +298,11 @@
     l.s     $f29, 116($a1)
     l.s     $f30, 120($a1)
     l.s     $f31, 124($a1)
+    .set push
+    .set nomacro
+    .set noat
     lw      $at, 4($a0)
+    .set pop
     lw      $v0, 8($a0)
     lw      $v1, 12($a0)
     lw      $a1, 20($a0)
@@ -322,8 +326,6 @@
     lw      $s7, 92($a0)
     lw      $t8, 96($a0)
     lw      $t9, 100($a0)
-    lw      $k0, 104($a0)
-    lw      $k1, 108($a0)
     lw      $gp, 112($a0)
     lw      $sp, 116($a0)
     lw      $fp, 120($a0)
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index fac9883..44edd4b 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -225,7 +225,8 @@
         "cmp x1, x2\n\t"
         "b.ne 1f\n\t"
 
-        "mov %[fpr_result], #0\n\t"
+        "mov x2, #0\n\t"
+        "str x2, %[fpr_result]\n\t"
 
         // Finish up.
         "2:\n\t"
@@ -247,15 +248,16 @@
 
         // Failed fpr verification.
         "1:\n\t"
-        "mov %[fpr_result], #1\n\t"
+        "mov x2, #1\n\t"
+        "str x2, %[fpr_result]\n\t"
         "b 2b\n\t"                     // Goto finish-up
 
         // End
         "3:\n\t"
-        : [result] "=r" (result), [fpr_result] "=r" (fpr_result)
+        : [result] "=r" (result)
           // Use the result from r0
         : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
-          [referrer] "r"(referrer)
+          [referrer] "r"(referrer), [fpr_result] "m" (fpr_result)
         : "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17");  // clobber.
 #elif defined(__x86_64__)
     // Note: Uses the native convention
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index c4a7b1b..c53fa1e 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -71,11 +71,8 @@
 // Math entrypoints.
 extern "C" double art_quick_fmod(double, double);
 extern "C" float art_quick_fmodf(float, float);
-extern "C" double art_quick_l2d(int64_t);
-extern "C" float art_quick_l2f(int64_t);
 extern "C" int64_t art_quick_d2l(double);
 extern "C" int64_t art_quick_f2l(float);
-extern "C" int32_t art_quick_idivmod(int32_t, int32_t);
 extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
 extern "C" int64_t art_quick_lmod(int64_t, int64_t);
 extern "C" int64_t art_quick_lmul(int64_t, int64_t);
@@ -180,13 +177,12 @@
   // points->pCmplDouble = NULL;  // Not needed on x86.
   // points->pCmplFloat = NULL;  // Not needed on x86.
   qpoints->pFmod = art_quick_fmod;
-  // qpoints->pSqrt = NULL;  // Not needed on x86.
-  qpoints->pL2d = art_quick_l2d;
+  // qpoints->pL2d = NULL;  // Not needed on x86.
   qpoints->pFmodf = art_quick_fmodf;
-  qpoints->pL2f = art_quick_l2f;
+  // qpoints->pL2f = NULL;  // Not needed on x86.
   // points->pD2iz = NULL;  // Not needed on x86.
   // points->pF2iz = NULL;  // Not needed on x86.
-  qpoints->pIdivmod = art_quick_idivmod;
+  // qpoints->pIdivmod = NULL;  // Not needed on x86.
   qpoints->pD2l = art_quick_d2l;
   qpoints->pF2l = art_quick_f2l;
   qpoints->pLdiv = art_quick_ldiv;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 339ed2e..07268ea 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -763,28 +763,6 @@
     ret
 END_FUNCTION art_quick_fmodf
 
-DEFINE_FUNCTION art_quick_l2d
-    PUSH ecx                      // push arg2 a.hi
-    PUSH eax                      // push arg1 a.lo
-    fildll (%esp)                 // load as integer and push into st0
-    fstpl (%esp)                  // pop value off fp stack as double
-    movsd (%esp), %xmm0           // place into %xmm0
-    addl LITERAL(8), %esp         // pop arguments
-    CFI_ADJUST_CFA_OFFSET(-8)
-    ret
-END_FUNCTION art_quick_l2d
-
-DEFINE_FUNCTION art_quick_l2f
-    PUSH ecx                      // push arg2 a.hi
-    PUSH eax                      // push arg1 a.lo
-    fildll (%esp)                 // load as integer and push into st0
-    fstps (%esp)                  // pop value off fp stack as a single
-    movss (%esp), %xmm0           // place into %xmm0
-    addl LITERAL(8), %esp         // pop argument
-    CFI_ADJUST_CFA_OFFSET(-8)
-    ret
-END_FUNCTION art_quick_l2f
-
 DEFINE_FUNCTION art_quick_d2l
     PUSH eax                      // alignment padding
     PUSH ecx                      // pass arg2 a.hi
@@ -807,20 +785,6 @@
     ret
 END_FUNCTION art_quick_f2l
 
-DEFINE_FUNCTION art_quick_idivmod
-    cmpl LITERAL(0x80000000), %eax
-    je .Lcheck_arg2  // special case
-.Largs_ok:
-    cdq         // edx:eax = sign extend eax
-    idiv %ecx   // (edx,eax) = (edx:eax % ecx, edx:eax / ecx)
-    ret
-.Lcheck_arg2:
-    cmpl LITERAL(-1), %ecx
-    jne .Largs_ok
-    xorl %edx, %edx
-    ret         // eax already holds min int
-END_FUNCTION art_quick_idivmod
-
 DEFINE_FUNCTION art_quick_ldiv
     subl LITERAL(12), %esp       // alignment padding
     CFI_ADJUST_CFA_OFFSET(12)
@@ -1232,7 +1196,7 @@
     addl  LITERAL(28), %esp       // Pop arguments up to saved Method*.
     movl 28(%esp), %edi           // Restore edi.
     movl %eax, 28(%esp)           // Place code* over edi, just under return pc.
-    movl LITERAL(PLT_SYMBOL(art_quick_instrumentation_exit)), 32(%esp)
+    movl LITERAL(SYMBOL(art_quick_instrumentation_exit)), 32(%esp)
                                   // Place instrumentation exit as return pc.
     movl (%esp), %eax             // Restore eax.
     movl 8(%esp), %ecx            // Restore ecx.
diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc
index 26cd864..9f36927 100644
--- a/runtime/arch/x86/thread_x86.cc
+++ b/runtime/arch/x86/thread_x86.cc
@@ -40,10 +40,9 @@
 
 namespace art {
 
-static Mutex modify_ldt_lock("modify_ldt lock");
-
 void Thread::InitCpu() {
-  MutexLock mu(Thread::Current(), modify_ldt_lock);
+  // Take the ldt lock, Thread::Current isn't yet established.
+  MutexLock mu(nullptr, *Locks::modify_ldt_lock_);
 
   const uintptr_t base = reinterpret_cast<uintptr_t>(this);
   const size_t limit = kPageSize;
@@ -138,7 +137,7 @@
 }
 
 void Thread::CleanupCpu() {
-  MutexLock mu(Thread::Current(), modify_ldt_lock);
+  MutexLock mu(this, *Locks::modify_ldt_lock_);
 
   // Sanity check that reads from %fs point to this Thread*.
   Thread* self_check;
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 29633fb..bff8501 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -41,5 +41,7 @@
 
 // Expected size of a heap reference
 #define HEAP_REFERENCE_SIZE 4
+// Expected size of a stack reference
+#define STACK_REFERENCE_SIZE 4
 
 #endif  // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 30067cf..aeda072 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -18,6 +18,7 @@
 #include "entrypoints/quick/quick_alloc_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/math_entrypoints.h"
 
 namespace art {
 
@@ -34,8 +35,8 @@
 extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
 
 // Cast entrypoints.
-extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
-                                                const mirror::Class* ref_class);
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
+                                            const mirror::Class* ref_class);
 extern "C" void art_quick_check_cast(void*, void*);
 
 // DexCache entrypoints.
@@ -69,13 +70,8 @@
 extern "C" void art_quick_unlock_object(void*);
 
 // Math entrypoints.
-extern "C" double art_quick_fmod(double, double);
-extern "C" float art_quick_fmodf(float, float);
-extern "C" double art_quick_l2d(int64_t);
-extern "C" float art_quick_l2f(int64_t);
 extern "C" int64_t art_quick_d2l(double);
 extern "C" int64_t art_quick_f2l(float);
-extern "C" int32_t art_quick_idivmod(int32_t, int32_t);
 extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
 extern "C" int64_t art_quick_lmod(int64_t, int64_t);
 extern "C" int64_t art_quick_lmul(int64_t, int64_t);
@@ -85,7 +81,6 @@
 
 // Intrinsic entrypoints.
 extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t);
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
 extern "C" int32_t art_quick_string_compareto(void*, void*);
 extern "C" void* art_quick_memcpy(void*, const void*, size_t);
 
@@ -133,7 +128,7 @@
   ResetQuickAllocEntryPoints(qpoints);
 
   // Cast
-  qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
+  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
   qpoints->pCheckCast = art_quick_check_cast;
 
   // DexCache
@@ -180,16 +175,15 @@
   // points->pCmpgFloat = NULL;  // Not needed on x86.
   // points->pCmplDouble = NULL;  // Not needed on x86.
   // points->pCmplFloat = NULL;  // Not needed on x86.
-  qpoints->pFmod = art_quick_fmod;
-  // qpoints->pSqrt = NULL;  // Not needed on x86.
-  qpoints->pL2d = art_quick_l2d;
-  qpoints->pFmodf = art_quick_fmodf;
-  qpoints->pL2f = art_quick_l2f;
+  qpoints->pFmod = fmod;
+  // qpoints->pL2d = NULL;  // Not needed on x86.
+  qpoints->pFmodf = fmodf;
+  // qpoints->pL2f = NULL;  // Not needed on x86.
   // points->pD2iz = NULL;  // Not needed on x86.
   // points->pF2iz = NULL;  // Not needed on x86.
-  qpoints->pIdivmod = art_quick_idivmod;
-  qpoints->pD2l = art_quick_d2l;
-  qpoints->pF2l = art_quick_f2l;
+  // qpoints->pIdivmod = NULL;  // Not needed on x86.
+  qpoints->pD2l = art_d2l;
+  qpoints->pF2l = art_f2l;
   qpoints->pLdiv = art_quick_ldiv;
   qpoints->pLmod = art_quick_lmod;
   qpoints->pLmul = art_quick_lmul;
@@ -198,7 +192,7 @@
   qpoints->pUshrLong = art_quick_lushr;
 
   // Intrinsics
-  qpoints->pIndexOf = art_quick_indexof;
+  // qpoints->pIndexOf = NULL;  // Not needed on x86.
   qpoints->pMemcmp16 = art_quick_memcmp16;
   qpoints->pStringCompareTo = art_quick_string_compareto;
   qpoints->pMemcpy = art_quick_memcpy;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index ed7f246..48c33d5 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -384,16 +384,24 @@
     PUSH r9                       // Save r9/shorty*.
     movq %rsp, %rbp               // Copy value of stack pointer into base pointer.
     CFI_DEF_CFA_REGISTER(rbp)
+
     movl %edx, %r10d
-    addl LITERAL(64), %edx        // Reserve space for return addr, method*, rbp, r8 and r9 in frame.
+    addl LITERAL(60), %edx        // Reserve space for return addr, StackReference<method>, rbp,
+                                  // r8 and r9 in frame.
     andl LITERAL(0xFFFFFFF0), %edx    // Align frame size to 16 bytes.
     subl LITERAL(32), %edx        // Remove space for return address, rbp, r8 and r9.
     subq %rdx, %rsp               // Reserve stack space for argument array.
-    movq LITERAL(0), (%rsp)       // Store NULL for method*
+
+#if (STACK_REFERENCE_SIZE != 4)
+#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
+#endif
+    movl LITERAL(0), (%rsp)       // Store NULL for method*
+
     movl %r10d, %ecx              // Place size of args in rcx.
     movq %rdi, %rax               // RAX := method to be called
     movq %rsi, %r11               // R11 := arg_array
-    leaq 8(%rsp), %rdi            // Rdi is pointing just above the method* in the stack arguments.
+    leaq 4(%rsp), %rdi            // Rdi is pointing just above the StackReference<method> in the
+                                  // stack arguments.
     // Copy arg array into stack.
     rep movsb                     // while (rcx--) { *rdi++ = *rsi++ }
     leaq 1(%r9), %r10             // R10 := shorty + 1  ; i.e. skip return arg character
@@ -455,16 +463,24 @@
     PUSH r9                       // Save r9/shorty*.
     movq %rsp, %rbp               // Copy value of stack pointer into base pointer.
     CFI_DEF_CFA_REGISTER(rbp)
+
     movl %edx, %r10d
-    addl LITERAL(64), %edx        // Reserve space for return addr, method*, rbp, r8 and r9 in frame.
+    addl LITERAL(60), %edx        // Reserve space for return addr, StackReference<method>, rbp,
+                                  // r8 and r9 in frame.
     andl LITERAL(0xFFFFFFF0), %edx    // Align frame size to 16 bytes.
     subl LITERAL(32), %edx        // Remove space for return address, rbp, r8 and r9.
     subq %rdx, %rsp               // Reserve stack space for argument array.
-    movq LITERAL(0), (%rsp)       // Store NULL for method*
+
+#if (STACK_REFERENCE_SIZE != 4)
+#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
+#endif
+    movl LITERAL(0), (%rsp)       // Store NULL for method*
+
     movl %r10d, %ecx              // Place size of args in rcx.
     movq %rdi, %rax               // RAX := method to be called
     movq %rsi, %r11               // R11 := arg_array
-    leaq 8(%rsp), %rdi            // Rdi is pointing just above the method* in the stack arguments.
+    leaq 4(%rsp), %rdi            // Rdi is pointing just above the StackReference<method> in the
+                                  // stack arguments.
     // Copy arg array into stack.
     rep movsb                     // while (rcx--) { *rdi++ = *rsi++ }
     leaq 1(%r9), %r10             // R10 := shorty + 1  ; i.e. skip return arg character
@@ -737,11 +753,6 @@
     RETURN_IF_EAX_ZERO
 END_FUNCTION art_quick_unlock_object
 
-DEFINE_FUNCTION art_quick_is_assignable
-    int3
-    int3
-END_FUNCTION art_quick_is_assignable
-
 DEFINE_FUNCTION art_quick_check_cast
     PUSH rdi                          // Save args for exc
     PUSH rsi
@@ -876,13 +887,6 @@
 
 NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
 
-UNIMPLEMENTED art_quick_fmod
-UNIMPLEMENTED art_quick_fmodf
-UNIMPLEMENTED art_quick_l2d
-UNIMPLEMENTED art_quick_l2f
-UNIMPLEMENTED art_quick_d2l
-UNIMPLEMENTED art_quick_f2l
-UNIMPLEMENTED art_quick_idivmod
 UNIMPLEMENTED art_quick_ldiv
 UNIMPLEMENTED art_quick_lmod
 UNIMPLEMENTED art_quick_lmul
@@ -1301,8 +1305,6 @@
      */
 UNIMPLEMENTED art_quick_deoptimize
 
-UNIMPLEMENTED art_quick_indexof
-
     /*
      * String's compareTo.
      *
diff --git a/runtime/arch/x86_64/thread_x86_64.cc b/runtime/arch/x86_64/thread_x86_64.cc
index de4c56a..b7a5c43 100644
--- a/runtime/arch/x86_64/thread_x86_64.cc
+++ b/runtime/arch/x86_64/thread_x86_64.cc
@@ -31,8 +31,7 @@
   syscall(__NR_arch_prctl, code, val);
 }
 void Thread::InitCpu() {
-  static Mutex modify_ldt_lock("modify_ldt lock");
-  MutexLock mu(Thread::Current(), modify_ldt_lock);
+  MutexLock mu(nullptr, *Locks::modify_ldt_lock_);
   arch_prctl(ARCH_SET_GS, this);
 
   // Allow easy indirection back to Thread*.
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index a3e2b15..0053389 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -45,10 +45,11 @@
     storage_size_(storage_size),
     storage_(storage),
     number_of_bits_(start_bits) {
-  DCHECK_EQ(sizeof(*storage_), 4U);  // Assuming 32-bit units.
+  COMPILE_ASSERT(sizeof(*storage_) == kWordBytes, check_word_bytes);
+  COMPILE_ASSERT(sizeof(*storage_) * 8u == kWordBits, check_word_bits);
   if (storage_ == nullptr) {
     storage_size_ = BitsToWords(start_bits);
-    storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * sizeof(*storage_)));
+    storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * kWordBytes));
   }
 }
 
@@ -61,7 +62,7 @@
  */
 bool BitVector::IsBitSet(uint32_t num) const {
   // If the index is over the size:
-  if (num >= storage_size_ * sizeof(*storage_) * 8) {
+  if (num >= storage_size_ * kWordBits) {
     // Whether it is expandable or not, this bit does not exist: thus it is not set.
     return false;
   }
@@ -71,7 +72,7 @@
 
 // Mark all bits as "clear".
 void BitVector::ClearAllBits() {
-  memset(storage_, 0, storage_size_ * sizeof(*storage_));
+  memset(storage_, 0, storage_size_ * kWordBytes);
 }
 
 // Mark the specified bit as "set".
@@ -80,17 +81,17 @@
  * not using it badly or change resize mechanism.
  */
 void BitVector::SetBit(uint32_t num) {
-  if (num >= storage_size_ * sizeof(*storage_) * 8) {
+  if (num >= storage_size_ * kWordBits) {
     DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
 
     /* Round up to word boundaries for "num+1" bits */
     uint32_t new_size = BitsToWords(num + 1);
     DCHECK_GT(new_size, storage_size_);
     uint32_t *new_storage =
-        static_cast<uint32_t*>(allocator_->Alloc(new_size * sizeof(*storage_)));
-    memcpy(new_storage, storage_, storage_size_ * sizeof(*storage_));
+        static_cast<uint32_t*>(allocator_->Alloc(new_size * kWordBytes));
+    memcpy(new_storage, storage_, storage_size_ * kWordBytes);
     // Zero out the new storage words.
-    memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(*storage_));
+    memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * kWordBytes);
     // TODO: collect stats on space wasted because of resize.
     storage_ = new_storage;
     storage_size_ = new_size;
@@ -103,7 +104,7 @@
 // Mark the specified bit as "unset".
 void BitVector::ClearBit(uint32_t num) {
   // If the index is over the size, we don't have to do anything, it is cleared.
-  if (num < storage_size_ * sizeof(*storage_) * 8) {
+  if (num < storage_size_ * kWordBits) {
     // Otherwise, go ahead and clear it.
     storage_[num >> 5] &= ~check_masks[num & 0x1f];
   }
@@ -132,7 +133,7 @@
   //   - Therefore, min_size goes up to at least that, so we compare at least what we need to, but not less.
   //      i.e. we compare every storage cell that could differ; if both vectors have cells above our_highest_index,
   //          those cells are automatically 0.
-  return (memcmp(storage_, src->GetRawStorage(), our_highest_index * sizeof(*storage_)) == 0);
+  return (memcmp(storage_, src->GetRawStorage(), our_highest_index * kWordBytes) == 0);
 }
 
 // Intersect with another bit vector.
@@ -180,7 +181,7 @@
     SetBit(highest_bit);
 
     // Paranoid: storage size should be big enough to hold this bit now.
-    DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * sizeof(*(storage_)) * 8);
+    DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * kWordBits);
   }
 
   for (uint32_t idx = 0; idx < src_size; idx++) {
@@ -215,7 +216,7 @@
     SetBit(highest_bit);
 
     // Paranoid: storage size should be big enough to hold this bit now.
-    DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * sizeof(*(storage_)) * 8);
+    DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * kWordBits);
   }
 
   uint32_t not_in_size = not_in->GetStorageSize();
@@ -268,14 +269,10 @@
 
 // Count the number of bits that are set in range [0, end).
 uint32_t BitVector::NumSetBits(uint32_t end) const {
-  DCHECK_LE(end, storage_size_ * sizeof(*storage_) * 8);
+  DCHECK_LE(end, storage_size_ * kWordBits);
   return NumSetBits(storage_, end);
 }
 
-BitVector::Iterator* BitVector::GetIterator() const {
-  return new (allocator_) Iterator(this);
-}
-
 /*
  * Mark specified number of bits as "set". Cannot set all bits like ClearAll
  * since there might be unused bits - setting those to one will confuse the
@@ -329,7 +326,7 @@
       }
 
       // Return cnt + how many storage units still remain * the number of bits per unit.
-      int res = cnt + (idx * (sizeof(*storage_) * 8));
+      int res = cnt + (idx * kWordBits);
       return res;
     }
   }
@@ -369,14 +366,14 @@
   SetBit(highest_bit);
 
   // Now set until highest bit's storage.
-  uint32_t size = 1 + (highest_bit / (sizeof(*storage_) * 8));
-  memcpy(storage_, src->GetRawStorage(), sizeof(*storage_) * size);
+  uint32_t size = 1 + (highest_bit / kWordBits);
+  memcpy(storage_, src->GetRawStorage(), kWordBytes * size);
 
   // Set upper bits to 0.
   uint32_t left = storage_size_ - size;
 
   if (left > 0) {
-    memset(storage_ + size, 0, sizeof(*storage_) * left);
+    memset(storage_ + size, 0, kWordBytes * left);
   }
 }
 
@@ -401,14 +398,12 @@
 
 void BitVector::Dump(std::ostream& os, const char *prefix) const {
   std::ostringstream buffer;
-  DumpHelper(buffer, prefix);
+  DumpHelper(prefix, buffer);
   os << buffer.str() << std::endl;
 }
 
-void BitVector::DumpDot(FILE* file, const char* prefix, bool last_entry) const {
-  std::ostringstream buffer;
-  Dump(buffer, prefix);
 
+void BitVector::DumpDotHelper(bool last_entry, FILE* file, std::ostringstream& buffer) const {
   // Now print it to the file.
   fprintf(file, "    {%s}", buffer.str().c_str());
 
@@ -421,7 +416,32 @@
   fprintf(file, "\\\n");
 }
 
-void BitVector::DumpHelper(std::ostringstream& buffer, const char* prefix) const {
+void BitVector::DumpDot(FILE* file, const char* prefix, bool last_entry) const {
+  std::ostringstream buffer;
+  DumpHelper(prefix, buffer);
+  DumpDotHelper(last_entry, file, buffer);
+}
+
+void BitVector::DumpIndicesDot(FILE* file, const char* prefix, bool last_entry) const {
+  std::ostringstream buffer;
+  DumpIndicesHelper(prefix, buffer);
+  DumpDotHelper(last_entry, file, buffer);
+}
+
+void BitVector::DumpIndicesHelper(const char* prefix, std::ostringstream& buffer) const {
+  // Initialize it.
+  if (prefix != nullptr) {
+    buffer << prefix;
+  }
+
+  for (size_t i = 0; i < number_of_bits_; i++) {
+    if (IsBitSet(i)) {
+      buffer << i << " ";
+    }
+  }
+}
+
+void BitVector::DumpHelper(const char* prefix, std::ostringstream& buffer) const {
   // Initialize it.
   if (prefix != nullptr) {
     buffer << prefix;
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index 2a68396..8f9afff 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -32,59 +32,115 @@
  */
 class BitVector {
   public:
-    class Iterator {
+    class IndexContainer;
+
+    /**
+     * @brief Convenient iterator across the indexes of the BitVector's set bits.
+     *
+     * @details IndexIterator is a Forward iterator (C++11: 24.2.5) from the lowest
+     * to the highest index of the BitVector's set bits. Instances can be retrieved
+     * only through BitVector::Indexes() which returns an IndexContainer wrapper
+     * object with begin() and end() suitable for range-based loops:
+     *   for (uint32_t idx : bit_vector.Indexes()) {
+     *     // Use idx.
+     *   }
+     */
+    class IndexIterator
+        : std::iterator<std::forward_iterator_tag, uint32_t, ptrdiff_t, void, uint32_t> {
       public:
-        explicit Iterator(const BitVector* bit_vector)
-          : p_bits_(bit_vector),
-            bit_storage_(bit_vector->GetRawStorage()),
-            bit_index_(0),
-            bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {}
-
-        // Return the position of the next set bit.  -1 means end-of-element reached.
-        int32_t Next() {
-          // Did anything obviously change since we started?
-          DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
-          DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
-
-          if (UNLIKELY(bit_index_ >= bit_size_)) {
-            return -1;
-          }
-
-          uint32_t word_index = bit_index_ / 32;
-          uint32_t word = bit_storage_[word_index];
-          // Mask out any bits in the first word we've already considered.
-          word >>= bit_index_ & 0x1f;
-          if (word == 0) {
-            bit_index_ &= ~0x1f;
-            do {
-              word_index++;
-              if (UNLIKELY((word_index * 32) >= bit_size_)) {
-                bit_index_ = bit_size_;
-                return -1;
-              }
-              word = bit_storage_[word_index];
-              bit_index_ += 32;
-            } while (word == 0);
-          }
-          bit_index_ += CTZ(word) + 1;
-          return bit_index_ - 1;
+        bool operator==(const IndexIterator& other) const {
+          DCHECK(bit_storage_ == other.bit_storage_);
+          DCHECK_EQ(storage_size_, other.storage_size_);
+          return bit_index_ == other.bit_index_;
         }
 
-        static void* operator new(size_t size, Allocator* allocator) {
-          return allocator->Alloc(sizeof(BitVector::Iterator));
-        };
-        static void operator delete(void* p) {
-          Iterator* it = reinterpret_cast<Iterator*>(p);
-          it->p_bits_->allocator_->Free(p);
+        bool operator!=(const IndexIterator& other) const {
+          return !(*this == other);
+        }
+
+        int operator*() const {
+          DCHECK_LT(bit_index_, BitSize());
+          return bit_index_;
+        }
+
+        IndexIterator& operator++() {
+          DCHECK_LT(bit_index_, BitSize());
+          bit_index_ = FindIndex(bit_index_ + 1u);
+          return *this;
+        }
+
+        IndexIterator operator++(int) {
+          IndexIterator result(*this);
+          ++*this;
+          return result;
+        }
+
+        // Helper function to check for end without comparing with bit_vector.Indexes().end().
+        bool Done() const {
+          return bit_index_ == BitSize();
         }
 
       private:
-        const BitVector* const p_bits_;
-        const uint32_t* const bit_storage_;
-        uint32_t bit_index_;           // Current index (size in bits).
-        const uint32_t bit_size_;      // Size of vector in bits.
+        struct begin_tag { };
+        struct end_tag { };
 
-        friend class BitVector;
+        IndexIterator(const BitVector* bit_vector, begin_tag)
+          : bit_storage_(bit_vector->GetRawStorage()),
+            storage_size_(bit_vector->storage_size_),
+            bit_index_(FindIndex(0u)) { }
+
+        IndexIterator(const BitVector* bit_vector, end_tag)
+          : bit_storage_(bit_vector->GetRawStorage()),
+            storage_size_(bit_vector->storage_size_),
+            bit_index_(BitSize()) { }
+
+        uint32_t BitSize() const {
+          return storage_size_ * kWordBits;
+        }
+
+        uint32_t FindIndex(uint32_t start_index) const {
+          DCHECK_LE(start_index, BitSize());
+          uint32_t word_index = start_index / kWordBits;
+          if (UNLIKELY(word_index == storage_size_)) {
+            return start_index;
+          }
+          uint32_t word = bit_storage_[word_index];
+          // Mask out any bits in the first word we've already considered.
+          word &= static_cast<uint32_t>(-1) << (start_index & 0x1f);
+          while (word == 0u) {
+            ++word_index;
+            if (UNLIKELY(word_index == storage_size_)) {
+              return BitSize();
+            }
+            word = bit_storage_[word_index];
+          }
+          return word_index * 32u + CTZ(word);
+        }
+
+        const uint32_t* const bit_storage_;
+        const uint32_t storage_size_;  // Size of vector in words.
+        uint32_t bit_index_;           // Current index (size in bits).
+
+        friend class BitVector::IndexContainer;
+    };
+
+    /**
+     * @brief BitVector wrapper class for iteration across indexes of set bits.
+     */
+    class IndexContainer {
+     public:
+      explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
+
+      IndexIterator begin() const {
+        return IndexIterator(bit_vector_, IndexIterator::begin_tag());
+      }
+
+      IndexIterator end() const {
+        return IndexIterator(bit_vector_, IndexIterator::end_tag());
+      }
+
+     private:
+      const BitVector* const bit_vector_;
     };
 
     BitVector(uint32_t start_bits,
@@ -127,14 +183,16 @@
     // Number of bits set in range [0, end).
     uint32_t NumSetBits(uint32_t end) const;
 
-    Iterator* GetIterator() const;
+    IndexContainer Indexes() const {
+      return IndexContainer(this);
+    }
 
     uint32_t GetStorageSize() const { return storage_size_; }
     bool IsExpandable() const { return expandable_; }
     uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
     uint32_t* GetRawStorage() { return storage_; }
     const uint32_t* GetRawStorage() const { return storage_; }
-    size_t GetSizeOf() const { return storage_size_ * sizeof(uint32_t); }
+    size_t GetSizeOf() const { return storage_size_ * kWordBytes; }
 
     /**
      * @return the highest bit set, -1 if none are set
@@ -149,12 +207,42 @@
     bool EnsureSizeAndClear(unsigned int num);
 
     void Dump(std::ostream& os, const char* prefix) const;
+
+    /**
+     * @brief last_entry: is this the last entry for the dot dumping?
+     * @details If not, a "|" is appended to the dump.
+     */
     void DumpDot(FILE* file, const char* prefix, bool last_entry = false) const;
 
+    /**
+     * @brief last_entry: is this the last entry for the dot dumping?
+     * @details If not, a "|" is appended to the dump.
+     */
+    void DumpIndicesDot(FILE* file, const char* prefix, bool last_entry = false) const;
+
   protected:
-    void DumpHelper(std::ostringstream& buffer, const char* prefix) const;
+    /**
+     * @brief Dump the bitvector into buffer in a 00101..01 format.
+     * @param buffer the ostringstream used to dump the bitvector into.
+     */
+    void DumpHelper(const char* prefix, std::ostringstream& buffer) const;
+
+    /**
+     * @brief Dump the bitvector in a "1 2 5 8" format, where the numbers are the set bits.
+     * @param buffer the ostringstream used to dump the bitvector into.
+     */
+    void DumpIndicesHelper(const char* prefix, std::ostringstream& buffer) const;
+
+    /**
+     * @brief Wrapper to perform the bitvector dumping with the .dot format.
+     * @param buffer the ostringstream used to dump the bitvector into.
+     */
+    void DumpDotHelper(bool last_entry, FILE* file, std::ostringstream& buffer) const;
 
   private:
+    static constexpr uint32_t kWordBytes = sizeof(uint32_t);
+    static constexpr uint32_t kWordBits = kWordBytes * 8;
+
     Allocator* const allocator_;
     const bool expandable_;         // expand bitmap if we run out?
     uint32_t   storage_size_;       // current size, in 32-bit words.
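
The heap-allocated Iterator is gone; IndexContainer and IndexIterator make walking the set bits a C++11 range-based loop, as the class comment shows. A short usage sketch (the include path is the in-tree one; printf stands in for real consumers):

    #include <cstdio>
    #include "base/bit_vector.h"  // In-tree include path for this header.

    void PrintSetBits(const art::BitVector& bv) {
      // New style: iterate indexes of set bits directly.
      for (uint32_t idx : bv.Indexes()) {
        printf("%u ", idx);
      }
      // The removed equivalent was:
      //   BitVector::Iterator it(&bv);
      //   for (int32_t idx = it.Next(); idx != -1; idx = it.Next()) { ... }
    }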
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
index 0f866a4..1403f50 100644
--- a/runtime/base/bit_vector_test.cc
+++ b/runtime/base/bit_vector_test.cc
@@ -38,11 +38,8 @@
   EXPECT_EQ(0U, bv.GetRawStorageWord(0));
   EXPECT_EQ(0U, *bv.GetRawStorage());
 
-  BitVector::Iterator empty_iterator(&bv);
-  EXPECT_EQ(-1, empty_iterator.Next());
-
-  std::unique_ptr<BitVector::Iterator> empty_iterator_on_heap(bv.GetIterator());
-  EXPECT_EQ(-1, empty_iterator_on_heap->Next());
+  EXPECT_TRUE(bv.Indexes().begin().Done());
+  EXPECT_TRUE(bv.Indexes().begin() == bv.Indexes().end());
 
   bv.SetBit(0);
   bv.SetBit(kBits - 1);
@@ -57,10 +54,14 @@
   EXPECT_EQ(0x80000001U, bv.GetRawStorageWord(0));
   EXPECT_EQ(0x80000001U, *bv.GetRawStorage());
 
-  BitVector::Iterator iterator(&bv);
-  EXPECT_EQ(0, iterator.Next());
-  EXPECT_EQ(static_cast<int>(kBits - 1), iterator.Next());
-  EXPECT_EQ(-1, iterator.Next());
+  BitVector::IndexIterator iterator = bv.Indexes().begin();
+  EXPECT_TRUE(iterator != bv.Indexes().end());
+  EXPECT_EQ(0, *iterator);
+  ++iterator;
+  EXPECT_TRUE(iterator != bv.Indexes().end());
+  EXPECT_EQ(static_cast<int>(kBits - 1), *iterator);
+  ++iterator;
+  EXPECT_TRUE(iterator == bv.Indexes().end());
 }
 
 TEST(BitVector, NoopAllocator) {
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 8175514..47571f8 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -169,7 +169,7 @@
 // bionic and glibc both have TEMP_FAILURE_RETRY, but Mac OS' libc doesn't.
 #ifndef TEMP_FAILURE_RETRY
 #define TEMP_FAILURE_RETRY(exp) ({ \
-  typeof(exp) _rc; \
+  decltype(exp) _rc; \
   do { \
     _rc = (exp); \
   } while (_rc == -1 && errno == EINTR); \
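
typeof is a GNU extension, while decltype is the standard C++11 spelling of the same deduction, so _rc still picks up the exact return type of the wrapped call. Usage is unchanged; a small sketch:

    #include <cerrno>
    #include <unistd.h>  // Supplies read(); bionic/glibc also define TEMP_FAILURE_RETRY here.

    // Retries a syscall that fails with EINTR; decltype(read(...)) is ssize_t.
    ssize_t ReadRetrying(int fd, void* buf, size_t count) {
      return TEMP_FAILURE_RETRY(read(fd, buf, count));
    }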
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index adf4c66..a9472f7 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -132,9 +132,21 @@
   // TODO: tighten this check.
   if (kDebugLocking) {
     Runtime* runtime = Runtime::Current();
-    CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
-          level == kDefaultMutexLevel  || level == kRuntimeShutdownLock ||
-          level == kThreadListLock || level == kLoggingLock || level == kAbortLock);
+    CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
+          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
+          // yet established.
+          level == kRuntimeShutdownLock ||
+          // Thread Ids are allocated/released before threads are established.
+          level == kAllocatedThreadIdsLock ||
+          // Thread LDT's are initialized without Thread::Current established.
+          level == kModifyLdtLock ||
+          // Threads are unregistered while holding the thread list lock, during this process they
+          // no longer exist and so we expect an unlock with no self.
+          level == kThreadListLock ||
+          // Ignore logging which may or may not have set up thread data structures.
+          level == kLoggingLock ||
+          // Avoid recursive death.
+          level == kAbortLock) << level;
   }
 }
 
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 6f7f2c1..d2b4e01 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -30,10 +30,12 @@
 namespace art {
 
 Mutex* Locks::abort_lock_ = nullptr;
+Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
 Mutex* Locks::breakpoint_lock_ = nullptr;
 ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
 ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
 Mutex* Locks::logging_lock_ = nullptr;
+Mutex* Locks::modify_ldt_lock_ = nullptr;
 ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
 Mutex* Locks::runtime_shutdown_lock_ = nullptr;
 Mutex* Locks::thread_list_lock_ = nullptr;
@@ -814,7 +816,13 @@
 void Locks::Init() {
   if (logging_lock_ != nullptr) {
     // Already initialized.
+    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
+      DCHECK(modify_ldt_lock_ != nullptr);
+    } else {
+      DCHECK(modify_ldt_lock_ == nullptr);
+    }
     DCHECK(abort_lock_ != nullptr);
+    DCHECK(allocated_thread_ids_lock_ != nullptr);
     DCHECK(breakpoint_lock_ != nullptr);
     DCHECK(classlinker_classes_lock_ != nullptr);
     DCHECK(heap_bitmap_lock_ != nullptr);
@@ -827,32 +835,76 @@
     DCHECK(unexpected_signal_lock_ != nullptr);
     DCHECK(intern_table_lock_ != nullptr);
   } else {
-    logging_lock_ = new Mutex("logging lock", kLoggingLock, true);
-    abort_lock_ = new Mutex("abort lock", kAbortLock, true);
+    // Create global locks in level order from highest lock level to lowest.
+    LockLevel current_lock_level = kMutatorLock;
+    DCHECK(mutator_lock_ == nullptr);
+    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
 
+    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
+        DCHECK_LT(new_level, current_lock_level); \
+        current_lock_level = new_level;
+
+    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
+    DCHECK(heap_bitmap_lock_ == nullptr);
+    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
+    DCHECK(runtime_shutdown_lock_ == nullptr);
+    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
+    DCHECK(profiler_lock_ == nullptr);
+    profiler_lock_ = new Mutex("profiler lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
+    DCHECK(trace_lock_ == nullptr);
+    trace_lock_ = new Mutex("trace lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
+    DCHECK(thread_list_lock_ == nullptr);
+    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
     DCHECK(breakpoint_lock_ == nullptr);
-    breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock);
+    breakpoint_lock_ = new Mutex("breakpoint lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
     DCHECK(classlinker_classes_lock_ == nullptr);
     classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
-                                                      kClassLinkerClassesLock);
-    DCHECK(heap_bitmap_lock_ == nullptr);
-    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", kHeapBitmapLock);
-    DCHECK(mutator_lock_ == nullptr);
-    mutator_lock_ = new ReaderWriterMutex("mutator lock", kMutatorLock);
-    DCHECK(runtime_shutdown_lock_ == nullptr);
-    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", kRuntimeShutdownLock);
-    DCHECK(thread_list_lock_ == nullptr);
-    thread_list_lock_ = new Mutex("thread list lock", kThreadListLock);
-    DCHECK(thread_suspend_count_lock_ == nullptr);
-    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock);
-    DCHECK(trace_lock_ == nullptr);
-    trace_lock_ = new Mutex("trace lock", kTraceLock);
-    DCHECK(profiler_lock_ == nullptr);
-    profiler_lock_ = new Mutex("profiler lock", kProfilerLock);
-    DCHECK(unexpected_signal_lock_ == nullptr);
-    unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true);
+                                                      current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
+    DCHECK(allocated_thread_ids_lock_ == nullptr);
+    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);
+
+    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
+      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
+      DCHECK(modify_ldt_lock_ == nullptr);
+      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
+    }
+
+    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
     DCHECK(intern_table_lock_ == nullptr);
-    intern_table_lock_ = new Mutex("InternTable lock", kInternTableLock);
+    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
+    DCHECK(abort_lock_ == nullptr);
+    abort_lock_ = new Mutex("abort lock", current_lock_level, true);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
+    DCHECK(thread_suspend_count_lock_ == nullptr);
+    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
+    DCHECK(unexpected_signal_lock_ == nullptr);
+    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
+    DCHECK(logging_lock_ == nullptr);
+    logging_lock_ = new Mutex("logging lock", current_lock_level, true);
+
+    #undef UPDATE_CURRENT_LOCK_LEVEL
   }
 }
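
The rewritten Locks::Init() makes the lock hierarchy executable: locks are created strictly from the highest level down, and UPDATE_CURRENT_LOCK_LEVEL's DCHECK_LT turns a misordered addition into an immediate debug failure rather than a latent ordering bug. A stripped-down sketch of the pattern, with illustrative names:

    #include <cassert>

    enum SketchLockLevel {  // Lower value = lower level, as in ART's LockLevel.
      kSketchLoggingLock = 0,
      kSketchAbortLock,
      kSketchInternTableLock,
      kSketchMutatorLock,  // Highest.
    };

    // Mirrors UPDATE_CURRENT_LOCK_LEVEL: every step must strictly descend.
    struct DescendingLevels {
      int current;
      explicit DescendingLevels(int top) : current(top) {}
      int Step(int next) {
        assert(next < current);  // DCHECK_LT(new_level, current_lock_level).
        return current = next;
      }
    };

    // Usage: DescendingLevels levels(kSketchMutatorLock);
    //        levels.Step(kSketchInternTableLock);  // OK, descending.
    //        levels.Step(kSketchMutatorLock);      // Would assert.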
 
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e13c8d5..522692e 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -74,6 +74,8 @@
   kPinTableLock,
   kLoadLibraryLock,
   kJdwpObjectRegistryLock,
+  kModifyLdtLock,
+  kAllocatedThreadIdsLock,
   kClassLinkerClassesLock,
   kBreakpointLock,
   kMonitorLock,
@@ -532,28 +534,34 @@
   // Guards shutdown of the runtime.
   static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
 
+  // Guards background profiler global state.
+  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+
+  // Guards trace (i.e. traceview) requests.
+  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
+
   // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
   // attaching and detaching.
-  static Mutex* thread_list_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+  static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);
 
   // Guards breakpoints.
   static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
 
-  // Guards trace requests.
-  static Mutex* trace_lock_ ACQUIRED_AFTER(breakpoint_lock_);
-
-  // Guards profile objects.
-  static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_);
-
   // Guards lists of classes within the class linker.
-  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(profiler_lock_);
+  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
 
   // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
   // doesn't try to hold a higher level Mutex.
   #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
 
+  // Guards the allocation/deallocation of thread ids.
+  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
+  // Guards modification of the LDT on x86.
+  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
+
   // Guards intern table.
-  static Mutex* intern_table_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
 
   // Have an exclusive aborting thread.
   static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
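
The ACQUIRED_AFTER chain is re-threaded to match the new creation order: profiler and trace locks move above the thread list lock, and the two new locks slot in between classlinker_classes_lock_ and intern_table_lock_. As a hedged illustration of how such a chain reads with clang's thread-safety attributes (ART's own macros wrap these; clang currently parses ACQUIRED_AFTER mostly for documentation):

    // Illustrative only; not ART's real macro definitions.
    #define SKETCH_CAPABILITY __attribute__((capability("mutex")))
    #define SKETCH_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))

    struct SKETCH_CAPABILITY FakeMutex {};
    FakeMutex classes_lock;
    FakeMutex thread_ids_lock SKETCH_ACQUIRED_AFTER(classes_lock);
    FakeMutex ldt_lock        SKETCH_ACQUIRED_AFTER(thread_ids_lock);
    FakeMutex intern_lock     SKETCH_ACQUIRED_AFTER(ldt_lock);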
diff --git a/runtime/catch_block_stack_visitor.cc b/runtime/catch_block_stack_visitor.cc
deleted file mode 100644
index 55b330a..0000000
--- a/runtime/catch_block_stack_visitor.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "catch_block_stack_visitor.h"
-
-#include "dex_instruction.h"
-#include "mirror/art_method-inl.h"
-#include "quick_exception_handler.h"
-#include "handle_scope-inl.h"
-#include "verifier/method_verifier.h"
-
-namespace art {
-
-bool CatchBlockStackVisitor::VisitFrame() {
-  exception_handler_->SetHandlerFrameId(GetFrameId());
-  mirror::ArtMethod* method = GetMethod();
-  if (method == nullptr) {
-    // This is the upcall, we remember the frame and last pc so that we may long jump to them.
-    exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
-    exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
-    return false;  // End stack walk.
-  } else {
-    if (method->IsRuntimeMethod()) {
-      // Ignore callee save method.
-      DCHECK(method->IsCalleeSaveMethod());
-      return true;
-    } else {
-      return HandleTryItems(method);
-    }
-  }
-}
-
-bool CatchBlockStackVisitor::HandleTryItems(mirror::ArtMethod* method) {
-  uint32_t dex_pc = DexFile::kDexNoIndex;
-  if (!method->IsNative()) {
-    dex_pc = GetDexPc();
-  }
-  if (dex_pc != DexFile::kDexNoIndex) {
-    bool clear_exception = false;
-    bool exc_changed = false;
-    StackHandleScope<1> hs(Thread::Current());
-    Handle<mirror::Class> to_find(hs.NewHandle((*exception_)->GetClass()));
-    uint32_t found_dex_pc = method->FindCatchBlock(to_find, dex_pc, &clear_exception,
-                                                   &exc_changed);
-    if (UNLIKELY(exc_changed)) {
-      DCHECK_EQ(DexFile::kDexNoIndex, found_dex_pc);
-      exception_->Assign(self_->GetException(nullptr));  // TODO: Throw location?
-      // There is a new context installed, delete it.
-      delete self_->GetLongJumpContext();
-    }
-    exception_handler_->SetClearException(clear_exception);
-    if (found_dex_pc != DexFile::kDexNoIndex) {
-      exception_handler_->SetHandlerDexPc(found_dex_pc);
-      exception_handler_->SetHandlerQuickFramePc(method->ToNativePc(found_dex_pc));
-      exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
-      return false;  // End stack walk.
-    }
-  }
-  return true;  // Continue stack walk.
-}
-
-}  // namespace art
diff --git a/runtime/catch_block_stack_visitor.h b/runtime/catch_block_stack_visitor.h
deleted file mode 100644
index f45cf03..0000000
--- a/runtime/catch_block_stack_visitor.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_CATCH_BLOCK_STACK_VISITOR_H_
-#define ART_RUNTIME_CATCH_BLOCK_STACK_VISITOR_H_
-
-#include "mirror/object-inl.h"
-#include "stack.h"
-#include "handle_scope-inl.h"
-
-namespace art {
-
-namespace mirror {
-class Throwable;
-}  // namespace mirror
-class Context;
-class QuickExceptionHandler;
-class Thread;
-class ThrowLocation;
-
-// Finds catch handler or prepares deoptimization.
-class CatchBlockStackVisitor FINAL : public StackVisitor {
- public:
-  CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
-                         QuickExceptionHandler* exception_handler)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : StackVisitor(self, context), self_(self), exception_(exception),
-        exception_handler_(exception_handler) {
-  }
-
-  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- private:
-  bool HandleTryItems(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  Thread* const self_;
-  // The type of the exception catch block to find.
-  Handle<mirror::Throwable>* exception_;
-  QuickExceptionHandler* const exception_handler_;
-
-  DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
-};
-
-}  // namespace art
-#endif  // ART_RUNTIME_CATCH_BLOCK_STACK_VISITOR_H_
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index ac86014..9d8888c 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -34,24 +34,22 @@
 }
 
 inline mirror::Class* ClassLinker::FindSystemClass(Thread* self, const char* descriptor) {
-  StackHandleScope<1> hs(self);
-  auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
-  return FindClass(self, descriptor, class_loader);
+  return FindClass(self, descriptor, NullHandle<mirror::ClassLoader>());
 }
 
-inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class* element_class) {
+inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class** element_class) {
   for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
     // Read the cached array class once to avoid races with other threads setting it.
     mirror::Class* array_class = find_array_class_cache_[i];
-    if (array_class != nullptr && array_class->GetComponentType() == element_class) {
+    if (array_class != nullptr && array_class->GetComponentType() == *element_class) {
       return array_class;
     }
   }
-  DCHECK(!element_class->IsPrimitiveVoid());
-  std::string descriptor("[");
-  descriptor += element_class->GetDescriptor();
-  StackHandleScope<1> hs(Thread::Current());
-  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(element_class->GetClassLoader()));
+  DCHECK(!(*element_class)->IsPrimitiveVoid());
+  std::string descriptor = "[" + (*element_class)->GetDescriptor();
+  StackHandleScope<2> hs(Thread::Current());
+  Handle<mirror::ClassLoader> class_loader(hs.NewHandle((*element_class)->GetClassLoader()));
+  HandleWrapper<mirror::Class> h_element_class(hs.NewHandleWrapper(element_class));
   mirror::Class* array_class = FindClass(self, descriptor.c_str(), class_loader);
   // Benign races in storing array class and incrementing index.
   size_t victim_index = find_array_class_cache_next_victim_;
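
Note on the hunk above: FindArrayClass now takes mirror::Class** so the element class can be wrapped in a HandleWrapper while FindClass may allocate, during which a moving GC may relocate the object. A minimal sketch of the idea, with hypothetical names standing in for the ART types:

    struct Object {};

    // Simplified stand-in for art::HandleWrapper<T>: it registers the
    // caller's pointer slot for the scope and writes the (possibly moved)
    // address back when the scope ends.
    template <typename T>
    class ScopedPointerRoot {
     public:
      explicit ScopedPointerRoot(T** slot) : slot_(slot), root_(*slot) {}
      ~ScopedPointerRoot() { *slot_ = root_; }  // Publish any relocation.
      T* Get() const { return root_; }

     private:
      T** slot_;
      T* root_;  // A moving GC would visit and update this field.
    };

    void FindArrayClassLike(Object** element_class) {
      ScopedPointerRoot<Object> h(element_class);
      // ... an allocation here may trigger GC and move the object;
      // h.Get() stays valid and *element_class is refreshed on scope exit.
    }
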
@@ -110,31 +108,47 @@
   return resolved_type;
 }
 
-inline mirror::ArtMethod* ClassLinker::ResolveMethod(uint32_t method_idx,
-                                                     mirror::ArtMethod* referrer,
-                                                     InvokeType type) {
+inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx,
+                                                         mirror::ArtMethod* referrer,
+                                                         InvokeType type) {
   mirror::ArtMethod* resolved_method =
       referrer->GetDexCacheResolvedMethods()->Get(method_idx);
-  if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) {
-    mirror::Class* declaring_class = referrer->GetDeclaringClass();
-    StackHandleScope<2> hs(Thread::Current());
-    Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
-    Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
-    const DexFile& dex_file = *dex_cache->GetDexFile();
-    resolved_method = ResolveMethod(dex_file, method_idx, dex_cache, class_loader, referrer, type);
-    if (resolved_method != nullptr) {
-      DCHECK_EQ(dex_cache->GetResolvedMethod(method_idx), resolved_method);
-    }
+  if (resolved_method == nullptr || resolved_method->IsRuntimeMethod()) {
+    return nullptr;
   }
   return resolved_method;
 }
 
-inline mirror::ArtField* ClassLinker::ResolveField(uint32_t field_idx,
-                                                   mirror::ArtMethod* referrer,
+inline mirror::ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx,
+                                                     mirror::ArtMethod** referrer,
+                                                     InvokeType type) {
+  mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer, type);
+  if (LIKELY(resolved_method != nullptr)) {
+    return resolved_method;
+  }
+  mirror::Class* declaring_class = (*referrer)->GetDeclaringClass();
+  StackHandleScope<3> hs(self);
+  Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+  Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
+  HandleWrapper<mirror::ArtMethod> h_referrer(hs.NewHandleWrapper(referrer));
+  const DexFile* dex_file = h_dex_cache->GetDexFile();
+  resolved_method = ResolveMethod(*dex_file, method_idx, h_dex_cache, h_class_loader, h_referrer,
+                                  type);
+  if (resolved_method != nullptr) {
+    DCHECK_EQ(h_dex_cache->GetResolvedMethod(method_idx), resolved_method);
+  }
+  return resolved_method;
+}
+
+inline mirror::ArtField* ClassLinker::GetResolvedField(uint32_t field_idx,
+                                                       mirror::Class* field_declaring_class) {
+  return field_declaring_class->GetDexCache()->GetResolvedField(field_idx);
+}
+
+inline mirror::ArtField* ClassLinker::ResolveField(uint32_t field_idx, mirror::ArtMethod* referrer,
                                                    bool is_static) {
   mirror::Class* declaring_class = referrer->GetDeclaringClass();
-  mirror::ArtField* resolved_field =
-      declaring_class->GetDexCache()->GetResolvedField(field_idx);
+  mirror::ArtField* resolved_field = GetResolvedField(field_idx, declaring_class);
   if (UNLIKELY(resolved_field == NULL)) {
     StackHandleScope<2> hs(Thread::Current());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
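
Note: the old single ResolveMethod is split into a cheap GetResolvedMethod dex-cache probe and a handle-building slow path. A hedged sketch of the shape (illustrative names, not the ART API):

    #include <cstdint>
    #include <unordered_map>

    struct Method {};

    class DexCacheLike {
     public:
      Method* GetResolved(uint32_t idx) const {
        auto it = cache_.find(idx);
        return it == cache_.end() ? nullptr : it->second;
      }

     private:
      std::unordered_map<uint32_t, Method*> cache_;
    };

    Method* ResolveSlow(uint32_t idx, DexCacheLike* cache);  // May allocate and GC.

    // Mirrors the GetResolvedMethod/ResolveMethod split: probe the cache
    // first, and only pay for StackHandleScope setup when the entry is missing.
    Method* Resolve(uint32_t idx, DexCacheLike* cache) {
      Method* m = cache->GetResolved(idx);
      return m != nullptr ? m : ResolveSlow(idx, cache);
    }
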
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c7302b5..b9c42ee 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -563,8 +563,7 @@
                                   const char* oat_cache_filename,
                                   std::string* error_msg) {
   Locks::mutator_lock_->AssertNotHeld(Thread::Current());  // Avoid starving GC.
-  std::string dex2oat(GetAndroidRoot());
-  dex2oat += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
+  std::string dex2oat(Runtime::Current()->GetCompilerExecutable());
 
   gc::Heap* heap = Runtime::Current()->GetHeap();
   std::string boot_image_option("--boot-image=");
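
Note: the dex2oat path is now asked of the runtime instead of being rebuilt from GetAndroidRoot() at the call site. A sketch under the assumption that the runtime can carry an explicit override (hypothetical shape, not the real Runtime class):

    #include <string>

    struct RuntimeSketch {
      std::string compiler_executable_;  // Assumed settable via a runtime flag.
      bool is_debug_build_ = false;

      std::string GetCompilerExecutable() const {
        if (!compiler_executable_.empty()) {
          return compiler_executable_;  // Honor an override in one place.
        }
        return std::string("/system/bin/") +
               (is_debug_build_ ? "dex2oatd" : "dex2oat");
      }
    };
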
@@ -1369,7 +1368,7 @@
 }
 
 mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
-                                      const Handle<mirror::ClassLoader>& class_loader) {
+                                      Handle<mirror::ClassLoader> class_loader) {
   DCHECK_NE(*descriptor, '\0') << "descriptor is empty string";
   DCHECK(self != nullptr);
   self->AssertNoPendingException();
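
Note: from here on, every const Handle<T>& parameter becomes Handle<T> by value. A simplified sketch (not the ART class) of why that is cheap and also lets callers pass temporaries such as NullHandle<T>():

    template <typename T>
    class HandleSketch {
     public:
      explicit HandleSketch(T** slot) : slot_(slot) {}
      T* Get() const { return *slot_; }

     private:
      T** slot_;  // One word: points at a StackHandleScope slot the GC updates.
    };

    // Passing by value copies a single pointer and removes one level of
    // indirection on every Get(); a const reference saved nothing.
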
@@ -1390,8 +1389,7 @@
     DexFile::ClassPathEntry pair = DexFile::FindInClassPath(descriptor, boot_class_path_);
     if (pair.second != NULL) {
       StackHandleScope<1> hs(self);
-      auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
-      return DefineClass(descriptor, class_loader, *pair.first, *pair.second);
+      return DefineClass(descriptor, NullHandle<mirror::ClassLoader>(), *pair.first, *pair.second);
     }
   } else if (Runtime::Current()->UseCompileTimeClassPath()) {
     // First try the boot class path, we check the descriptor first to avoid an unnecessary
@@ -1452,7 +1450,7 @@
 }
 
 mirror::Class* ClassLinker::DefineClass(const char* descriptor,
-                                        const Handle<mirror::ClassLoader>& class_loader,
+                                        Handle<mirror::ClassLoader> class_loader,
                                         const DexFile& dex_file,
                                         const DexFile::ClassDef& dex_class_def) {
   Thread* self = Thread::Current();
@@ -1796,10 +1794,9 @@
   // Ignore virtual methods on the iterator.
 }
 
-void ClassLinker::LinkCode(const Handle<mirror::ArtMethod>& method,
-                           const OatFile::OatClass* oat_class,
+void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
                            const DexFile& dex_file, uint32_t dex_method_index,
-                           uint32_t method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+                           uint32_t method_index) {
   // Method shouldn't have already been linked.
   DCHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
   DCHECK(method->GetEntryPointFromPortableCompiledCode() == nullptr);
@@ -1871,7 +1868,7 @@
 
 void ClassLinker::LoadClass(const DexFile& dex_file,
                             const DexFile::ClassDef& dex_class_def,
-                            const Handle<mirror::Class>& klass,
+                            Handle<mirror::Class> klass,
                             mirror::ClassLoader* class_loader) {
   CHECK(klass.Get() != NULL);
   CHECK(klass->GetDexCache() != NULL);
@@ -1909,7 +1906,7 @@
 
 void ClassLinker::LoadClassMembers(const DexFile& dex_file,
                                    const byte* class_data,
-                                   const Handle<mirror::Class>& klass,
+                                   Handle<mirror::Class> klass,
                                    mirror::ClassLoader* class_loader,
                                    const OatFile::OatClass* oat_class) {
   // Load fields.
@@ -2007,8 +2004,7 @@
 }
 
 void ClassLinker::LoadField(const DexFile& /*dex_file*/, const ClassDataItemIterator& it,
-                            const Handle<mirror::Class>& klass,
-                            const Handle<mirror::ArtField>& dst) {
+                            Handle<mirror::Class> klass, Handle<mirror::ArtField> dst) {
   uint32_t field_idx = it.GetMemberIndex();
   dst->SetDexFieldIndex(field_idx);
   dst->SetDeclaringClass(klass.Get());
@@ -2017,7 +2013,7 @@
 
 mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file,
                                            const ClassDataItemIterator& it,
-                                           const Handle<mirror::Class>& klass) {
+                                           Handle<mirror::Class> klass) {
   uint32_t dex_method_idx = it.GetMemberIndex();
   const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
   const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_);
@@ -2088,7 +2084,7 @@
 }
 
 void ClassLinker::AppendToBootClassPath(const DexFile& dex_file,
-                                        const Handle<mirror::DexCache>& dex_cache) {
+                                        Handle<mirror::DexCache> dex_cache) {
   CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
   boot_class_path_.push_back(&dex_file);
   RegisterDexFile(dex_file, dex_cache);
@@ -2110,7 +2106,7 @@
 }
 
 void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
-                                        const Handle<mirror::DexCache>& dex_cache) {
+                                        Handle<mirror::DexCache> dex_cache) {
   dex_lock_.AssertExclusiveHeld(Thread::Current());
   CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
   CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()))
@@ -2147,7 +2143,7 @@
 }
 
 void ClassLinker::RegisterDexFile(const DexFile& dex_file,
-                                  const Handle<mirror::DexCache>& dex_cache) {
+                                  Handle<mirror::DexCache> dex_cache) {
   WriterMutexLock mu(Thread::Current(), dex_lock_);
   RegisterDexFileLocked(dex_file, dex_cache);
 }
@@ -2224,7 +2220,7 @@
 //
 // Returns NULL with an exception raised on failure.
 mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor,
-                                             const Handle<mirror::ClassLoader>& class_loader) {
+                                             Handle<mirror::ClassLoader> class_loader) {
   // Identify the underlying component type
   CHECK_EQ('[', descriptor[0]);
   StackHandleScope<2> hs(self);
@@ -2416,7 +2412,7 @@
        it != end && it->first == hash;
        ++it) {
     mirror::Class* klass = it->second;
-    if (klass->GetClassLoader() == class_loader && descriptor == klass->GetDescriptor()) {
+    if (klass->GetClassLoader() == class_loader && klass->DescriptorEquals(descriptor)) {
       class_table_.erase(it);
       return true;
     }
@@ -2460,13 +2456,13 @@
   auto end = class_table_.end();
   for (auto it = class_table_.lower_bound(hash); it != end && it->first == hash; ++it) {
     mirror::Class* klass = it->second;
-    if (klass->GetClassLoader() == class_loader && descriptor == klass->GetDescriptor()) {
+    if (klass->GetClassLoader() == class_loader && klass->DescriptorEquals(descriptor)) {
       if (kIsDebugBuild) {
         // Check for duplicates in the table.
         for (++it; it != end && it->first == hash; ++it) {
           mirror::Class* klass2 = it->second;
-          CHECK(!((klass2->GetClassLoader() == class_loader) &&
-              descriptor == klass2->GetDescriptor()))
+          CHECK(!(klass2->GetClassLoader() == class_loader &&
+              klass2->DescriptorEquals(descriptor)))
               << PrettyClass(klass) << " " << klass << " " << klass->GetClassLoader() << " "
               << PrettyClass(klass2) << " " << klass2 << " " << klass2->GetClassLoader();
         }
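
Note: the class-table probes above switch from descriptor == klass->GetDescriptor() to klass->DescriptorEquals(descriptor). A sketch of the allocation this avoids (simplified; real signatures differ):

    #include <cstring>
    #include <string>

    struct KlassLike {
      const char* descriptor;  // Points into dex file data; not owned.

      std::string GetDescriptor() const { return descriptor; }  // Allocates.

      bool DescriptorEquals(const char* match) const {  // Allocation-free.
        return std::strcmp(descriptor, match) == 0;
      }
    };
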
@@ -2557,13 +2553,13 @@
   for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
       it != end && it->first == hash; ++it) {
     mirror::Class* klass = it->second;
-    if (descriptor == klass->GetDescriptor()) {
+    if (klass->DescriptorEquals(descriptor)) {
       result.push_back(klass);
     }
   }
 }
 
-void ClassLinker::VerifyClass(const Handle<mirror::Class>& klass) {
+void ClassLinker::VerifyClass(Handle<mirror::Class> klass) {
   // TODO: assert that the monitor on the Class is held
   Thread* self = Thread::Current();
   ObjectLock<mirror::Class> lock(self, klass);
@@ -2776,7 +2772,7 @@
 }
 
 void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
-                                                    const Handle<mirror::Class>& klass) {
+                                                    Handle<mirror::Class> klass) {
   for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
     ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i));
   }
@@ -2817,7 +2813,7 @@
 
 static void CheckProxyConstructor(mirror::ArtMethod* constructor);
 static void CheckProxyMethod(mirror::ArtMethod* method,
-                             Handle<mirror::ArtMethod>& prototype);
+                             Handle<mirror::ArtMethod> prototype);
 
 mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name,
                                              jobjectArray interfaces, jobject loader,
@@ -2999,7 +2995,7 @@
 
 
 mirror::ArtMethod* ClassLinker::CreateProxyConstructor(Thread* self,
-                                                       const Handle<mirror::Class>& klass,
+                                                       Handle<mirror::Class> klass,
                                                        mirror::Class* proxy_class) {
   // Create constructor for Proxy that must initialize h
   mirror::ObjectArray<mirror::ArtMethod>* proxy_direct_methods =
@@ -3030,8 +3026,8 @@
 }
 
 mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
-                                                  const Handle<mirror::Class>& klass,
-                                                  const Handle<mirror::ArtMethod>& prototype) {
+                                                  Handle<mirror::Class> klass,
+                                                  Handle<mirror::ArtMethod> prototype) {
   // Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
   // prototype method
   prototype->GetDeclaringClass()->GetDexCache()->SetResolvedMethod(prototype->GetDexMethodIndex(),
@@ -3058,8 +3054,7 @@
   return method;
 }
 
-static void CheckProxyMethod(mirror::ArtMethod* method,
-                             Handle<mirror::ArtMethod>& prototype)
+static void CheckProxyMethod(mirror::ArtMethod* method, Handle<mirror::ArtMethod> prototype)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Basic sanity
   CHECK(!prototype->IsFinal());
@@ -3119,7 +3114,7 @@
   return init_done_;
 }
 
-bool ClassLinker::InitializeClass(const Handle<mirror::Class>& klass, bool can_init_statics,
+bool ClassLinker::InitializeClass(Handle<mirror::Class> klass, bool can_init_statics,
                                   bool can_init_parents) {
   // see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol
 
@@ -3286,7 +3281,7 @@
   return success;
 }
 
-bool ClassLinker::WaitForInitializeClass(const Handle<mirror::Class>& klass, Thread* self,
+bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self,
                                          ObjectLock<mirror::Class>& lock)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   while (true) {
@@ -3326,7 +3321,7 @@
   LOG(FATAL) << "Not Reached" << PrettyClass(klass.Get());
 }
 
-bool ClassLinker::ValidateSuperClassDescriptors(const Handle<mirror::Class>& klass) {
+bool ClassLinker::ValidateSuperClassDescriptors(Handle<mirror::Class> klass) {
   if (klass->IsInterface()) {
     return true;
   }
@@ -3368,18 +3363,12 @@
   return true;
 }
 
-bool ClassLinker::EnsureInitialized(const Handle<mirror::Class>& c, bool can_init_fields,
+bool ClassLinker::EnsureInitialized(Handle<mirror::Class> c, bool can_init_fields,
                                     bool can_init_parents) {
-  DCHECK(c.Get() != NULL);
-  if (c->IsInitialized()) {
-    return true;
-  }
-
-  bool success = InitializeClass(c, can_init_fields, can_init_parents);
-  if (!success) {
-    if (can_init_fields && can_init_parents) {
-      CHECK(Thread::Current()->IsExceptionPending()) << PrettyClass(c.Get());
-    }
+  DCHECK(c.Get() != nullptr);
+  const bool success = c->IsInitialized() || InitializeClass(c, can_init_fields, can_init_parents);
+  if (!success && can_init_fields && can_init_parents) {
+    CHECK(Thread::Current()->IsExceptionPending()) << PrettyClass(c.Get());
   }
   return success;
 }
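
Note: the EnsureInitialized rewrite folds the early return into one short-circuit expression; in miniature (stand-in signature, not the ART one):

    // Equivalent to the early-return form because || short-circuits:
    // init() runs only when not already initialized.
    bool EnsureLike(bool initialized, bool (*init)()) {
      const bool success = initialized || init();
      return success;
    }
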
@@ -3398,8 +3387,8 @@
   }
 }
 
-bool ClassLinker::LinkClass(Thread* self, const Handle<mirror::Class>& klass,
-                            const Handle<mirror::ObjectArray<mirror::Class>>& interfaces) {
+bool ClassLinker::LinkClass(Thread* self, Handle<mirror::Class> klass,
+                            Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
   CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
   if (!LinkSuperClass(klass)) {
     return false;
@@ -3420,8 +3409,7 @@
   return true;
 }
 
-bool ClassLinker::LoadSuperAndInterfaces(const Handle<mirror::Class>& klass,
-                                         const DexFile& dex_file) {
+bool ClassLinker::LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file) {
   CHECK_EQ(mirror::Class::kStatusIdx, klass->GetStatus());
   const DexFile::ClassDef& class_def = dex_file.GetClassDef(klass->GetDexClassDefIndex());
   uint16_t super_class_idx = class_def.superclass_idx_;
@@ -3464,7 +3452,7 @@
   return true;
 }
 
-bool ClassLinker::LinkSuperClass(const Handle<mirror::Class>& klass) {
+bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) {
   CHECK(!klass->IsPrimitive());
   mirror::Class* super = klass->GetSuperClass();
   if (klass.Get() == GetClassRoot(kJavaLangObject)) {
@@ -3524,8 +3512,8 @@
 }
 
 // Populate the class vtable and itable. Compute return type indices.
-bool ClassLinker::LinkMethods(const Handle<mirror::Class>& klass,
-                              const Handle<mirror::ObjectArray<mirror::Class>>& interfaces) {
+bool ClassLinker::LinkMethods(Handle<mirror::Class> klass,
+                              Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
   if (klass->IsInterface()) {
     // No vtable.
     size_t count = klass->NumVirtualMethods();
@@ -3545,7 +3533,7 @@
   return true;
 }
 
-bool ClassLinker::LinkVirtualMethods(const Handle<mirror::Class>& klass) {
+bool ClassLinker::LinkVirtualMethods(Handle<mirror::Class> klass) {
   Thread* self = Thread::Current();
   if (klass->HasSuperClass()) {
     uint32_t max_count = (klass->NumVirtualMethods() +
@@ -3632,9 +3620,8 @@
   return true;
 }
 
-bool ClassLinker::LinkInterfaceMethods(
-    const Handle<mirror::Class>& klass,
-    const Handle<mirror::ObjectArray<mirror::Class>>& interfaces) {
+bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
+                                       Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
   Thread* const self = Thread::Current();
   // Set the imt table to be all conflicts by default.
   klass->SetImTable(Runtime::Current()->GetDefaultImt());
@@ -3889,12 +3876,12 @@
   return true;
 }
 
-bool ClassLinker::LinkInstanceFields(const Handle<mirror::Class>& klass) {
+bool ClassLinker::LinkInstanceFields(Handle<mirror::Class> klass) {
   CHECK(klass.Get() != NULL);
   return LinkFields(klass, false);
 }
 
-bool ClassLinker::LinkStaticFields(const Handle<mirror::Class>& klass) {
+bool ClassLinker::LinkStaticFields(Handle<mirror::Class> klass) {
   CHECK(klass.Get() != NULL);
   size_t allocated_class_size = klass->GetClassSize();
   bool success = LinkFields(klass, true);
@@ -3933,7 +3920,7 @@
   }
 };
 
-bool ClassLinker::LinkFields(const Handle<mirror::Class>& klass, bool is_static) {
+bool ClassLinker::LinkFields(Handle<mirror::Class> klass, bool is_static) {
   size_t num_fields =
       is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
 
@@ -4029,7 +4016,7 @@
   }
 
   // We lie to the GC about the java.lang.ref.Reference.referent field, so it doesn't scan it.
-  if (!is_static && "Ljava/lang/ref/Reference;" == klass->GetDescriptor()) {
+  if (!is_static && klass->DescriptorEquals("Ljava/lang/ref/Reference;")) {
     // We know there are no non-reference fields in the Reference classes, and we know
     // that 'referent' is alphabetically last, so this is easy...
     CHECK_EQ(num_reference_fields, num_fields) << PrettyClass(klass.Get());
@@ -4054,7 +4041,7 @@
       FieldHelper fh(field);
       Primitive::Type type = fh.GetTypeAsPrimitiveType();
       bool is_primitive = type != Primitive::kPrimNot;
-      if ("Ljava/lang/ref/Reference;" == klass->GetDescriptor() &&
+      if (klass->DescriptorEquals("Ljava/lang/ref/Reference;") &&
           strcmp("referent", fh.GetName()) == 0) {
         is_primitive = true;  // We lied above, so we have to expect a lie here.
       }
@@ -4093,7 +4080,7 @@
 
 //  Set the bitmap of reference offsets, refOffsets, from the ifields
 //  list.
-void ClassLinker::CreateReferenceInstanceOffsets(const Handle<mirror::Class>& klass) {
+void ClassLinker::CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) {
   uint32_t reference_offsets = 0;
   mirror::Class* super_class = klass->GetSuperClass();
   if (super_class != NULL) {
@@ -4107,11 +4094,11 @@
   CreateReferenceOffsets(klass, false, reference_offsets);
 }
 
-void ClassLinker::CreateReferenceStaticOffsets(const Handle<mirror::Class>& klass) {
+void ClassLinker::CreateReferenceStaticOffsets(Handle<mirror::Class> klass) {
   CreateReferenceOffsets(klass, true, 0);
 }
 
-void ClassLinker::CreateReferenceOffsets(const Handle<mirror::Class>& klass, bool is_static,
+void ClassLinker::CreateReferenceOffsets(Handle<mirror::Class> klass, bool is_static,
                                          uint32_t reference_offsets) {
   size_t num_reference_fields =
       is_static ? klass->NumReferenceStaticFieldsDuringLinking()
@@ -4144,7 +4131,7 @@
 }
 
 mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t string_idx,
-                                           const Handle<mirror::DexCache>& dex_cache) {
+                                           Handle<mirror::DexCache> dex_cache) {
   DCHECK(dex_cache.Get() != nullptr);
   mirror::String* resolved = dex_cache->GetResolvedString(string_idx);
   if (resolved != NULL) {
@@ -4166,8 +4153,8 @@
 }
 
 mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
-                                        const Handle<mirror::DexCache>& dex_cache,
-                                        const Handle<mirror::ClassLoader>& class_loader) {
+                                        Handle<mirror::DexCache> dex_cache,
+                                        Handle<mirror::ClassLoader> class_loader) {
   DCHECK(dex_cache.Get() != NULL);
   mirror::Class* resolved = dex_cache->GetResolvedType(type_idx);
   if (resolved == NULL) {
@@ -4198,16 +4185,15 @@
   return resolved;
 }
 
-mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
-                                              uint32_t method_idx,
-                                              const Handle<mirror::DexCache>& dex_cache,
-                                              const Handle<mirror::ClassLoader>& class_loader,
-                                              mirror::ArtMethod* referrer,
+mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t method_idx,
+                                              Handle<mirror::DexCache> dex_cache,
+                                              Handle<mirror::ClassLoader> class_loader,
+                                              Handle<mirror::ArtMethod> referrer,
                                               InvokeType type) {
   DCHECK(dex_cache.Get() != NULL);
   // Check for hit in the dex cache.
   mirror::ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
-  if (resolved != NULL && !resolved->IsRuntimeMethod()) {
+  if (resolved != nullptr && !resolved->IsRuntimeMethod()) {
     return resolved;
   }
   // Fail, get the declaring class.
@@ -4282,7 +4268,7 @@
     }
 
     // If we found something, check that it can be accessed by the referrer.
-    if (resolved != NULL && referrer != NULL) {
+    if (resolved != NULL && referrer.Get() != NULL) {
       mirror::Class* methods_class = resolved->GetDeclaringClass();
       mirror::Class* referring_class = referrer->GetDeclaringClass();
       if (!referring_class->CanAccess(methods_class)) {
@@ -4302,11 +4288,11 @@
       case kDirect:
       case kStatic:
         if (resolved != NULL) {
-          ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer);
+          ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
         } else {
           resolved = klass->FindInterfaceMethod(name, signature);
           if (resolved != NULL) {
-            ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer);
+            ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
           } else {
             ThrowNoSuchMethodError(type, klass, name, signature);
           }
@@ -4314,11 +4300,11 @@
         break;
       case kInterface:
         if (resolved != NULL) {
-          ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer);
+          ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
         } else {
           resolved = klass->FindVirtualMethod(name, signature);
           if (resolved != NULL) {
-            ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer);
+            ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
           } else {
             ThrowNoSuchMethodError(type, klass, name, signature);
           }
@@ -4329,11 +4315,11 @@
         break;
       case kVirtual:
         if (resolved != NULL) {
-          ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer);
+          ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
         } else {
           resolved = klass->FindInterfaceMethod(name, signature);
           if (resolved != NULL) {
-            ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer);
+            ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
           } else {
             ThrowNoSuchMethodError(type, klass, name, signature);
           }
@@ -4346,8 +4332,8 @@
 }
 
 mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx,
-                                            const Handle<mirror::DexCache>& dex_cache,
-                                            const Handle<mirror::ClassLoader>& class_loader,
+                                            Handle<mirror::DexCache> dex_cache,
+                                            Handle<mirror::ClassLoader> class_loader,
                                             bool is_static) {
   DCHECK(dex_cache.Get() != nullptr);
   mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
@@ -4389,8 +4375,8 @@
 
 mirror::ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
                                                uint32_t field_idx,
-                                               const Handle<mirror::DexCache>& dex_cache,
-                                               const Handle<mirror::ClassLoader>& class_loader) {
+                                               Handle<mirror::DexCache> dex_cache,
+                                               Handle<mirror::ClassLoader> class_loader) {
   DCHECK(dex_cache.Get() != nullptr);
   mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
   if (resolved != NULL) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 54805be..ccf0558 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -75,7 +75,7 @@
   // Finds a class by its descriptor, loading it if necessary.
   // If class_loader is null, searches boot_class_path_.
   mirror::Class* FindClass(Thread* self, const char* descriptor,
-                           const Handle<mirror::ClassLoader>& class_loader)
+                           Handle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Finds a class by its descriptor using the "system" class loader, ie by searching the
@@ -84,7 +84,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Finds the array class given for the element class.
-  mirror::Class* FindArrayClass(Thread* self, mirror::Class* element_class)
+  mirror::Class* FindArrayClass(Thread* self, mirror::Class** element_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns true if the class linker is initialized.
@@ -92,7 +92,7 @@
 
   // Define a new class based on a ClassDef from a DexFile
   mirror::Class* DefineClass(const char* descriptor,
-                             const Handle<mirror::ClassLoader>& class_loader,
+                             Handle<mirror::ClassLoader> class_loader,
                              const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -136,7 +136,7 @@
   // Resolve a String with the given index from the DexFile, storing the
   // result in the DexCache.
   mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
-                                const Handle<mirror::DexCache>& dex_cache)
+                                Handle<mirror::DexCache> dex_cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Resolve a Type with the given index from the DexFile, storing the
@@ -159,8 +159,8 @@
   // type, since it may be referenced from but not contained within
   // the given DexFile.
   mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx,
-                             const Handle<mirror::DexCache>& dex_cache,
-                             const Handle<mirror::ClassLoader>& class_loader)
+                             Handle<mirror::DexCache> dex_cache,
+                             Handle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Resolve a method with a given ID from the DexFile, storing the
@@ -170,16 +170,21 @@
   // virtual method.
   mirror::ArtMethod* ResolveMethod(const DexFile& dex_file,
                                    uint32_t method_idx,
-                                   const Handle<mirror::DexCache>& dex_cache,
-                                   const Handle<mirror::ClassLoader>& class_loader,
-                                   mirror::ArtMethod* referrer,
+                                   Handle<mirror::DexCache> dex_cache,
+                                   Handle<mirror::ClassLoader> class_loader,
+                                   Handle<mirror::ArtMethod> referrer,
                                    InvokeType type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  mirror::ArtMethod* ResolveMethod(uint32_t method_idx, mirror::ArtMethod* referrer,
+  mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer,
+                                       InvokeType type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, mirror::ArtMethod** referrer,
                                    InvokeType type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  mirror::ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   mirror::ArtField* ResolveField(uint32_t field_idx, mirror::ArtMethod* referrer,
                                  bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -191,8 +196,8 @@
   // field.
   mirror::ArtField* ResolveField(const DexFile& dex_file,
                                  uint32_t field_idx,
-                                 const Handle<mirror::DexCache>& dex_cache,
-                                 const Handle<mirror::ClassLoader>& class_loader,
+                                 Handle<mirror::DexCache> dex_cache,
+                                 Handle<mirror::ClassLoader> class_loader,
                                  bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -200,10 +205,9 @@
   // result in DexCache. The ClassLinker and ClassLoader are used as
   // in ResolveType. No is_static argument is provided so that Java
   // field resolution semantics are followed.
-  mirror::ArtField* ResolveFieldJLS(const DexFile& dex_file,
-                                    uint32_t field_idx,
-                                    const Handle<mirror::DexCache>& dex_cache,
-                                    const Handle<mirror::ClassLoader>& class_loader)
+  mirror::ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx,
+                                    Handle<mirror::DexCache> dex_cache,
+                                    Handle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get shorty from method index without resolution. Used to do handlerization.
@@ -213,8 +217,7 @@
   // Returns true on success, false if there's an exception pending.
   // can_run_clinit=false allows the compiler to attempt to init a class,
   // given the restriction that no <clinit> execution is possible.
-  bool EnsureInitialized(const Handle<mirror::Class>& c,
-                         bool can_init_fields, bool can_init_parents)
+  bool EnsureInitialized(Handle<mirror::Class> c, bool can_init_fields, bool can_init_parents)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Initializes classes that have instances in the image but that have
@@ -224,7 +227,7 @@
   void RegisterDexFile(const DexFile& dex_file)
       LOCKS_EXCLUDED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void RegisterDexFile(const DexFile& dex_file, const Handle<mirror::DexCache>& dex_cache)
+  void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
       LOCKS_EXCLUDED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -316,12 +319,12 @@
                                                                               size_t length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void VerifyClass(const Handle<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VerifyClass(Handle<mirror::Class> klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
                                mirror::Class::Status& oat_file_class_status)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
-                                         const Handle<mirror::Class>& klass)
+                                         Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::ArtMethod* klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -420,12 +423,12 @@
 
 
   mirror::Class* CreateArrayClass(Thread* self, const char* descriptor,
-                                  const Handle<mirror::ClassLoader>& class_loader)
+                                  Handle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void AppendToBootClassPath(const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void AppendToBootClassPath(const DexFile& dex_file, const Handle<mirror::DexCache>& dex_cache)
+  void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
@@ -437,23 +440,23 @@
 
   void LoadClass(const DexFile& dex_file,
                  const DexFile::ClassDef& dex_class_def,
-                 const Handle<mirror::Class>& klass,
+                 Handle<mirror::Class> klass,
                  mirror::ClassLoader* class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void LoadClassMembers(const DexFile& dex_file,
                         const byte* class_data,
-                        const Handle<mirror::Class>& klass,
+                        Handle<mirror::Class> klass,
                         mirror::ClassLoader* class_loader,
                         const OatFile::OatClass* oat_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
-                 const Handle<mirror::Class>& klass, const Handle<mirror::ArtField>& dst)
+                 Handle<mirror::Class> klass, Handle<mirror::ArtField> dst)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   mirror::ArtMethod* LoadMethod(Thread* self, const DexFile& dex_file,
                                 const ClassDataItemIterator& dex_method,
-                                const Handle<mirror::Class>& klass)
+                                Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -462,23 +465,23 @@
   OatFile::OatClass GetOatClass(const DexFile& dex_file, uint16_t class_def_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void RegisterDexFileLocked(const DexFile& dex_file, const Handle<mirror::DexCache>& dex_cache)
+  void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
       EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsDexFileRegisteredLocked(const DexFile& dex_file) const
       SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_);
 
-  bool InitializeClass(const Handle<mirror::Class>& klass, bool can_run_clinit,
+  bool InitializeClass(Handle<mirror::Class> klass, bool can_run_clinit,
                        bool can_init_parents)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool WaitForInitializeClass(const Handle<mirror::Class>& klass, Thread* self,
+  bool WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self,
                               ObjectLock<mirror::Class>& lock);
-  bool ValidateSuperClassDescriptors(const Handle<mirror::Class>& klass)
+  bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsSameDescriptorInDifferentClassContexts(Thread* self, const char* descriptor,
-                                                Handle<mirror::ClassLoader>& class_loader1,
-                                                Handle<mirror::ClassLoader>& class_loader2)
+                                                Handle<mirror::ClassLoader> class_loader1,
+                                                Handle<mirror::ClassLoader> class_loader2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, mirror::ArtMethod* method,
@@ -486,43 +489,43 @@
                                                      mirror::Class* klass2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkClass(Thread* self, const Handle<mirror::Class>& klass,
-                 const Handle<mirror::ObjectArray<mirror::Class>>& interfaces)
+  bool LinkClass(Thread* self, Handle<mirror::Class> klass,
+                 Handle<mirror::ObjectArray<mirror::Class>> interfaces)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkSuperClass(const Handle<mirror::Class>& klass)
+  bool LinkSuperClass(Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LoadSuperAndInterfaces(const Handle<mirror::Class>& klass, const DexFile& dex_file)
+  bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkMethods(const Handle<mirror::Class>& klass,
-                   const Handle<mirror::ObjectArray<mirror::Class>>& interfaces)
+  bool LinkMethods(Handle<mirror::Class> klass,
+                   Handle<mirror::ObjectArray<mirror::Class>> interfaces)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkVirtualMethods(const Handle<mirror::Class>& klass)
+  bool LinkVirtualMethods(Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkInterfaceMethods(const Handle<mirror::Class>& klass,
-                            const Handle<mirror::ObjectArray<mirror::Class>>& interfaces)
+  bool LinkInterfaceMethods(Handle<mirror::Class> klass,
+                            Handle<mirror::ObjectArray<mirror::Class>> interfaces)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkStaticFields(const Handle<mirror::Class>& klass)
+  bool LinkStaticFields(Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool LinkInstanceFields(const Handle<mirror::Class>& klass)
+  bool LinkInstanceFields(Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool LinkFields(const Handle<mirror::Class>& klass, bool is_static)
+  bool LinkFields(Handle<mirror::Class> klass, bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void LinkCode(const Handle<mirror::ArtMethod>& method, const OatFile::OatClass* oat_class,
+  void LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
                 const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 
-  void CreateReferenceInstanceOffsets(const Handle<mirror::Class>& klass)
+  void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void CreateReferenceStaticOffsets(const Handle<mirror::Class>& klass)
+  void CreateReferenceStaticOffsets(Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void CreateReferenceOffsets(const Handle<mirror::Class>& klass, bool is_static,
+  void CreateReferenceOffsets(Handle<mirror::Class> klass, bool is_static,
                               uint32_t reference_offsets)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -551,11 +554,11 @@
                                                  bool* open_failed)
       LOCKS_EXCLUDED(dex_lock_);
 
-  mirror::ArtMethod* CreateProxyConstructor(Thread* self, const Handle<mirror::Class>& klass,
+  mirror::ArtMethod* CreateProxyConstructor(Thread* self, Handle<mirror::Class> klass,
                                             mirror::Class* proxy_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::ArtMethod* CreateProxyMethod(Thread* self, const Handle<mirror::Class>& klass,
-                                       const Handle<mirror::ArtMethod>& prototype)
+  mirror::ArtMethod* CreateProxyMethod(Thread* self, Handle<mirror::Class> klass,
+                                       Handle<mirror::ArtMethod> prototype)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   std::vector<const DexFile*> boot_class_path_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index d04f02b..e397a5c 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -108,7 +108,7 @@
     AssertArrayClass(array_descriptor, array);
   }
 
-  void AssertArrayClass(const std::string& array_descriptor, const Handle<mirror::Class>& array)
+  void AssertArrayClass(const std::string& array_descriptor, Handle<mirror::Class> array)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ASSERT_TRUE(array.Get() != NULL);
     ASSERT_TRUE(array->GetClass() != NULL);
@@ -147,7 +147,8 @@
     EXPECT_STREQ(direct_interface0->GetDescriptor().c_str(), "Ljava/lang/Cloneable;");
     mirror::Class* direct_interface1 = mirror::Class::GetDirectInterface(self, array, 1);
     EXPECT_STREQ(direct_interface1->GetDescriptor().c_str(), "Ljava/io/Serializable;");
-    EXPECT_EQ(class_linker_->FindArrayClass(self, array->GetComponentType()), array.Get());
+    mirror::Class* array_ptr = array->GetComponentType();
+    EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
   }
 
   void AssertMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -178,7 +179,7 @@
     EXPECT_TRUE(fh.GetType() != NULL);
   }
 
-  void AssertClass(const std::string& descriptor, const Handle<mirror::Class>& klass)
+  void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     EXPECT_STREQ(descriptor.c_str(), klass->GetDescriptor().c_str());
     if (descriptor == "Ljava/lang/Object;") {
@@ -846,8 +847,7 @@
   // Validate that the "value" field is always the 0th field in each of java.lang's box classes.
   // This lets UnboxPrimitive avoid searching for the field by name at runtime.
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<1> hs(soa.Self());
-  auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
+  NullHandle<mirror::ClassLoader> class_loader;
   mirror::Class* c;
   c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Boolean;", class_loader);
   FieldHelper fh(c->GetIFields()->Get(0));
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 7136c67..984f287 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2881,7 +2881,7 @@
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
   verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
                                     &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
-                                    m->GetAccessFlags(), false, true);
+                                    m->GetAccessFlags(), false, true, false);
   // Note: we don't need to verify the method.
   return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
 }
@@ -4039,7 +4039,11 @@
   // Send a series of heap segment chunks.
   HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
   if (native) {
+#ifdef USE_DLMALLOC
     dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
+#else
+    UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
+#endif
   } else {
     gc::Heap* heap = Runtime::Current()->GetHeap();
     const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
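
Note: dlmalloc_inspect_all() only exists when the native allocator is dlmalloc, hence the new guard. The pattern in isolation, with hypothetical stand-ins for the callback and the warning path (the real fallback is ART's UNIMPLEMENTED(WARNING) macro):

    #include <cstddef>
    #include <cstdio>

    static void ChunkCallback(void* /*start*/, void* /*end*/,
                              size_t /*used_bytes*/, void* /*arg*/) {}

    void InspectNativeHeap() {
    #ifdef USE_DLMALLOC
      dlmalloc_inspect_all(ChunkCallback, nullptr);  // Walks each live chunk.
    #else
      std::fprintf(stderr,
                   "Native heap inspection is only supported with dlmalloc\n");
    #endif
    }
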
diff --git a/runtime/deoptimize_stack_visitor.cc b/runtime/deoptimize_stack_visitor.cc
deleted file mode 100644
index c7fbc87..0000000
--- a/runtime/deoptimize_stack_visitor.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "deoptimize_stack_visitor.h"
-
-#include "mirror/art_method-inl.h"
-#include "object_utils.h"
-#include "quick_exception_handler.h"
-#include "handle_scope-inl.h"
-#include "verifier/method_verifier.h"
-
-namespace art {
-
-bool DeoptimizeStackVisitor::VisitFrame() {
-  exception_handler_->SetHandlerFrameId(GetFrameId());
-  mirror::ArtMethod* method = GetMethod();
-  if (method == nullptr) {
-    // This is the upcall, we remember the frame and last pc so that we may long jump to them.
-    exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
-    exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
-    return false;  // End stack walk.
-  } else if (method->IsRuntimeMethod()) {
-    // Ignore callee save method.
-    DCHECK(method->IsCalleeSaveMethod());
-    return true;
-  } else {
-    return HandleDeoptimization(method);
-  }
-}
-
-bool DeoptimizeStackVisitor::HandleDeoptimization(mirror::ArtMethod* m) {
-  MethodHelper mh(m);
-  const DexFile::CodeItem* code_item = mh.GetCodeItem();
-  CHECK(code_item != nullptr);
-  uint16_t num_regs = code_item->registers_size_;
-  uint32_t dex_pc = GetDexPc();
-  const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
-  uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
-  ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
-  StackHandleScope<2> hs(self_);
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
-  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
-  verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
-                                    &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
-                                    m->GetAccessFlags(), false, true);
-  verifier.Verify();
-  std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
-  for (uint16_t reg = 0; reg < num_regs; ++reg) {
-    VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
-    switch (kind) {
-      case kUndefined:
-        new_frame->SetVReg(reg, 0xEBADDE09);
-        break;
-      case kConstant:
-        new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
-        break;
-      case kReferenceVReg:
-        new_frame->SetVRegReference(reg,
-                                    reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
-        break;
-      default:
-        new_frame->SetVReg(reg, GetVReg(m, reg, kind));
-        break;
-    }
-  }
-  if (prev_shadow_frame_ != nullptr) {
-    prev_shadow_frame_->SetLink(new_frame);
-  } else {
-    self_->SetDeoptimizationShadowFrame(new_frame);
-  }
-  prev_shadow_frame_ = new_frame;
-  return true;
-}
-
-}  // namespace art
diff --git a/runtime/deoptimize_stack_visitor.h b/runtime/deoptimize_stack_visitor.h
deleted file mode 100644
index c41b803..0000000
--- a/runtime/deoptimize_stack_visitor.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_DEOPTIMIZE_STACK_VISITOR_H_
-#define ART_RUNTIME_DEOPTIMIZE_STACK_VISITOR_H_
-
-#include "base/mutex.h"
-#include "stack.h"
-#include "thread.h"
-
-namespace art {
-
-namespace mirror {
-class ArtMethod;
-}  // namespace mirror
-class QuickExceptionHandler;
-class Thread;
-
-// Prepares deoptimization.
-class DeoptimizeStackVisitor FINAL : public StackVisitor {
- public:
-  DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : StackVisitor(self, context), self_(self), exception_handler_(exception_handler),
-        prev_shadow_frame_(nullptr) {
-    CHECK(!self_->HasDeoptimizationShadowFrame());
-  }
-
-  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- private:
-  bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  Thread* const self_;
-  QuickExceptionHandler* const exception_handler_;
-  ShadowFrame* prev_shadow_frame_;
-
-  DISALLOW_COPY_AND_ASSIGN(DeoptimizeStackVisitor);
-};
-
-}  // namespace art
-#endif  // ART_RUNTIME_DEOPTIMIZE_STACK_VISITOR_H_
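
Note: this deletion, together with catch_block_stack_visitor.h above, removes two standalone StackVisitor subclasses (their logic presumably moves alongside QuickExceptionHandler in this merge). The visitor pattern they implemented, as a hypothetical sketch:

    #include <cstddef>

    struct Frame {};  // Stand-in for a quick frame (pc, method, ...).

    // VisitFrame() is called per frame from the top of the stack down, and
    // returning false stops the walk, as CatchBlockStackVisitor did once a
    // matching handler was found.
    class StackWalker {
     public:
      virtual ~StackWalker() {}
      virtual bool VisitFrame(const Frame& frame) = 0;

      void WalkStack(const Frame* frames, size_t count) {
        for (size_t i = 0; i < count; ++i) {
          if (!VisitFrame(frames[i])) {
            break;  // Visitor asked to stop.
          }
        }
      }
    };
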
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 43ae308..3ff55ab 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -245,7 +245,7 @@
   if (zip_entry.get() == NULL) {
     return nullptr;
   }
-  std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(kClassesDex, error_msg));
+  std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(location.c_str(), kClassesDex, error_msg));
   if (map.get() == NULL) {
     *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", kClassesDex, location.c_str(),
                               error_msg->c_str());
@@ -258,15 +258,15 @@
                               error_msg->c_str());
     return nullptr;
   }
-  if (!DexFileVerifier::Verify(dex_file.get(), dex_file->Begin(), dex_file->Size(),
-                               location.c_str(), error_msg)) {
-    return nullptr;
-  }
   if (!dex_file->DisableWrite()) {
     *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
     return nullptr;
   }
   CHECK(dex_file->IsReadOnly()) << location;
+  if (!DexFileVerifier::Verify(dex_file.get(), dex_file->Begin(), dex_file->Size(),
+                               location.c_str(), error_msg)) {
+    return nullptr;
+  }
   return dex_file.release();
 }
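
Note: the reorder above makes the map read-only before running DexFileVerifier, so the bytes that pass verification are exactly the bytes that stay mapped. The ordering in isolation (hypothetical helpers):

    #include <cstddef>
    #include <string>

    struct MappingSketch {  // Hypothetical stand-in for art::MemMap.
      const unsigned char* data;
      size_t size;
      bool SetReadOnly(std::string* error);  // e.g. mprotect(PROT_READ).
    };

    bool VerifyBytes(const unsigned char* data, size_t size, std::string* error);

    // Seal first, then verify: verifying a still-writable mapping would
    // leave a window in which the checked bytes could change afterwards.
    bool LoadSketch(MappingSketch* map, std::string* error) {
      if (!map->SetReadOnly(error)) {
        return false;
      }
      return VerifyBytes(map->data, map->size, error);
    }
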
 
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index a1c8c71..17d1ffc 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -698,6 +698,7 @@
   const byte* file_end = begin_ + size_;
 
   for (uint32_t i = 0; i < size; i++) {
+    CHECK_LT(i, size);  // b/15014252 Prevents hitting the impossible case below
     if (UNLIKELY(ptr_ >= file_end)) {
       ErrorStringPrintf("String data would go beyond end-of-file");
       return false;
@@ -710,6 +711,7 @@
       case 0x00:
         // Special case of bit pattern 0xxx.
         if (UNLIKELY(byte == 0)) {
+          CHECK_LT(i, size);  // b/15014252 Actually hit this impossible case with clang
           ErrorStringPrintf("String data shorter than indicated utf16_size %x", size);
           return false;
         }
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 560e5ff..1ff5c19 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -118,13 +118,30 @@
   };
 
   enum Flags {
-    kBranch   = 0x01,  // conditional or unconditional branch
-    kContinue = 0x02,  // flow can continue to next statement
-    kSwitch   = 0x04,  // switch statement
-    kThrow    = 0x08,  // could cause an exception to be thrown
-    kReturn   = 0x10,  // returns, no additional statements
-    kInvoke   = 0x20,  // a flavor of invoke
-    kUnconditional = 0x40,  // unconditional branch
+    kBranch              = 0x000001,  // conditional or unconditional branch
+    kContinue            = 0x000002,  // flow can continue to next statement
+    kSwitch              = 0x000004,  // switch statement
+    kThrow               = 0x000008,  // could cause an exception to be thrown
+    kReturn              = 0x000010,  // returns, no additional statements
+    kInvoke              = 0x000020,  // a flavor of invoke
+    kUnconditional       = 0x000040,  // unconditional branch
+    kAdd                 = 0x000080,  // addition
+    kSubtract            = 0x000100,  // subtract
+    kMultiply            = 0x000200,  // multiply
+    kDivide              = 0x000400,  // division
+    kRemainder           = 0x000800,  // remainder
+    kAnd                 = 0x001000,  // and
+    kOr                  = 0x002000,  // or
+    kXor                 = 0x004000,  // xor
+    kShl                 = 0x008000,  // shl
+    kShr                 = 0x010000,  // shr
+    kUshr                = 0x020000,  // ushr
+    kCast                = 0x040000,  // cast
+    kStore               = 0x080000,  // store opcode
+    kLoad                = 0x100000,  // load opcode
+    kClobber             = 0x200000,  // clobbers memory in a big way (not just a write)
+    kRegCFieldOrConstant = 0x400000,  // is the third virtual register a field or literal constant (vC)
+    kRegBFieldOrConstant = 0x800000,  // is the second virtual register a field or literal constant (vB)
   };
 
   enum VerifyFlag {
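
The widened Flags word now folds arithmetic, memory, and operand-kind facts into each opcode's flags, so a pass can classify instructions with one mask test instead of a per-opcode switch. A self-contained sketch; the flag values are copied from the enum above, while the helper names are hypothetical:

    #include <cstdint>

    enum : uint32_t {  // subset of the Flags values above
      kAdd = 0x000080, kSubtract = 0x000100, kMultiply = 0x000200,
      kDivide = 0x000400, kRemainder = 0x000800,
      kStore = 0x080000, kLoad = 0x100000,
    };

    inline bool IsArithmetic(uint32_t flags) {
      return (flags & (kAdd | kSubtract | kMultiply | kDivide | kRemainder)) != 0;
    }
    inline bool TouchesMemory(uint32_t flags) {
      return (flags & (kLoad | kStore)) != 0;
    }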
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index c2cd65a..f43e42f 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -36,27 +36,27 @@
   V(0x0F, RETURN, "return", k11x, false, kNone, kReturn, kVerifyRegA) \
   V(0x10, RETURN_WIDE, "return-wide", k11x, false, kNone, kReturn, kVerifyRegAWide) \
   V(0x11, RETURN_OBJECT, "return-object", k11x, false, kNone, kReturn, kVerifyRegA) \
-  V(0x12, CONST_4, "const/4", k11n, true, kNone, kContinue, kVerifyRegA) \
-  V(0x13, CONST_16, "const/16", k21s, true, kNone, kContinue, kVerifyRegA) \
-  V(0x14, CONST, "const", k31i, true, kNone, kContinue, kVerifyRegA) \
-  V(0x15, CONST_HIGH16, "const/high16", k21h, true, kNone, kContinue, kVerifyRegA) \
-  V(0x16, CONST_WIDE_16, "const-wide/16", k21s, true, kNone, kContinue, kVerifyRegAWide) \
-  V(0x17, CONST_WIDE_32, "const-wide/32", k31i, true, kNone, kContinue, kVerifyRegAWide) \
-  V(0x18, CONST_WIDE, "const-wide", k51l, true, kNone, kContinue, kVerifyRegAWide) \
-  V(0x19, CONST_WIDE_HIGH16, "const-wide/high16", k21h, true, kNone, kContinue, kVerifyRegAWide) \
+  V(0x12, CONST_4, "const/4", k11n, true, kNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
+  V(0x13, CONST_16, "const/16", k21s, true, kNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
+  V(0x14, CONST, "const", k31i, true, kNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
+  V(0x15, CONST_HIGH16, "const/high16", k21h, true, kNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
+  V(0x16, CONST_WIDE_16, "const-wide/16", k21s, true, kNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
+  V(0x17, CONST_WIDE_32, "const-wide/32", k31i, true, kNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
+  V(0x18, CONST_WIDE, "const-wide", k51l, true, kNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
+  V(0x19, CONST_WIDE_HIGH16, "const-wide/high16", k21h, true, kNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
   V(0x1A, CONST_STRING, "const-string", k21c, true, kStringRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBString) \
   V(0x1B, CONST_STRING_JUMBO, "const-string/jumbo", k31c, true, kStringRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBString) \
   V(0x1C, CONST_CLASS, "const-class", k21c, true, kTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBType) \
-  V(0x1D, MONITOR_ENTER, "monitor-enter", k11x, false, kNone, kContinue | kThrow, kVerifyRegA) \
-  V(0x1E, MONITOR_EXIT, "monitor-exit", k11x, false, kNone, kContinue | kThrow, kVerifyRegA) \
+  V(0x1D, MONITOR_ENTER, "monitor-enter", k11x, false, kNone, kContinue | kThrow | kClobber, kVerifyRegA) \
+  V(0x1E, MONITOR_EXIT, "monitor-exit", k11x, false, kNone, kContinue | kThrow | kClobber, kVerifyRegA) \
   V(0x1F, CHECK_CAST, "check-cast", k21c, true, kTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBType) \
   V(0x20, INSTANCE_OF, "instance-of", k22c, true, kTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCType) \
   V(0x21, ARRAY_LENGTH, "array-length", k12x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
-  V(0x22, NEW_INSTANCE, "new-instance", k21c, true, kTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBNewInstance) \
-  V(0x23, NEW_ARRAY, "new-array", k22c, true, kTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCNewArray) \
-  V(0x24, FILLED_NEW_ARRAY, "filled-new-array", k35c, false, kTypeRef, kContinue | kThrow, kVerifyRegBType | kVerifyVarArg) \
-  V(0x25, FILLED_NEW_ARRAY_RANGE, "filled-new-array/range", k3rc, false, kTypeRef, kContinue | kThrow, kVerifyRegBType | kVerifyVarArgRange) \
-  V(0x26, FILL_ARRAY_DATA, "fill-array-data", k31t, false, kNone, kContinue | kThrow, kVerifyRegA | kVerifyArrayData) \
+  V(0x22, NEW_INSTANCE, "new-instance", k21c, true, kTypeRef, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyRegBNewInstance) \
+  V(0x23, NEW_ARRAY, "new-array", k22c, true, kTypeRef, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyRegB | kVerifyRegCNewArray) \
+  V(0x24, FILLED_NEW_ARRAY, "filled-new-array", k35c, false, kTypeRef, kContinue | kThrow | kClobber, kVerifyRegBType | kVerifyVarArg) \
+  V(0x25, FILLED_NEW_ARRAY_RANGE, "filled-new-array/range", k3rc, false, kTypeRef, kContinue | kThrow | kClobber, kVerifyRegBType | kVerifyVarArgRange) \
+  V(0x26, FILL_ARRAY_DATA, "fill-array-data", k31t, false, kNone, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyArrayData) \
   V(0x27, THROW, "throw", k11x, false, kNone, kThrow, kVerifyRegA) \
   V(0x28, GOTO, "goto", k10t, false, kNone, kBranch | kUnconditional, kVerifyBranchTarget) \
   V(0x29, GOTO_16, "goto/16", k20t, false, kNone, kBranch | kUnconditional, kVerifyBranchTarget) \
@@ -86,48 +86,48 @@
   V(0x41, UNUSED_41, "unused-41", k10x, false, kUnknown, 0, kVerifyError) \
   V(0x42, UNUSED_42, "unused-42", k10x, false, kUnknown, 0, kVerifyError) \
   V(0x43, UNUSED_43, "unused-43", k10x, false, kUnknown, 0, kVerifyError) \
-  V(0x44, AGET, "aget", k23x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x45, AGET_WIDE, "aget-wide", k23x, true, kNone, kContinue | kThrow, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
-  V(0x46, AGET_OBJECT, "aget-object", k23x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x47, AGET_BOOLEAN, "aget-boolean", k23x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x48, AGET_BYTE, "aget-byte", k23x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x49, AGET_CHAR, "aget-char", k23x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x4A, AGET_SHORT, "aget-short", k23x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x4B, APUT, "aput", k23x, false, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x4C, APUT_WIDE, "aput-wide", k23x, false, kNone, kContinue | kThrow, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
-  V(0x4D, APUT_OBJECT, "aput-object", k23x, false, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x4E, APUT_BOOLEAN, "aput-boolean", k23x, false, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x4F, APUT_BYTE, "aput-byte", k23x, false, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x50, APUT_CHAR, "aput-char", k23x, false, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x51, APUT_SHORT, "aput-short", k23x, false, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x52, IGET, "iget", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x53, IGET_WIDE, "iget-wide", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
-  V(0x54, IGET_OBJECT, "iget-object", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x55, IGET_BOOLEAN, "iget-boolean", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x56, IGET_BYTE, "iget-byte", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x57, IGET_CHAR, "iget-char", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x58, IGET_SHORT, "iget-short", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x59, IPUT, "iput", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x5A, IPUT_WIDE, "iput-wide", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
-  V(0x5B, IPUT_OBJECT, "iput-object", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x5C, IPUT_BOOLEAN, "iput-boolean", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x5D, IPUT_BYTE, "iput-byte", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x5E, IPUT_CHAR, "iput-char", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x5F, IPUT_SHORT, "iput-short", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
-  V(0x60, SGET, "sget", k21c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x61, SGET_WIDE, "sget-wide", k21c, true, kFieldRef, kContinue | kThrow, kVerifyRegAWide | kVerifyRegBField) \
-  V(0x62, SGET_OBJECT, "sget-object", k21c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x63, SGET_BOOLEAN, "sget-boolean", k21c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x64, SGET_BYTE, "sget-byte", k21c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x65, SGET_CHAR, "sget-char", k21c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x66, SGET_SHORT, "sget-short", k21c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x67, SPUT, "sput", k21c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x68, SPUT_WIDE, "sput-wide", k21c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x69, SPUT_OBJECT, "sput-object", k21c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x6A, SPUT_BOOLEAN, "sput-boolean", k21c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x6B, SPUT_BYTE, "sput-byte", k21c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x6C, SPUT_CHAR, "sput-char", k21c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
-  V(0x6D, SPUT_SHORT, "sput-short", k21c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBField) \
+  V(0x44, AGET, "aget", k23x, true, kNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x45, AGET_WIDE, "aget-wide", k23x, true, kNone, kContinue | kThrow | kLoad, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
+  V(0x46, AGET_OBJECT, "aget-object", k23x, true, kNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x47, AGET_BOOLEAN, "aget-boolean", k23x, true, kNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x48, AGET_BYTE, "aget-byte", k23x, true, kNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x49, AGET_CHAR, "aget-char", k23x, true, kNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x4A, AGET_SHORT, "aget-short", k23x, true, kNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x4B, APUT, "aput", k23x, false, kNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x4C, APUT_WIDE, "aput-wide", k23x, false, kNone, kContinue | kThrow | kStore, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
+  V(0x4D, APUT_OBJECT, "aput-object", k23x, false, kNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x4E, APUT_BOOLEAN, "aput-boolean", k23x, false, kNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x4F, APUT_BYTE, "aput-byte", k23x, false, kNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x50, APUT_CHAR, "aput-char", k23x, false, kNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x51, APUT_SHORT, "aput-short", k23x, false, kNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x52, IGET, "iget", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x53, IGET_WIDE, "iget-wide", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
+  V(0x54, IGET_OBJECT, "iget-object", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x55, IGET_BOOLEAN, "iget-boolean", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x56, IGET_BYTE, "iget-byte", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x57, IGET_CHAR, "iget-char", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x58, IGET_SHORT, "iget-short", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x59, IPUT, "iput", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x5A, IPUT_WIDE, "iput-wide", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
+  V(0x5B, IPUT_OBJECT, "iput-object", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x5C, IPUT_BOOLEAN, "iput-boolean", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x5D, IPUT_BYTE, "iput-byte", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x5E, IPUT_CHAR, "iput-char", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x5F, IPUT_SHORT, "iput-short", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+  V(0x60, SGET, "sget", k21c, true, kFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x61, SGET_WIDE, "sget-wide", k21c, true, kFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegAWide | kVerifyRegBField) \
+  V(0x62, SGET_OBJECT, "sget-object", k21c, true, kFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x63, SGET_BOOLEAN, "sget-boolean", k21c, true, kFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x64, SGET_BYTE, "sget-byte", k21c, true, kFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x65, SGET_CHAR, "sget-char", k21c, true, kFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x66, SGET_SHORT, "sget-short", k21c, true, kFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x67, SPUT, "sput", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x68, SPUT_WIDE, "sput-wide", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x69, SPUT_OBJECT, "sput-object", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x6A, SPUT_BOOLEAN, "sput-boolean", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x6B, SPUT_BYTE, "sput-byte", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x6C, SPUT_CHAR, "sput-char", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+  V(0x6D, SPUT_SHORT, "sput-short", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
   V(0x6E, INVOKE_VIRTUAL, "invoke-virtual", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
   V(0x6F, INVOKE_SUPER, "invoke-super", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
   V(0x70, INVOKE_DIRECT, "invoke-direct", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
@@ -147,110 +147,110 @@
   V(0x7E, NOT_LONG, "not-long", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
   V(0x7F, NEG_FLOAT, "neg-float", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
   V(0x80, NEG_DOUBLE, "neg-double", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0x81, INT_TO_LONG, "int-to-long", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegB) \
-  V(0x82, INT_TO_FLOAT, "int-to-float", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0x83, INT_TO_DOUBLE, "int-to-double", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegB) \
-  V(0x84, LONG_TO_INT, "long-to-int", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegBWide) \
-  V(0x85, LONG_TO_FLOAT, "long-to-float", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegBWide) \
-  V(0x86, LONG_TO_DOUBLE, "long-to-double", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0x87, FLOAT_TO_INT, "float-to-int", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0x88, FLOAT_TO_LONG, "float-to-long", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegB) \
-  V(0x89, FLOAT_TO_DOUBLE, "float-to-double", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegB) \
-  V(0x8A, DOUBLE_TO_INT, "double-to-int", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegBWide) \
-  V(0x8B, DOUBLE_TO_LONG, "double-to-long", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0x8C, DOUBLE_TO_FLOAT, "double-to-float", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegBWide) \
-  V(0x8D, INT_TO_BYTE, "int-to-byte", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0x8E, INT_TO_CHAR, "int-to-char", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0x8F, INT_TO_SHORT, "int-to-short", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0x90, ADD_INT, "add-int", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x91, SUB_INT, "sub-int", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x92, MUL_INT, "mul-int", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x93, DIV_INT, "div-int", k23x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x94, REM_INT, "rem-int", k23x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x95, AND_INT, "and-int", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x96, OR_INT, "or-int", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x97, XOR_INT, "xor-int", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x98, SHL_INT, "shl-int", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x99, SHR_INT, "shr-int", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x9A, USHR_INT, "ushr-int", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0x9B, ADD_LONG, "add-long", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0x9C, SUB_LONG, "sub-long", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0x9D, MUL_LONG, "mul-long", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0x9E, DIV_LONG, "div-long", k23x, true, kNone, kContinue | kThrow, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0x9F, REM_LONG, "rem-long", k23x, true, kNone, kContinue | kThrow, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0xA0, AND_LONG, "and-long", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0xA1, OR_LONG, "or-long", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0xA2, XOR_LONG, "xor-long", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0xA3, SHL_LONG, "shl-long", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
-  V(0xA4, SHR_LONG, "shr-long", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
-  V(0xA5, USHR_LONG, "ushr-long", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
-  V(0xA6, ADD_FLOAT, "add-float", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0xA7, SUB_FLOAT, "sub-float", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0xA8, MUL_FLOAT, "mul-float", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0xA9, DIV_FLOAT, "div-float", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0xAA, REM_FLOAT, "rem-float", k23x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
-  V(0xAB, ADD_DOUBLE, "add-double", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0xAC, SUB_DOUBLE, "sub-double", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0xAD, MUL_DOUBLE, "mul-double", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0xAE, DIV_DOUBLE, "div-double", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0xAF, REM_DOUBLE, "rem-double", k23x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
-  V(0xB0, ADD_INT_2ADDR, "add-int/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xB1, SUB_INT_2ADDR, "sub-int/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xB2, MUL_INT_2ADDR, "mul-int/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xB3, DIV_INT_2ADDR, "div-int/2addr", k12x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
-  V(0xB4, REM_INT_2ADDR, "rem-int/2addr", k12x, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
-  V(0xB5, AND_INT_2ADDR, "and-int/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xB6, OR_INT_2ADDR, "or-int/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xB7, XOR_INT_2ADDR, "xor-int/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xB8, SHL_INT_2ADDR, "shl-int/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xB9, SHR_INT_2ADDR, "shr-int/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xBA, USHR_INT_2ADDR, "ushr-int/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xBB, ADD_LONG_2ADDR, "add-long/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xBC, SUB_LONG_2ADDR, "sub-long/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xBD, MUL_LONG_2ADDR, "mul-long/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xBE, DIV_LONG_2ADDR, "div-long/2addr", k12x, true, kNone, kContinue | kThrow, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xBF, REM_LONG_2ADDR, "rem-long/2addr", k12x, true, kNone, kContinue | kThrow, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xC0, AND_LONG_2ADDR, "and-long/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xC1, OR_LONG_2ADDR, "or-long/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xC2, XOR_LONG_2ADDR, "xor-long/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xC3, SHL_LONG_2ADDR, "shl-long/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegB) \
-  V(0xC4, SHR_LONG_2ADDR, "shr-long/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegB) \
-  V(0xC5, USHR_LONG_2ADDR, "ushr-long/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegB) \
-  V(0xC6, ADD_FLOAT_2ADDR, "add-float/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xC7, SUB_FLOAT_2ADDR, "sub-float/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xC8, MUL_FLOAT_2ADDR, "mul-float/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xC9, DIV_FLOAT_2ADDR, "div-float/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xCA, REM_FLOAT_2ADDR, "rem-float/2addr", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xCB, ADD_DOUBLE_2ADDR, "add-double/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xCC, SUB_DOUBLE_2ADDR, "sub-double/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xCD, MUL_DOUBLE_2ADDR, "mul-double/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xCE, DIV_DOUBLE_2ADDR, "div-double/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xCF, REM_DOUBLE_2ADDR, "rem-double/2addr", k12x, true, kNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
-  V(0xD0, ADD_INT_LIT16, "add-int/lit16", k22s, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xD1, RSUB_INT, "rsub-int", k22s, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xD2, MUL_INT_LIT16, "mul-int/lit16", k22s, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xD3, DIV_INT_LIT16, "div-int/lit16", k22s, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
-  V(0xD4, REM_INT_LIT16, "rem-int/lit16", k22s, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
-  V(0xD5, AND_INT_LIT16, "and-int/lit16", k22s, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xD6, OR_INT_LIT16, "or-int/lit16", k22s, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xD7, XOR_INT_LIT16, "xor-int/lit16", k22s, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xD8, ADD_INT_LIT8, "add-int/lit8", k22b, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xD9, RSUB_INT_LIT8, "rsub-int/lit8", k22b, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xDA, MUL_INT_LIT8, "mul-int/lit8", k22b, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xDB, DIV_INT_LIT8, "div-int/lit8", k22b, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
-  V(0xDC, REM_INT_LIT8, "rem-int/lit8", k22b, true, kNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
-  V(0xDD, AND_INT_LIT8, "and-int/lit8", k22b, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xDE, OR_INT_LIT8, "or-int/lit8", k22b, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xDF, XOR_INT_LIT8, "xor-int/lit8", k22b, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xE0, SHL_INT_LIT8, "shl-int/lit8", k22b, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xE1, SHR_INT_LIT8, "shr-int/lit8", k22b, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xE2, USHR_INT_LIT8, "ushr-int/lit8", k22b, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
-  V(0xE3, IGET_QUICK, "iget-quick", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
-  V(0xE4, IGET_WIDE_QUICK, "iget-wide-quick", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegAWide | kVerifyRegB) \
-  V(0xE5, IGET_OBJECT_QUICK, "iget-object-quick", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
-  V(0xE6, IPUT_QUICK, "iput-quick", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
-  V(0xE7, IPUT_WIDE_QUICK, "iput-wide-quick", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegAWide | kVerifyRegB) \
-  V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
+  V(0x81, INT_TO_LONG, "int-to-long", k12x, true, kNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
+  V(0x82, INT_TO_FLOAT, "int-to-float", k12x, true, kNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
+  V(0x83, INT_TO_DOUBLE, "int-to-double", k12x, true, kNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
+  V(0x84, LONG_TO_INT, "long-to-int", k12x, true, kNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
+  V(0x85, LONG_TO_FLOAT, "long-to-float", k12x, true, kNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
+  V(0x86, LONG_TO_DOUBLE, "long-to-double", k12x, true, kNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0x87, FLOAT_TO_INT, "float-to-int", k12x, true, kNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
+  V(0x88, FLOAT_TO_LONG, "float-to-long", k12x, true, kNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
+  V(0x89, FLOAT_TO_DOUBLE, "float-to-double", k12x, true, kNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
+  V(0x8A, DOUBLE_TO_INT, "double-to-int", k12x, true, kNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
+  V(0x8B, DOUBLE_TO_LONG, "double-to-long", k12x, true, kNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0x8C, DOUBLE_TO_FLOAT, "double-to-float", k12x, true, kNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
+  V(0x8D, INT_TO_BYTE, "int-to-byte", k12x, true, kNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
+  V(0x8E, INT_TO_CHAR, "int-to-char", k12x, true, kNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
+  V(0x8F, INT_TO_SHORT, "int-to-short", k12x, true, kNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
+  V(0x90, ADD_INT, "add-int", k23x, true, kNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x91, SUB_INT, "sub-int", k23x, true, kNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x92, MUL_INT, "mul-int", k23x, true, kNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x93, DIV_INT, "div-int", k23x, true, kNone, kContinue | kThrow | kDivide, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x94, REM_INT, "rem-int", k23x, true, kNone, kContinue | kThrow | kRemainder, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x95, AND_INT, "and-int", k23x, true, kNone, kContinue | kAnd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x96, OR_INT, "or-int", k23x, true, kNone, kContinue | kOr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x97, XOR_INT, "xor-int", k23x, true, kNone, kContinue | kXor, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x98, SHL_INT, "shl-int", k23x, true, kNone, kContinue | kShl, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x99, SHR_INT, "shr-int", k23x, true, kNone, kContinue | kShr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x9A, USHR_INT, "ushr-int", k23x, true, kNone, kContinue | kUshr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0x9B, ADD_LONG, "add-long", k23x, true, kNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0x9C, SUB_LONG, "sub-long", k23x, true, kNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0x9D, MUL_LONG, "mul-long", k23x, true, kNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0x9E, DIV_LONG, "div-long", k23x, true, kNone, kContinue | kThrow | kDivide, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0x9F, REM_LONG, "rem-long", k23x, true, kNone, kContinue | kThrow | kRemainder, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0xA0, AND_LONG, "and-long", k23x, true, kNone, kContinue | kAnd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0xA1, OR_LONG, "or-long", k23x, true, kNone, kContinue | kOr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0xA2, XOR_LONG, "xor-long", k23x, true, kNone, kContinue | kXor, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0xA3, SHL_LONG, "shl-long", k23x, true, kNone, kContinue | kShl, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
+  V(0xA4, SHR_LONG, "shr-long", k23x, true, kNone, kContinue | kShr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
+  V(0xA5, USHR_LONG, "ushr-long", k23x, true, kNone, kContinue | kUshr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
+  V(0xA6, ADD_FLOAT, "add-float", k23x, true, kNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0xA7, SUB_FLOAT, "sub-float", k23x, true, kNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0xA8, MUL_FLOAT, "mul-float", k23x, true, kNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0xA9, DIV_FLOAT, "div-float", k23x, true, kNone, kContinue | kDivide, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0xAA, REM_FLOAT, "rem-float", k23x, true, kNone, kContinue | kRemainder, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+  V(0xAB, ADD_DOUBLE, "add-double", k23x, true, kNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0xAC, SUB_DOUBLE, "sub-double", k23x, true, kNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0xAD, MUL_DOUBLE, "mul-double", k23x, true, kNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0xAE, DIV_DOUBLE, "div-double", k23x, true, kNone, kContinue | kDivide, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0xAF, REM_DOUBLE, "rem-double", k23x, true, kNone, kContinue | kRemainder, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+  V(0xB0, ADD_INT_2ADDR, "add-int/2addr", k12x, true, kNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB) \
+  V(0xB1, SUB_INT_2ADDR, "sub-int/2addr", k12x, true, kNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB) \
+  V(0xB2, MUL_INT_2ADDR, "mul-int/2addr", k12x, true, kNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB) \
+  V(0xB3, DIV_INT_2ADDR, "div-int/2addr", k12x, true, kNone, kContinue | kThrow | kDivide, kVerifyRegA | kVerifyRegB) \
+  V(0xB4, REM_INT_2ADDR, "rem-int/2addr", k12x, true, kNone, kContinue | kThrow | kRemainder, kVerifyRegA | kVerifyRegB) \
+  V(0xB5, AND_INT_2ADDR, "and-int/2addr", k12x, true, kNone, kContinue | kAnd, kVerifyRegA | kVerifyRegB) \
+  V(0xB6, OR_INT_2ADDR, "or-int/2addr", k12x, true, kNone, kContinue | kOr, kVerifyRegA | kVerifyRegB) \
+  V(0xB7, XOR_INT_2ADDR, "xor-int/2addr", k12x, true, kNone, kContinue | kXor, kVerifyRegA | kVerifyRegB) \
+  V(0xB8, SHL_INT_2ADDR, "shl-int/2addr", k12x, true, kNone, kContinue | kShl, kVerifyRegA | kVerifyRegB) \
+  V(0xB9, SHR_INT_2ADDR, "shr-int/2addr", k12x, true, kNone, kContinue | kShr, kVerifyRegA | kVerifyRegB) \
+  V(0xBA, USHR_INT_2ADDR, "ushr-int/2addr", k12x, true, kNone, kContinue | kUshr, kVerifyRegA | kVerifyRegB) \
+  V(0xBB, ADD_LONG_2ADDR, "add-long/2addr", k12x, true, kNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xBC, SUB_LONG_2ADDR, "sub-long/2addr", k12x, true, kNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xBD, MUL_LONG_2ADDR, "mul-long/2addr", k12x, true, kNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xBE, DIV_LONG_2ADDR, "div-long/2addr", k12x, true, kNone, kContinue | kThrow | kDivide, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xBF, REM_LONG_2ADDR, "rem-long/2addr", k12x, true, kNone, kContinue | kThrow | kRemainder, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xC0, AND_LONG_2ADDR, "and-long/2addr", k12x, true, kNone, kContinue | kAnd, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xC1, OR_LONG_2ADDR, "or-long/2addr", k12x, true, kNone, kContinue | kOr, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xC2, XOR_LONG_2ADDR, "xor-long/2addr", k12x, true, kNone, kContinue | kXor, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xC3, SHL_LONG_2ADDR, "shl-long/2addr", k12x, true, kNone, kContinue | kShl, kVerifyRegAWide | kVerifyRegB) \
+  V(0xC4, SHR_LONG_2ADDR, "shr-long/2addr", k12x, true, kNone, kContinue | kShr, kVerifyRegAWide | kVerifyRegB) \
+  V(0xC5, USHR_LONG_2ADDR, "ushr-long/2addr", k12x, true, kNone, kContinue | kUshr, kVerifyRegAWide | kVerifyRegB) \
+  V(0xC6, ADD_FLOAT_2ADDR, "add-float/2addr", k12x, true, kNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB) \
+  V(0xC7, SUB_FLOAT_2ADDR, "sub-float/2addr", k12x, true, kNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB) \
+  V(0xC8, MUL_FLOAT_2ADDR, "mul-float/2addr", k12x, true, kNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB) \
+  V(0xC9, DIV_FLOAT_2ADDR, "div-float/2addr", k12x, true, kNone, kContinue | kDivide, kVerifyRegA | kVerifyRegB) \
+  V(0xCA, REM_FLOAT_2ADDR, "rem-float/2addr", k12x, true, kNone, kContinue | kRemainder, kVerifyRegA | kVerifyRegB) \
+  V(0xCB, ADD_DOUBLE_2ADDR, "add-double/2addr", k12x, true, kNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xCC, SUB_DOUBLE_2ADDR, "sub-double/2addr", k12x, true, kNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xCD, MUL_DOUBLE_2ADDR, "mul-double/2addr", k12x, true, kNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xCE, DIV_DOUBLE_2ADDR, "div-double/2addr", k12x, true, kNone, kContinue | kDivide, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xCF, REM_DOUBLE_2ADDR, "rem-double/2addr", k12x, true, kNone, kContinue | kRemainder, kVerifyRegAWide | kVerifyRegBWide) \
+  V(0xD0, ADD_INT_LIT16, "add-int/lit16", k22s, true, kNone, kContinue | kAdd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xD1, RSUB_INT, "rsub-int", k22s, true, kNone, kContinue | kSubtract | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xD2, MUL_INT_LIT16, "mul-int/lit16", k22s, true, kNone, kContinue | kMultiply | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xD3, DIV_INT_LIT16, "div-int/lit16", k22s, true, kNone, kContinue | kThrow | kDivide | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xD4, REM_INT_LIT16, "rem-int/lit16", k22s, true, kNone, kContinue | kThrow | kRemainder | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xD5, AND_INT_LIT16, "and-int/lit16", k22s, true, kNone, kContinue | kAnd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xD6, OR_INT_LIT16, "or-int/lit16", k22s, true, kNone, kContinue | kOr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xD7, XOR_INT_LIT16, "xor-int/lit16", k22s, true, kNone, kContinue | kXor | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xD8, ADD_INT_LIT8, "add-int/lit8", k22b, true, kNone, kContinue | kAdd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xD9, RSUB_INT_LIT8, "rsub-int/lit8", k22b, true, kNone, kContinue | kSubtract | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xDA, MUL_INT_LIT8, "mul-int/lit8", k22b, true, kNone, kContinue | kMultiply | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xDB, DIV_INT_LIT8, "div-int/lit8", k22b, true, kNone, kContinue | kThrow | kDivide | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xDC, REM_INT_LIT8, "rem-int/lit8", k22b, true, kNone, kContinue | kThrow | kRemainder | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xDD, AND_INT_LIT8, "and-int/lit8", k22b, true, kNone, kContinue | kAnd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xDE, OR_INT_LIT8, "or-int/lit8", k22b, true, kNone, kContinue | kOr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xDF, XOR_INT_LIT8, "xor-int/lit8", k22b, true, kNone, kContinue | kXor | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xE0, SHL_INT_LIT8, "shl-int/lit8", k22b, true, kNone, kContinue | kShl | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xE1, SHR_INT_LIT8, "shr-int/lit8", k22b, true, kNone, kContinue | kShr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xE2, USHR_INT_LIT8, "ushr-int/lit8", k22b, true, kNone, kContinue | kUshr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xE3, IGET_QUICK, "iget-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xE4, IGET_WIDE_QUICK, "iget-wide-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB) \
+  V(0xE5, IGET_OBJECT_QUICK, "iget-object-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xE6, IPUT_QUICK, "iput-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+  V(0xE7, IPUT_WIDE_QUICK, "iput-wide-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB) \
+  V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
   V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArg) \
   V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgRange) \
   V(0xEB, UNUSED_EB, "unused-eb", k10x, false, kUnknown, 0, kVerifyError) \
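
Every row edit in this file is mechanical because the table is an X-macro: consumers re-expand DEX_INSTRUCTION_LIST with their own definition of V to build name, format, and flag tables in lockstep, so ORing a flag into a row here updates all of them at once. A reduced sketch of that consumption pattern (three toy columns instead of the real eight):

    // Toy list standing in for DEX_INSTRUCTION_LIST.
    #define TOY_LIST(V)            \
      V(0x90, ADD_INT, "add-int")  \
      V(0x91, SUB_INT, "sub-int")

    // Each consumer picks the columns it needs, then expands the list.
    #define TOY_NAME(opcode, id, name) name,
    static const char* const kToyNames[] = { TOY_LIST(TOY_NAME) };
    #undef TOY_NAME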
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index f1795a5..58b4286 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -385,31 +385,36 @@
 
 template<InvokeType type, bool access_check>
 static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
-                                                    mirror::Object* this_object,
-                                                    mirror::ArtMethod* referrer, Thread* self) {
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  StackHandleScope<1> hs(self);
-  Handle<mirror::Object> handle_scope_this(hs.NewHandle(type == kStatic ? nullptr : this_object));
-  mirror::ArtMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type);
+                                                    mirror::Object** this_object,
+                                                    mirror::ArtMethod** referrer, Thread* self) {
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+  mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer, type);
+  if (resolved_method == nullptr) {
+    StackHandleScope<1> hs(self);
+    mirror::Object* null_this = nullptr;
+    HandleWrapper<mirror::Object> h_this(
+        hs.NewHandleWrapper(type == kStatic ? &null_this : this_object));
+    resolved_method = class_linker->ResolveMethod(self, method_idx, referrer, type);
+  }
   if (UNLIKELY(resolved_method == nullptr)) {
     DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
     return nullptr;  // Failure.
-  } else if (UNLIKELY(handle_scope_this.Get() == nullptr && type != kStatic)) {
+  } else if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
     // Maintain interpreter-like semantics where NullPointerException is thrown
     // after potential NoSuchMethodError from class linker.
     ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-    DCHECK(referrer == throw_location.GetMethod());
+    DCHECK_EQ(*referrer, throw_location.GetMethod());
     ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type);
     return nullptr;  // Failure.
   } else if (access_check) {
     // Incompatible class change should have been handled in resolve method.
     if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) {
       ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method,
-                                        referrer);
+                                        *referrer);
       return nullptr;  // Failure.
     }
     mirror::Class* methods_class = resolved_method->GetDeclaringClass();
-    mirror::Class* referring_class = referrer->GetDeclaringClass();
+    mirror::Class* referring_class = (*referrer)->GetDeclaringClass();
     bool can_access_resolved_method =
         referring_class->CheckResolvedMethodAccess<type>(methods_class, resolved_method,
                                                          method_idx);
@@ -423,7 +428,7 @@
     case kDirect:
       return resolved_method;
     case kVirtual: {
-      mirror::ObjectArray<mirror::ArtMethod>* vtable = handle_scope_this->GetClass()->GetVTable();
+      mirror::ObjectArray<mirror::ArtMethod>* vtable = (*this_object)->GetClass()->GetVTable();
       uint16_t vtable_index = resolved_method->GetMethodIndex();
       if (access_check &&
           (vtable == nullptr || vtable_index >= static_cast<uint32_t>(vtable->GetLength()))) {
@@ -437,7 +442,7 @@
       return vtable->GetWithoutChecks(vtable_index);
     }
     case kSuper: {
-      mirror::Class* super_class = referrer->GetDeclaringClass()->GetSuperClass();
+      mirror::Class* super_class = (*referrer)->GetDeclaringClass()->GetSuperClass();
       uint16_t vtable_index = resolved_method->GetMethodIndex();
       mirror::ObjectArray<mirror::ArtMethod>* vtable;
       if (access_check) {
@@ -460,20 +465,19 @@
     }
     case kInterface: {
       uint32_t imt_index = resolved_method->GetDexMethodIndex() % ClassLinker::kImtSize;
-      mirror::ObjectArray<mirror::ArtMethod>* imt_table = handle_scope_this->GetClass()->GetImTable();
+      mirror::ObjectArray<mirror::ArtMethod>* imt_table = (*this_object)->GetClass()->GetImTable();
       mirror::ArtMethod* imt_method = imt_table->Get(imt_index);
       if (!imt_method->IsImtConflictMethod()) {
         return imt_method;
       } else {
         mirror::ArtMethod* interface_method =
-            handle_scope_this->GetClass()->FindVirtualMethodForInterface(resolved_method);
+            (*this_object)->GetClass()->FindVirtualMethodForInterface(resolved_method);
         if (UNLIKELY(interface_method == nullptr)) {
           ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method,
-                                                                     handle_scope_this.Get(), referrer);
+                                                                     *this_object, *referrer);
           return nullptr;  // Failure.
-        } else {
-          return interface_method;
         }
+        return interface_method;
       }
     }
     default:
@@ -486,8 +490,8 @@
 #define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check)                 \
   template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                       \
   mirror::ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx,         \
-                                                              mirror::Object* this_object, \
-                                                              mirror::ArtMethod* referrer, \
+                                                              mirror::Object** this_object, \
+                                                              mirror::ArtMethod** referrer, \
                                                               Thread* self)
 #define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
     EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, false);   \
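
FindMethodFromCode now takes this_object and referrer by double pointer: resolution can suspend the thread, a moving collector may relocate those objects, and the HandleWrapper above writes the updated addresses back into the caller's slots when it goes out of scope. A self-contained toy of that write-back contract (all names hypothetical):

    struct Obj { int v; };
    static Obj before_gc{0}, after_gc{0};

    // Stand-in for a suspension point during which a moving GC relocates `o`.
    static Obj* SuspensionPoint(Obj* o) { return o == &before_gc ? &after_gc : o; }

    // Mirrors the new signature: the callee refreshes the caller's root after
    // any operation that may have moved the object.
    static void ResolveLike(Obj** this_object) {
      *this_object = SuspensionPoint(*this_object);
    }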
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 3f02ec7..f2e2bf7 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -25,6 +25,7 @@
 
 namespace art {
 
+// TODO: Make the MethodHelper here be compaction safe.
 extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
                                                    const DexFile::CodeItem* code_item,
                                                    ShadowFrame* shadow_frame, JValue* result) {
@@ -43,6 +44,8 @@
       }
       self->PopShadowFrame();
       CHECK(h_class->IsInitializing());
+      // Reload from shadow frame in case the method moved; this is faster than adding a handle.

+      method = shadow_frame->GetMethod();
     }
   }
   uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
index d34b097..3a898e8 100644
--- a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
@@ -23,17 +23,20 @@
 
 template<InvokeType type, bool access_check>
 mirror::ArtMethod* FindMethodHelper(uint32_t method_idx, mirror::Object* this_object,
-                                    mirror::ArtMethod* caller_method, Thread* thread) {
+                                    mirror::ArtMethod* caller_method, Thread* self) {
   mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method,
                                              access_check, type);
   if (UNLIKELY(method == NULL)) {
-    method = FindMethodFromCode<type, access_check>(method_idx, this_object, caller_method, thread);
+    // Note: This can cause thread suspension.
+    self->AssertThreadSuspensionIsAllowable();
+    method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
+                                                    self);
     if (UNLIKELY(method == NULL)) {
-      CHECK(thread->IsExceptionPending());
+      CHECK(self->IsExceptionPending());
       return 0;  // failure
     }
   }
-  DCHECK(!thread->IsExceptionPending());
+  DCHECK(!self->IsExceptionPending());
   const void* code = method->GetEntryPointFromPortableCompiledCode();
 
   // When we return, the caller will branch to this address, so it had better not be 0!
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index 17c3222..3756f47 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -317,11 +317,11 @@
 // Lazily resolve a method for portable. Called by stub code.
 extern "C" const void* artPortableResolutionTrampoline(mirror::ArtMethod* called,
                                                        mirror::Object* receiver,
-                                                       Thread* thread,
+                                                       Thread* self,
                                                        mirror::ArtMethod** called_addr)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   uint32_t dex_pc;
-  mirror::ArtMethod* caller = thread->GetCurrentMethod(&dex_pc);
+  mirror::ArtMethod* caller = self->GetCurrentMethod(&dex_pc);
 
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
   InvokeType invoke_type;
@@ -379,7 +379,7 @@
         is_range = true;
     }
     uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
-    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+    called = linker->ResolveMethod(Thread::Current(), dex_method_idx, &caller, invoke_type);
     // Incompatible class change should have been handled in resolve method.
     CHECK(!called->CheckIncompatibleClassChange(invoke_type));
     // Refine called method based on receiver.
@@ -395,9 +395,9 @@
     CHECK(!called->CheckIncompatibleClassChange(invoke_type));
   }
   const void* code = nullptr;
-  if (LIKELY(!thread->IsExceptionPending())) {
+  if (LIKELY(!self->IsExceptionPending())) {
     // Ensure that the called method's class is initialized.
-    StackHandleScope<1> hs(Thread::Current());
+    StackHandleScope<1> hs(self);
     Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
     linker->EnsureInitialized(called_class, true, true);
     if (LIKELY(called_class->IsInitialized())) {
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 3fd4adc..b582abb 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -26,12 +26,12 @@
 }  // namespace mirror
 
 // Place a special frame at the TOS that will save the callee saves for the given type.
-static inline void FinishCalleeSaveFrameSetup(Thread* self, mirror::ArtMethod** sp,
+static inline void FinishCalleeSaveFrameSetup(Thread* self, StackReference<mirror::ArtMethod>* sp,
                                               Runtime::CalleeSaveType type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Be aware the store below may well stomp on an incoming argument.
   Locks::mutator_lock_->AssertSharedHeld(self);
-  *sp = Runtime::Current()->GetCalleeSaveMethod(type);
+  sp->Assign(Runtime::Current()->GetCalleeSaveMethod(type));
   self->SetTopOfStack(sp, 0);
   self->VerifyStack();
 }
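
The sp parameter becomes StackReference<mirror::ArtMethod>* because the managed stack slot holds a 32-bit compressed reference even on 64-bit targets, so a raw ArtMethod** no longer describes the slot's layout; Assign() above is the store that respects it. A simplified stand-in for such a slot type (not ART's real class):

    #include <cstdint>

    template <typename T>
    class Slot32 {  // hypothetical name for a StackReference-style slot
     public:
      void Assign(T* ptr) {
        ref_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
      }
      T* Get() const {
        return reinterpret_cast<T*>(static_cast<uintptr_t>(ref_));
      }
     private:
      uint32_t ref_;  // fixed 4-byte slot, independent of pointer width
    };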
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index ccc0f3d..3301254 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -27,32 +27,36 @@
 
 #define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
 extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
-    uint32_t type_idx, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+    uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
+    StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
 } \
 extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
-    mirror::Class* klass, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+    mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
+    StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
 } \
 extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
-    mirror::Class* klass, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+    mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
+    StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
 } \
 extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
-    uint32_t type_idx, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+    uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
+    StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
 } \
 extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
     uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
-    mirror::ArtMethod** sp) \
+    StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   return AllocArrayFromCode<false, instrumented_bool>(type_idx, method, component_count, self, \
@@ -60,7 +64,7 @@
 } \
 extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
     mirror::Class* klass, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
-    mirror::ArtMethod** sp) \
+    StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, method, component_count, self, \
@@ -68,7 +72,7 @@
 } \
 extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
     uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
-    mirror::ArtMethod** sp) \
+    StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   return AllocArrayFromCode<true, instrumented_bool>(type_idx, method, component_count, self, \
@@ -76,7 +80,7 @@
 } \
 extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
     uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
-    mirror::ArtMethod** sp) \
+    StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   if (!instrumented_bool) { \
@@ -87,7 +91,7 @@
 } \
 extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
     uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
-    mirror::ArtMethod** sp) \
+    StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   if (!instrumented_bool) { \
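
Every quick entrypoint in this change swaps its last parameter from mirror::ArtMethod** to StackReference<mirror::ArtMethod>*: the method slot at the top of a quick frame is now a compressed 32-bit reference rather than a full native pointer, so frame layout stays uniform on 64-bit targets. A minimal sketch of the idea (the real class lives in runtime/stack.h; the name and layout here are illustrative):

    #include <cstdint>

    // Sketch: a stack slot holding a heap reference compressed to 32 bits,
    // assuming heap addresses fit in the low 4 GiB.
    template <typename MirrorType>
    class StackReferenceSketch {
     public:
      MirrorType* AsMirrorPtr() const {
        return reinterpret_cast<MirrorType*>(static_cast<uintptr_t>(reference_));
      }
      void Assign(MirrorType* other) {
        reference_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(other));
      }
     private:
      uint32_t reference_;
    };

This is also why later hunks compute frame offsets with sizeof(StackReference<mirror::ArtMethod>) instead of a GPR spill size.
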
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 6448045..47fb9d6 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -28,7 +28,7 @@
 
 namespace art {
 
-extern "C" void artDeoptimize(Thread* self, mirror::ArtMethod** sp)
+extern "C" void artDeoptimize(Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
   self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index ab428a5..53c9b97 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -28,7 +28,7 @@
 extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx,
                                                              mirror::ArtMethod* referrer,
                                                              Thread* self,
-                                                             mirror::ArtMethod** sp)
+                                                             StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Called to ensure static storage base is initialized for direct static field reads and writes.
   // A class may be accessing another class' fields when it doesn't have access, as access has been
@@ -39,7 +39,8 @@
 
 extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx,
                                                     mirror::ArtMethod* referrer,
-                                                    Thread* self, mirror::ArtMethod** sp)
+                                                    Thread* self,
+                                                    StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Called when method->dex_cache_resolved_types_[] misses.
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
@@ -47,10 +48,9 @@
 }
 
 extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx,
-                                                                   mirror::ArtMethod* referrer,
-                                                                   Thread* self,
-                                                                   mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* referrer,
+    Thread* self,
+    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Called when caller isn't guaranteed to have access to a type and the dex cache may be
   // unpopulated.
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
@@ -59,7 +59,8 @@
 
 extern "C" mirror::String* artResolveStringFromCode(mirror::ArtMethod* referrer,
                                                     int32_t string_idx,
-                                                    Thread* self, mirror::ArtMethod** sp)
+                                                    Thread* self,
+                                                    StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
   return ResolveStringFromCode(referrer, string_idx);
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index ec69e28..7bd1582 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -98,7 +98,6 @@
   int32_t (*pCmplDouble)(double, double);
   int32_t (*pCmplFloat)(float, float);
   double (*pFmod)(double, double);
-  double (*pSqrt)(double);
   double (*pL2d)(int64_t);
   float (*pFmodf)(float, float);
   float (*pL2f)(int64_t);
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index c38a595..844367d 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -27,7 +27,7 @@
 
 extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
                                            mirror::ArtMethod* referrer,
-                                           Thread* self, mirror::ArtMethod** sp)
+                                           Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
                                           sizeof(int32_t));
@@ -44,7 +44,7 @@
 
 extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
                                            mirror::ArtMethod* referrer,
-                                           Thread* self, mirror::ArtMethod** sp)
+                                           Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
                                           sizeof(int64_t));
@@ -61,7 +61,8 @@
 
 extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
                                                    mirror::ArtMethod* referrer,
-                                                   Thread* self, mirror::ArtMethod** sp)
+                                                   Thread* self,
+                                                   StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
                                           sizeof(mirror::HeapReference<mirror::Object>));
@@ -79,7 +80,7 @@
 
 extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                              mirror::ArtMethod* referrer, Thread* self,
-                                             mirror::ArtMethod** sp)
+                                             StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
                                           sizeof(int32_t));
@@ -102,7 +103,7 @@
 
 extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                              mirror::ArtMethod* referrer, Thread* self,
-                                             mirror::ArtMethod** sp)
+                                             StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
                                           sizeof(int64_t));
@@ -126,7 +127,7 @@
 extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                                      mirror::ArtMethod* referrer,
                                                      Thread* self,
-                                                     mirror::ArtMethod** sp)
+                                                     StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
                                           sizeof(mirror::HeapReference<mirror::Object>));
@@ -149,7 +150,7 @@
 
 extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
                                       mirror::ArtMethod* referrer, Thread* self,
-                                      mirror::ArtMethod** sp)
+                                      StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
                                           sizeof(int32_t));
@@ -169,7 +170,8 @@
 }
 
 extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
-                                      uint64_t new_value, Thread* self, mirror::ArtMethod** sp)
+                                      uint64_t new_value, Thread* self,
+                                      StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
                                           sizeof(int64_t));
@@ -190,7 +192,7 @@
 
 extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value,
                                        mirror::ArtMethod* referrer, Thread* self,
-                                       mirror::ArtMethod** sp)
+                                       StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
                                           sizeof(mirror::HeapReference<mirror::Object>));
@@ -214,7 +216,7 @@
 
 extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
                                         mirror::ArtMethod* referrer, Thread* self,
-                                        mirror::ArtMethod** sp)
+                                        StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                           sizeof(int32_t));
@@ -240,13 +242,15 @@
 }
 
 extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value,
-                                        Thread* self, mirror::ArtMethod** sp)
+                                        Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   Runtime* runtime = Runtime::Current();
   mirror::ArtMethod* callee_save = runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
   uint32_t frame_size =
       runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly).FrameSizeInBytes();
-  mirror::ArtMethod* referrer = sp[frame_size / sizeof(mirror::ArtMethod*)];
+  mirror::ArtMethod* referrer =
+      reinterpret_cast<StackReference<mirror::ArtMethod>*>(
+          reinterpret_cast<uint8_t*>(sp) + frame_size)->AsMirrorPtr();
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                           sizeof(int64_t));
   if (LIKELY(field != NULL  && obj != NULL)) {
@@ -254,7 +258,7 @@
     field->Set64<false>(obj, new_value);
     return 0;  // success
   }
-  *sp = callee_save;
+  sp->Assign(callee_save);
   self->SetTopOfStack(sp, 0);
   field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
                                                           sizeof(int64_t));
@@ -274,7 +278,7 @@
 extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                          mirror::Object* new_value,
                                          mirror::ArtMethod* referrer, Thread* self,
-                                         mirror::ArtMethod** sp)
+                                         StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
                                           sizeof(mirror::HeapReference<mirror::Object>));
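
artSet64InstanceFromCode takes no referrer argument, so the patch recovers the caller by stepping frame_size bytes past sp and reading the StackReference stored there, replacing the old pointer-indexing arithmetic. A hedged sketch of that address computation (the helper name is invented):

    #include <cstdint>

    // Return the caller's method slot sitting immediately above a
    // callee-save frame of frame_size bytes, given a typed frame pointer.
    template <typename SlotType>
    SlotType* CallerSlot(SlotType* sp, uint32_t frame_size) {
      uint8_t* raw = reinterpret_cast<uint8_t*>(sp);
      return reinterpret_cast<SlotType*>(raw + frame_size);
    }

The byte-wise cast matters: a StackReference slot is 4 bytes even on 64-bit targets, so the old sp[frame_size / sizeof(mirror::ArtMethod*)] indexing would no longer address the right slot under the new layout.
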
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index 8dac750..4ec2879 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -39,7 +39,7 @@
  */
 extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array,
                                               const Instruction::ArrayDataPayload* payload,
-                                              Thread* self, mirror::ArtMethod** sp)
+                                              Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
   DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 11a4b3b..6ef075d 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -26,7 +26,7 @@
 extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod* method,
                                                              mirror::Object* this_object,
                                                              Thread* self,
-                                                             mirror::ArtMethod** sp,
+                                                             StackReference<mirror::ArtMethod>* sp,
                                                              uintptr_t lr)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
@@ -40,7 +40,8 @@
   return result;
 }
 
-extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::ArtMethod** sp,
+extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self,
+                                                         StackReference<mirror::ArtMethod>* sp,
                                                          uint64_t gpr_result, uint64_t fpr_result)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // TODO: use FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly) not the hand inlined below.
@@ -50,7 +51,7 @@
   Locks::mutator_lock_->AssertSharedHeld(self);
   Runtime* runtime = Runtime::Current();
   mirror::ArtMethod* callee_save = runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
-  *sp = callee_save;
+  sp->Assign(callee_save);
   uint32_t return_pc_offset = callee_save->GetReturnPcOffsetInBytes(
       runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly).FrameSizeInBytes());
   uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) +
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 5d36b4c..140b075 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -34,7 +34,7 @@
   DCHECK(env != nullptr);
   uint32_t saved_local_ref_cookie = env->local_ref_cookie;
   env->local_ref_cookie = env->locals.GetSegmentState();
-  mirror::ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
+  mirror::ArtMethod* native_method = self->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr();
   if (!native_method->IsFastNative()) {
     // When not fast JNI we transition out of runnable.
     self->TransitionFromRunnableToSuspended(kNative);
@@ -49,7 +49,7 @@
 
 // TODO: NO_THREAD_SAFETY_ANALYSIS due to different control paths depending on fast JNI.
 static void GoToRunnable(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
-  mirror::ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
+  mirror::ArtMethod* native_method = self->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr();
   bool is_fast = native_method->IsFastNative();
   if (!is_fast) {
     self->TransitionFromSuspendedToRunnable();
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 817d053..92c0841 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -20,7 +20,8 @@
 
 namespace art {
 
-extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self, mirror::ArtMethod** sp)
+extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self,
+                                     StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
     NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
@@ -42,7 +43,8 @@
   }
 }
 
-extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self, mirror::ArtMethod** sp)
+extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self,
+                                       StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
     NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 53e725e..f61c754 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -28,7 +28,7 @@
   CheckSuspend(thread);
 }
 
-extern "C" void artTestSuspendFromCode(Thread* thread, mirror::ArtMethod** sp)
+extern "C" void artTestSuspendFromCode(Thread* thread, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Called when suspend count check value is 0 and thread->suspend_count_ != 0
   FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 31eacac..e6f294a 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -25,7 +25,8 @@
 namespace art {
 
 // Deliver an exception that's pending on thread helping set up a callee save frame on the way.
-extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::ArtMethod** sp)
+extern "C" void artDeliverPendingExceptionFromCode(Thread* thread,
+                                                   StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
   thread->QuickDeliverException();
@@ -33,7 +34,7 @@
 
 // Called by generated call to throw an exception.
 extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self,
-                                            mirror::ArtMethod** sp)
+                                            StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   /*
    * exception may be NULL, in which case this routine should
@@ -55,7 +56,7 @@
 
 // Called by generated call to throw a NPE exception.
 extern "C" void artThrowNullPointerExceptionFromCode(Thread* self,
-                                                     mirror::ArtMethod** sp)
+                                                     StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
   ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -64,8 +65,7 @@
 }
 
 // Called by generated call to throw an arithmetic divide by zero exception.
-extern "C" void artThrowDivZeroFromCode(Thread* self,
-                                        mirror::ArtMethod** sp)
+extern "C" void artThrowDivZeroFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
   ThrowArithmeticExceptionDivideByZero();
@@ -74,14 +74,14 @@
 
 // Called by generated call to throw an array index out of bounds exception.
 extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self,
-                                            mirror::ArtMethod** sp)
+                                            StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
   ThrowArrayIndexOutOfBoundsException(index, length);
   self->QuickDeliverException();
 }
 
-extern "C" void artThrowStackOverflowFromCode(Thread* self, mirror::ArtMethod** sp)
+extern "C" void artThrowStackOverflowFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
   ThrowStackOverflowError(self);
@@ -89,7 +89,7 @@
 }
 
 extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self,
-                                             mirror::ArtMethod** sp)
+                                             StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
   ThrowNoSuchMethodError(method_idx);
@@ -97,7 +97,7 @@
 }
 
 extern "C" void artThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type,
-                                           Thread* self, mirror::ArtMethod** sp)
+                                           Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
   CHECK(!dest_type->IsAssignableFrom(src_type));
@@ -106,7 +106,7 @@
 }
 
 extern "C" void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
-                                            Thread* self, mirror::ArtMethod** sp)
+                                            Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
   ThrowArrayStoreException(value->GetClass(), array->GetClass());
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index ee276c1..1d524cb 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -195,22 +195,22 @@
 #endif
 
  public:
-  static mirror::ArtMethod* GetCallingMethod(mirror::ArtMethod** sp)
+  static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK((*sp)->IsCalleeSaveMethod());
+    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
     byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
-    return *reinterpret_cast<mirror::ArtMethod**>(previous_sp);
+    return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
   }
 
   // For the given quick ref and args quick frame, return the caller's PC.
-  static uintptr_t GetCallingPc(mirror::ArtMethod** sp)
+  static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK((*sp)->IsCalleeSaveMethod());
+    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
     byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
     return *reinterpret_cast<uintptr_t*>(lr);
   }
 
-  QuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static,
+  QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                        const char* shorty, uint32_t shorty_len)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
       is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
@@ -383,12 +383,12 @@
     if (kQuickSoftFloatAbi) {
       CHECK_EQ(kNumQuickFprArgs, 0U);
       return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
-          + GetBytesPerGprSpillLocation(kRuntimeISA) /* ArtMethod* */;
+          + sizeof(StackReference<mirror::ArtMethod>) /* StackReference<ArtMethod> */;
     } else {
       // For now, there is no reg-spill area for the targets with
       // hard float ABI. So, the offset pointing to the first method's
       // parameter ('this' for non-static methods) should be returned.
-      return GetBytesPerGprSpillLocation(kRuntimeISA);  // Skip Method*.
+      return sizeof(StackReference<mirror::ArtMethod>);  // Skip StackReference<ArtMethod>.
     }
   }
 
@@ -410,8 +410,9 @@
 // Visits arguments on the stack placing them into the shadow frame.
 class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
  public:
-  BuildQuickShadowFrameVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
-                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
+  BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
+                               const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
+                               size_t first_arg_reg) :
     QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
 
   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -457,7 +458,7 @@
 }
 
 extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
-                                                mirror::ArtMethod** sp)
+                                                StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Ensure we don't get thread suspension until the object arguments are safely in the shadow
   // frame.
@@ -510,9 +511,9 @@
 // to jobjects.
 class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
  public:
-  BuildQuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
-                            uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
-                            std::vector<jvalue>* args) :
+  BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
+                            const char* shorty, uint32_t shorty_len,
+                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
     QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
 
   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -577,7 +578,7 @@
 // field within the proxy object, which will box the primitive arguments and deal with error cases.
 extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
                                                mirror::Object* receiver,
-                                               Thread* self, mirror::ArtMethod** sp)
+                                               Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
   DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
@@ -585,7 +586,7 @@
   const char* old_cause =
       self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
   // Register the top of the managed stack, making stack crawlable.
-  DCHECK_EQ(*sp, proxy_method) << PrettyMethod(proxy_method);
+  DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
   self->SetTopOfStack(sp, 0);
   DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
             Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
@@ -629,8 +630,9 @@
 // so they don't get garbage collected.
 class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
  public:
-  RememberForGcArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
-                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
+  RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
+                               const char* shorty, uint32_t shorty_len,
+                               ScopedObjectAccessUnchecked* soa) :
     QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
 
   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -666,7 +668,8 @@
 // Lazily resolve a method for quick. Called by stub code.
 extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
                                                     mirror::Object* receiver,
-                                                    Thread* self, mirror::ArtMethod** sp)
+                                                    Thread* self,
+                                                    StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
   // Start new JNI local reference state
@@ -755,11 +758,12 @@
   self->EndAssertNoThreadSuspension(old_cause);
   bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
   // Resolve method filling in dex cache.
-  if (called->IsRuntimeMethod()) {
+  if (UNLIKELY(called->IsRuntimeMethod())) {
     StackHandleScope<1> hs(self);
-    Handle<mirror::Object> handle_scope_receiver(hs.NewHandle(virtual_or_interface ? receiver : nullptr));
-    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
-    receiver = handle_scope_receiver.Get();
+    mirror::Object* dummy = nullptr;
+    HandleWrapper<mirror::Object> h_receiver(
+        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
+    called = linker->ResolveMethod(self, dex_method_idx, &caller, invoke_type);
   }
   const void* code = NULL;
   if (LIKELY(!self->IsExceptionPending())) {
@@ -820,7 +824,7 @@
   // Fixup any locally saved objects may have moved during a GC.
   visitor.FixupReferences();
   // Place called method in callee-save frame to be placed as first argument to quick method.
-  *sp = called;
+  sp->Assign(called);
   return code;
 }
 
@@ -1170,14 +1174,14 @@
   }
 
   // WARNING: After this, *sp won't be pointing to the method anymore!
-  void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
-                     void* sp, HandleScope** table, uint32_t* handle_scope_entries,
-                     uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
-                     void** code_return, size_t* overall_size)
+  void ComputeLayout(StackReference<mirror::ArtMethod>** m, bool is_static, const char* shorty,
+                     uint32_t shorty_len, void* sp, HandleScope** table,
+                     uint32_t* handle_scope_entries, uintptr_t** start_stack, uintptr_t** start_gpr,
+                     uint32_t** start_fpr, void** code_return, size_t* overall_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ComputeAll(is_static, shorty, shorty_len);
 
-    mirror::ArtMethod* method = **m;
+    mirror::ArtMethod* method = (*m)->AsMirrorPtr();
 
     uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
 
@@ -1185,20 +1189,30 @@
     // We have to squeeze in the HandleScope, and relocate the method pointer.
 
     // "Free" the slot for the method.
-    sp8 += kPointerSize;
+    sp8 += kPointerSize;  // In the callee-save frame we use a full pointer.
 
-    // Add the HandleScope.
+    // Under the callee saves, put the handle scope and the new method stack reference.
     *handle_scope_entries = num_handle_scope_references_;
-    size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSize(num_handle_scope_references_);
-    sp8 -= handle_scope_size;
-    *table = reinterpret_cast<HandleScope*>(sp8);
+
+    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
+    size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
+
+    sp8 -= scope_and_method;
+    // Align by kStackAlignment
+    uintptr_t sp_to_align = reinterpret_cast<uintptr_t>(sp8);
+    sp_to_align = RoundDown(sp_to_align, kStackAlignment);
+    sp8 = reinterpret_cast<uint8_t*>(sp_to_align);
+
+    uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
+    *table = reinterpret_cast<HandleScope*>(sp8_table);
     (*table)->SetNumberOfReferences(num_handle_scope_references_);
 
     // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
-    sp8 -= kPointerSize;
     uint8_t* method_pointer = sp8;
-    *(reinterpret_cast<mirror::ArtMethod**>(method_pointer)) = method;
-    *m = reinterpret_cast<mirror::ArtMethod**>(method_pointer);
+    StackReference<mirror::ArtMethod>* new_method_ref =
+        reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
+    new_method_ref->Assign(method);
+    *m = new_method_ref;
 
     // Reference cookie and padding
     sp8 -= 8;
@@ -1305,21 +1319,21 @@
 // of transitioning into native code.
 class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
  public:
-  BuildGenericJniFrameVisitor(mirror::ArtMethod*** sp, bool is_static, const char* shorty,
-                              uint32_t shorty_len, Thread* self) :
+  BuildGenericJniFrameVisitor(StackReference<mirror::ArtMethod>** sp, bool is_static,
+                              const char* shorty, uint32_t shorty_len, Thread* self) :
       QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
     ComputeGenericJniFrameSize fsc;
     fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &handle_scope_, &handle_scope_expected_refs_,
                       &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
                       &alloca_used_size_);
     handle_scope_number_of_references_ = 0;
-    cur_hs_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstHandleScopeEntry());
+    cur_hs_entry_ = GetFirstHandleScopeEntry();
 
     // jni environment is always first argument
     sm_.AdvancePointer(self->GetJniEnv());
 
     if (is_static) {
-      sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
+      sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
     }
   }
 
@@ -1327,7 +1341,13 @@
 
   void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  jobject GetFirstHandleScopeEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  StackReference<mirror::Object>* GetFirstHandleScopeEntry()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return handle_scope_->GetHandle(0).GetReference();
+  }
+
+  jobject GetFirstHandleScopeJObject()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return handle_scope_->GetHandle(0).ToJObject();
   }
 
@@ -1481,9 +1501,9 @@
  * 1) How many bytes of the alloca can be released, if the value is non-negative.
  * 2) An error, if the value is negative.
  */
-extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod** sp)
+extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::ArtMethod* called = *sp;
+  mirror::ArtMethod* called = sp->AsMirrorPtr();
   DCHECK(called->IsNative()) << PrettyMethod(called, true);
 
   // run the visitor
@@ -1502,7 +1522,7 @@
   // Start JNI, save the cookie.
   uint32_t cookie;
   if (called->IsSynchronized()) {
-    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeEntry(), self);
+    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
     if (self->IsExceptionPending()) {
       self->PopHandleScope();
       // A negative value denotes an error.
@@ -1530,7 +1550,7 @@
       DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
 
       // End JNI, as the assembly will move to deliver the exception.
-      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeEntry() : nullptr;
+      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
       if (mh.GetShorty()[0] == 'L') {
         artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
       } else {
@@ -1555,17 +1575,18 @@
  * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
  * unlocking.
  */
-extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
+extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
+                                                    StackReference<mirror::ArtMethod>* sp,
                                                     jvalue result, uint64_t result_f)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
-  mirror::ArtMethod* called = *sp;
+  mirror::ArtMethod* called = sp->AsMirrorPtr();
   uint32_t cookie = *(sp32 - 1);
 
   jobject lock = nullptr;
   if (called->IsSynchronized()) {
     HandleScope* table = reinterpret_cast<HandleScope*>(
-        reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+        reinterpret_cast<uint8_t*>(sp) + sizeof(StackReference<mirror::ArtMethod>));
     lock = table->GetHandle(0).ToJObject();
   }
 
@@ -1662,12 +1683,12 @@
 template<InvokeType type, bool access_check>
 static MethodAndCode artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
                                      mirror::ArtMethod* caller_method,
-                                     Thread* self, mirror::ArtMethod** sp);
+                                     Thread* self, StackReference<mirror::ArtMethod>* sp);
 
 template<InvokeType type, bool access_check>
 static MethodAndCode artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
                                      mirror::ArtMethod* caller_method,
-                                     Thread* self, mirror::ArtMethod** sp) {
+                                     Thread* self, StackReference<mirror::ArtMethod>* sp) {
   mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
                                              type);
   if (UNLIKELY(method == nullptr)) {
@@ -1681,7 +1702,8 @@
       ScopedObjectAccessUnchecked soa(self->GetJniEnv());
       RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
       visitor.VisitArguments();
-      method = FindMethodFromCode<type, access_check>(method_idx, this_object, caller_method, self);
+      method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
+                                                      self);
       visitor.FixupReferences();
     }
 
@@ -1706,7 +1728,8 @@
   MethodAndCode artInvokeCommon<type, access_check>(uint32_t method_idx,                        \
                                                     mirror::Object* this_object,                \
                                                     mirror::ArtMethod* caller_method,           \
-                                                    Thread* self, mirror::ArtMethod** sp)       \
+                                                    Thread* self,                               \
+                                                    StackReference<mirror::ArtMethod>* sp)      \
 
 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
@@ -1723,48 +1746,43 @@
 
 // See comments in runtime_support_asm.S
 extern "C" MethodAndCode artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
-                                                                     mirror::Object* this_object,
-                                                                     mirror::ArtMethod* caller_method,
-                                                                     Thread* self,
-                                                                     mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Object* this_object,
+    mirror::ArtMethod* caller_method,
+    Thread* self,
+    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
 }
 
 
 extern "C" MethodAndCode artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
-                                                                  mirror::Object* this_object,
-                                                                  mirror::ArtMethod* caller_method,
-                                                                  Thread* self,
-                                                                  mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Object* this_object,
+    mirror::ArtMethod* caller_method,
+    Thread* self,
+    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
 }
 
 extern "C" MethodAndCode artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
-                                                                  mirror::Object* this_object,
-                                                                  mirror::ArtMethod* caller_method,
-                                                                  Thread* self,
-                                                                  mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Object* this_object,
+    mirror::ArtMethod* caller_method,
+    Thread* self,
+    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
 }
 
 extern "C" MethodAndCode artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
-                                                                 mirror::Object* this_object,
-                                                                 mirror::ArtMethod* caller_method,
-                                                                 Thread* self,
-                                                                 mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Object* this_object,
+    mirror::ArtMethod* caller_method,
+    Thread* self,
+    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
 }
 
 extern "C" MethodAndCode artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
-                                                                   mirror::Object* this_object,
-                                                                   mirror::ArtMethod* caller_method,
-                                                                   Thread* self,
-                                                                   mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Object* this_object,
+    mirror::ArtMethod* caller_method,
+    Thread* self,
+    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
 }
 
@@ -1772,7 +1790,8 @@
 extern "C" MethodAndCode artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
                                                       mirror::Object* this_object,
                                                       mirror::ArtMethod* caller_method,
-                                                      Thread* self, mirror::ArtMethod** sp)
+                                                      Thread* self,
+                                                      StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtMethod* method;
   if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
@@ -1871,7 +1890,7 @@
       ScopedObjectAccessUnchecked soa(self->GetJniEnv());
       RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
       visitor.VisitArguments();
-      method = FindMethodFromCode<kInterface, false>(dex_method_idx, this_object, caller_method,
+      method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method,
                                                      self);
       visitor.FixupReferences();
     }
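
The reworked ComputeLayout carves the HandleScope plus the relocated method reference out of the space below the callee saves and then realigns the pointer downward to kStackAlignment. A small sketch of that carve-out under an assumed alignment (ART defines kStackAlignment; 16 here is illustrative):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kStackAlignmentSketch = 16;  // Assumption for this sketch.

    inline uintptr_t RoundDownPow2(uintptr_t x, size_t n) {
      return x & ~(static_cast<uintptr_t>(n) - 1);  // n must be a power of two.
    }

    // Reserve scope_and_method bytes below sp8, then realign downward so the
    // code running on this stack next sees a properly aligned pointer.
    inline uint8_t* CarveOut(uint8_t* sp8, size_t scope_and_method) {
      sp8 -= scope_and_method;
      return reinterpret_cast<uint8_t*>(
          RoundDownPow2(reinterpret_cast<uintptr_t>(sp8), kStackAlignmentSketch));
    }

Rounding down rather than up is the safe direction on a downward-growing stack: it can only reserve extra bytes, never hand back part of the requested region.
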
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 751cdb6..99633a3 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -192,7 +192,9 @@
     fake_stack.push_back(0);
 
     // Set up thread to appear as if we called out of method_g_ at pc dex 3
-    thread->SetTopOfStack(reinterpret_cast<mirror::ArtMethod**>(&fake_stack[0]), method_g_->ToNativePc(dex_pc));  // return pc
+    thread->SetTopOfStack(
+        reinterpret_cast<StackReference<mirror::ArtMethod>*>(&fake_stack[0]),
+        method_g_->ToNativePc(dex_pc));  // return pc
   } else {
     // Create/push fake 20-byte shadow frame for method g
     fake_stack.push_back(0);
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 8d750c5..6b216c7 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -29,12 +29,22 @@
 #include "mirror/object-inl.h"
 #include "object_utils.h"
 #include "scoped_thread_state_change.h"
+#ifdef HAVE_ANDROID_OS
+#include "sigchain.h"
+#endif
 #include "verify_object-inl.h"
 
 namespace art {
 // Static fault manger object accessed by signal handler.
 FaultManager fault_manager;
 
+extern "C" {
+void art_sigsegv_fault() {
+  // Set a breakpoint here to be informed when a SIGSEGV is unhandled by ART.
+  VLOG(signals) << "Caught unknown SIGSEGV in ART fault handler - chaining to next handler.";
+}
+}
+
 // Signal handler called on SIGSEGV.
 static void art_fault_handler(int sig, siginfo_t* info, void* context) {
   fault_manager.HandleFault(sig, info, context);
@@ -45,9 +55,13 @@
 }
 
 FaultManager::~FaultManager() {
+#ifdef HAVE_ANDROID_OS
+  UnclaimSignalChain(SIGSEGV);
+#endif
   sigaction(SIGSEGV, &oldaction_, nullptr);   // Restore old handler.
 }
 
+
 void FaultManager::Init() {
   struct sigaction action;
   action.sa_sigaction = art_fault_handler;
@@ -56,7 +70,13 @@
 #if !defined(__mips__)
   action.sa_restorer = nullptr;
 #endif
+
+  // Set our signal handler now.
   sigaction(SIGSEGV, &action, &oldaction_);
+#ifdef HAVE_ANDROID_OS
+  // Make sure our signal handler is called before any user handlers.
+  ClaimSignalChain(SIGSEGV, &oldaction_);
+#endif
 }
 
 void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
@@ -79,8 +99,13 @@
       return;
     }
   }
-  VLOG(signals)<< "Caught unknown SIGSEGV in ART fault handler";
+  art_sigsegv_fault();
+
+#ifdef HAVE_ANDROID_OS
+  InvokeUserSignalHandler(sig, info, context);
+#else
   oldaction_.sa_sigaction(sig, info, context);
+#endif
 }
 
 void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
@@ -225,7 +250,8 @@
     manager_->GetMethodAndReturnPCAndSP(context, &method, &return_pc, &sp);
     Thread* self = Thread::Current();
     // Inside of generated code, sp[0] is the method, so sp is the frame.
-    mirror::ArtMethod** frame = reinterpret_cast<mirror::ArtMethod**>(sp);
+    StackReference<mirror::ArtMethod>* frame =
+        reinterpret_cast<StackReference<mirror::ArtMethod>*>(sp);
     self->SetTopOfStack(frame, 0);  // Since we don't necessarily have a dex pc, pass in 0.
     self->DumpJavaStack(LOG(ERROR));
   }
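
On device builds the fault manager now registers through the new sigchainlib (added to the build at the top of this change) instead of installing its sigaction alone: ClaimSignalChain records the previous handler so ART's handler runs first, and InvokeUserSignalHandler forwards unhandled SIGSEGVs down the chain. A generic sketch of the chaining pattern itself, not of the sigchain API:

    #include <signal.h>

    static struct sigaction g_old_action;  // One saved entry per claimed signal.

    static void ChainingHandler(int sig, siginfo_t* info, void* context) {
      // ... try to handle the fault in the runtime first ...
      // Unhandled: forward to whatever handler was installed before ours.
      if ((g_old_action.sa_flags & SA_SIGINFO) != 0) {
        if (g_old_action.sa_sigaction != nullptr) {
          g_old_action.sa_sigaction(sig, info, context);
        }
      } else if (g_old_action.sa_handler != SIG_IGN &&
                 g_old_action.sa_handler != SIG_DFL) {
        g_old_action.sa_handler(sig);
      }
    }

The exported art_sigsegv_fault symbol exists purely as a debugger hook: a breakpoint there fires at the moment ART gives up on a fault, before control reaches any user handler.
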
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 979970c..bd04473 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -35,8 +35,8 @@
 class AtomicStack {
  public:
   // Capacity is how many elements we can store in the stack.
-  static AtomicStack* Create(const std::string& name, size_t capacity) {
-    std::unique_ptr<AtomicStack> mark_stack(new AtomicStack(name, capacity));
+  static AtomicStack* Create(const std::string& name, size_t growth_limit, size_t capacity) {
+    std::unique_ptr<AtomicStack> mark_stack(new AtomicStack(name, growth_limit, capacity));
     mark_stack->Init();
     return mark_stack.release();
   }
@@ -44,7 +44,7 @@
   ~AtomicStack() {}
 
   void Reset() {
-    DCHECK(mem_map_.get() != NULL);
+    DCHECK(mem_map_.get() != nullptr);
     DCHECK(begin_ != NULL);
     front_index_.StoreRelaxed(0);
     back_index_.StoreRelaxed(0);
@@ -58,20 +58,13 @@
   // Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
 
   // Returns false if we overflowed the stack.
+  bool AtomicPushBackIgnoreGrowthLimit(const T& value) {
+    return AtomicPushBackInternal(value, capacity_);
+  }
+
+  // Returns false if we overflowed the stack.
   bool AtomicPushBack(const T& value) {
-    if (kIsDebugBuild) {
-      debug_is_sorted_ = false;
-    }
-    int32_t index;
-    do {
-      index = back_index_.LoadRelaxed();
-      if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
-        // Stack overflow.
-        return false;
-      }
-    } while (!back_index_.CompareExchangeWeakRelaxed(index, index + 1));
-    begin_[index] = value;
-    return true;
+    return AtomicPushBackInternal(value, growth_limit_);
   }
 
   // Atomically bump the back index by the given number of
@@ -85,7 +78,7 @@
     do {
       index = back_index_.LoadRelaxed();
       new_index = index + num_slots;
-      if (UNLIKELY(static_cast<size_t>(new_index) >= capacity_)) {
+      if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) {
         // Stack overflow.
         return false;
       }
@@ -115,7 +108,7 @@
       debug_is_sorted_ = false;
     }
     int32_t index = back_index_.LoadRelaxed();
-    DCHECK_LT(static_cast<size_t>(index), capacity_);
+    DCHECK_LT(static_cast<size_t>(index), growth_limit_);
     back_index_.StoreRelaxed(index + 1);
     begin_[index] = value;
   }
@@ -165,6 +158,7 @@
   // Will clear the stack.
   void Resize(size_t new_capacity) {
     capacity_ = new_capacity;
+    growth_limit_ = new_capacity;
     Init();
   }
 
@@ -189,15 +183,33 @@
   }
 
  private:
-  AtomicStack(const std::string& name, const size_t capacity)
+  AtomicStack(const std::string& name, size_t growth_limit, size_t capacity)
       : name_(name),
         back_index_(0),
         front_index_(0),
-        begin_(NULL),
+        begin_(nullptr),
+        growth_limit_(growth_limit),
         capacity_(capacity),
         debug_is_sorted_(true) {
   }
 
+  // Returns false if we overflowed the stack.
+  bool AtomicPushBackInternal(const T& value, size_t limit) ALWAYS_INLINE {
+    if (kIsDebugBuild) {
+      debug_is_sorted_ = false;
+    }
+    int32_t index;
+    do {
+      index = back_index_.LoadRelaxed();
+      if (UNLIKELY(static_cast<size_t>(index) >= limit)) {
+        // Stack overflow.
+        return false;
+      }
+    } while (!back_index_.CompareExchangeWeakRelaxed(index, index + 1));
+    begin_[index] = value;
+    return true;
+  }
+
   // Size in number of elements.
   void Init() {
     std::string error_msg;
@@ -213,22 +225,18 @@
 
   // Name of the mark stack.
   std::string name_;
-
   // Memory mapping of the atomic stack.
   std::unique_ptr<MemMap> mem_map_;
-
   // Back index (index after the last element pushed).
   AtomicInteger back_index_;
-
   // Front index, used for implementing PopFront.
   AtomicInteger front_index_;
-
   // Base of the atomic stack.
   T* begin_;
-
+  // Current maximum which we can push back to; must be <= capacity_.
+  size_t growth_limit_;
   // Maximum number of elements.
   size_t capacity_;
-
   // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
   bool debug_is_sorted_;
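
Both push variants now funnel into AtomicPushBackInternal, parameterized by either the soft growth_limit_ or the hard capacity_ (the latter reserved for AtomicPushBackIgnoreGrowthLimit on overflow). A standalone sketch of the same CAS bump-index push, using std::atomic in place of ART's Atomic wrappers:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    template <typename T>
    bool AtomicPushBackSketch(std::atomic<int32_t>& back_index, T* begin,
                              const T& value, size_t limit) {
      int32_t index;
      do {
        index = back_index.load(std::memory_order_relaxed);
        if (static_cast<size_t>(index) >= limit) {
          return false;  // Overflowed the requested limit.
        }
        // A weak CAS may fail spuriously; the loop reloads and retries.
      } while (!back_index.compare_exchange_weak(index, index + 1,
                                                 std::memory_order_relaxed));
      begin[index] = value;  // The CAS reserved this slot exclusively for us.
      return true;
    }

Reserving the index with the CAS before writing begin[index] is what makes concurrent pushers safe; as the surrounding comments note, mixing atomic pushes with atomic pops would still be ABA-prone.
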
 
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 43331c3..c062706 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -103,6 +103,13 @@
       gc_barrier_(new Barrier(0)),
       mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
       is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
+  std::string error_msg;
+  MemMap* mem_map = MemMap::MapAnonymous(
+      "mark sweep sweep array free buffer", nullptr,
+      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
+      PROT_READ | PROT_WRITE, false, &error_msg);
+  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
+  sweep_array_free_buffer_mem_map_.reset(mem_map);
 }
 
 void MarkSweep::InitializePhase() {
@@ -1022,7 +1029,8 @@
 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
   timings_.StartSplit("SweepArray");
   Thread* self = Thread::Current();
-  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
+  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
+      sweep_array_free_buffer_mem_map_->BaseBegin());
   size_t chunk_free_pos = 0;
   size_t freed_bytes = 0;
   size_t freed_large_object_bytes = 0;
@@ -1121,6 +1129,10 @@
   timings_.StartSplit("ResetStack");
   allocations->Reset();
   timings_.EndSplit();
+
+  int success = madvise(sweep_array_free_buffer_mem_map_->BaseBegin(),
+                        sweep_array_free_buffer_mem_map_->BaseSize(), MADV_DONTNEED);
+  DCHECK_EQ(success, 0) << "Failed to madvise the sweep array free buffer pages.";
 }
 
 void MarkSweep::Sweep(bool swap_bitmaps) {
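
SweepArray's chunk-free buffer used to be a large on-stack array of kSweepArrayChunkFreeSize object pointers; it now lives in a dedicated anonymous MemMap whose pages are handed back with madvise(MADV_DONTNEED) once the sweep finishes. The same pattern with raw mmap, since MemMap is ART-internal:

    #include <sys/mman.h>
    #include <cstddef>

    // Page-backed scratch buffer whose physical pages can be reclaimed
    // between uses. Returns nullptr on failure.
    void* AllocScratch(size_t bytes) {
      void* p = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return p == MAP_FAILED ? nullptr : p;
    }

    void DiscardScratch(void* p, size_t bytes) {
      // Keeps the virtual mapping but lets the kernel drop the pages, so the
      // buffer costs nothing while no collection is running.
      madvise(p, bytes, MADV_DONTNEED);
    }

The trade is a little madvise work per collection in exchange for not keeping the buffer resident (or on a thread stack) between collections.
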
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index d73bf3f..a0a0dd8 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -313,6 +313,8 @@
   // Verification.
   size_t live_stack_freeze_size_;
 
+  std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;
+
  private:
   friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
   friend class CardScanTask;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index d4e26ab..e5bb1cc 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -272,12 +272,12 @@
   from_space_->Clear();
   VLOG(heap) << "Protecting from_space_: " << *from_space_;
   from_space_->GetMemMap()->Protect(kProtectFromSpace ? PROT_NONE : PROT_READ);
-  if (swap_semi_spaces_) {
-    heap_->SwapSemiSpaces();
-  }
   timings_.StartSplit("PreSweepingGcVerification");
   heap_->PreSweepingGcVerification(this);
   timings_.EndSplit();
+  if (swap_semi_spaces_) {
+    heap_->SwapSemiSpaces();
+  }
 }
 
 void SemiSpace::UpdateAndMarkModUnion() {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 03b72b6..58ba61b 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -137,33 +137,11 @@
 
 inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
   if (kUseThreadLocalAllocationStack) {
-    bool success = self->PushOnThreadLocalAllocationStack(*obj);
-    if (UNLIKELY(!success)) {
-      // Slow path. Allocate a new thread-local allocation stack.
-      mirror::Object** start_address;
-      mirror::Object** end_address;
-      while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
-                                                &start_address, &end_address)) {
-        // TODO: Add handle VerifyObject.
-        StackHandleScope<1> hs(self);
-        HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
-        CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
-      }
-      self->SetThreadLocalAllocationStack(start_address, end_address);
-      // Retry on the new thread-local allocation stack.
-      success = self->PushOnThreadLocalAllocationStack(*obj);
-      // Must succeed.
-      CHECK(success);
+    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
+      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
     }
-  } else {
-    // This is safe to do since the GC will never free objects which are neither in the allocation
-    // stack or the live bitmap.
-    while (!allocation_stack_->AtomicPushBack(*obj)) {
-      // TODO: Add handle VerifyObject.
-      StackHandleScope<1> hs(self);
-      HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
-      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
-    }
+  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(*obj))) {
+    PushOnAllocationStackWithInternalGC(self, obj);
   }
 }
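
The rewrite above keeps only the fast path in the inlined PushOnAllocationStack and moves the GC-retry loops into out-of-line slow-path functions (added in heap.cc below). A generic sketch of that shape, with illustrative types rather than ART's:

    #include <cstddef>
    #include <vector>

    #define UNLIKELY(x) __builtin_expect(!!(x), 0)

    struct BoundedStack {
      explicit BoundedStack(size_t capacity) : slots(capacity) {}
      std::vector<void*> slots;
      size_t top = 0;

      bool TryPush(void* obj) {
        if (top == slots.size()) return false;  // Full: take the slow path.
        slots[top++] = obj;
        return true;
      }
    };

    // Out-of-line and noinline: this is where the equivalent of the sticky
    // GC would run before the push is retried.
    __attribute__((noinline)) void PushSlowPath(BoundedStack* s, void* obj) {
      s->slots.resize(s->slots.size() * 2 + 1);  // Stand-in for "make room".
      s->slots[s->top++] = obj;
    }

    // Hot path: one branch plus a store, cheap to inline at every
    // allocation site.
    inline void Push(BoundedStack* s, void* obj) {
      if (UNLIKELY(!s->TryPush(obj))) {
        PushSlowPath(s, obj);
      }
    }
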
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ea1ccdd..a962f06 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -84,9 +84,14 @@
 static constexpr double kStickyGcThroughputAdjustment = 1.0;
 // Whether or not we use the free list large object space.
 static constexpr bool kUseFreeListSpaceForLOS = false;
-// Whtehr or not we compact the zygote in PreZygoteFork.
+// Whether or not we compact the zygote in PreZygoteFork.
 static constexpr bool kCompactZygote = kMovingCollector;
 static constexpr size_t kNonMovingSpaceCapacity = 64 * MB;
+// How many reserve entries are at the end of the allocation stack; these are only needed if the
+// allocation stack overflows.
+static constexpr size_t kAllocationStackReserveSize = 1024;
+// Default mark stack size in bytes.
+static const size_t kDefaultMarkStackSize = 64 * KB;
 
 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
            double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
@@ -229,10 +234,10 @@
   // create the bump pointer space if we are not a moving foreground collector but have a moving
   // background collector since the heap transition code will create the temp space by recycling
   // the bitmap from the main space.
-  if (kMovingCollector) {
+  if (kMovingCollector &&
+      (IsMovingGc(foreground_collector_type_) || IsMovingGc(background_collector_type_))) {
     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
-    // TODO: Not create all the bump pointer spaces if not necessary (currently only GSS needs all
-    // 2 of bump pointer spaces + main space) b/14059466. Divide by 2 for a temporary fix.
+    // Divide by 2 as a temporary fix to reduce virtual memory usage.
     const size_t bump_pointer_space_capacity = capacity / 2;
     bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                           bump_pointer_space_capacity, nullptr);
@@ -295,13 +300,13 @@
   // TODO: Count objects in the image space here.
   num_bytes_allocated_.StoreRelaxed(0);
 
-  // Default mark stack size in bytes.
-  static const size_t default_mark_stack_size = 64 * KB;
-  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
-  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
-                                                          max_allocation_stack_size_));
-  live_stack_.reset(accounting::ObjectStack::Create("live stack",
-                                                    max_allocation_stack_size_));
+  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
+                                                    kDefaultMarkStackSize));
+  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
+  allocation_stack_.reset(accounting::ObjectStack::Create(
+      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
+  live_stack_.reset(accounting::ObjectStack::Create(
+      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
 
   // It's still too early to take a lock because there are no threads yet, but we can create locks
   // now. We don't create it earlier to make it clear that you can't use locks during heap
@@ -893,10 +898,16 @@
   uint64_t gc_heap_end_ns = NanoTime();
   // We never move things in the native heap, so we can finish the GC at this point.
   FinishGC(self, collector::kGcTypeNone);
+  size_t native_reclaimed = 0;
+#if defined(USE_DLMALLOC)
   // Trim the native heap.
   dlmalloc_trim(0);
-  size_t native_reclaimed = 0;
   dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
+#elif defined(USE_JEMALLOC)
+  // Jemalloc does its own internal trimming.
+#else
+  UNIMPLEMENTED(WARNING) << "Add trimming support";
+#endif
   uint64_t end_ns = NanoTime();
   VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
       << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
@@ -1150,7 +1161,7 @@
   }
   ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
   if (ptr == nullptr) {
-    ThrowOutOfMemoryError(self, alloc_size, false);
+    ThrowOutOfMemoryError(self, alloc_size, allocator == kAllocatorTypeLOS);
   }
   return ptr;
 }
@@ -2029,6 +2040,43 @@
   const bool verify_referent_;
 };
 
+void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
+  // Slow path: the allocation stack push back must have already failed.
+  DCHECK(!allocation_stack_->AtomicPushBack(*obj));
+  do {
+    // TODO: Add handle VerifyObject.
+    StackHandleScope<1> hs(self);
+    HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+    // Push our object into the reserve region of the allocation stack. This is only required due
+    // to heap verification requiring that roots are live (either in the live bitmap or in the
+    // allocation stack).
+    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
+    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+  } while (!allocation_stack_->AtomicPushBack(*obj));
+}
+
+void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
+  // Slow path: the allocation stack push back must have already failed.
+  DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
+  mirror::Object** start_address;
+  mirror::Object** end_address;
+  while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
+                                            &end_address)) {
+    // TODO: Add handle VerifyObject.
+    StackHandleScope<1> hs(self);
+    HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+    // Push our object into the reserve region of the allocation stack. This is only required due
+    // to heap verification requiring that roots are live (either in the live bitmap or in the
+    // allocation stack).
+    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
+    // Push into the reserve allocation stack.
+    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+  }
+  self->SetThreadLocalAllocationStack(start_address, end_address);
+  // Retry on the new thread-local allocation stack.
+  CHECK(self->PushOnThreadLocalAllocationStack(*obj));  // Must succeed.
+}
+
 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
 size_t Heap::VerifyHeapReferences(bool verify_referents) {
   Thread* self = Thread::Current();
@@ -2318,7 +2366,6 @@
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
     // Swapping bound bitmaps does nothing.
     gc->SwapBitmaps();
-    SwapSemiSpaces();
     // Pass in false since concurrent reference processing can mean that the reference referents
     // may point to dead objects at the point which PreSweepingGcVerification is called.
     size_t failures = VerifyHeapReferences(false);
@@ -2326,7 +2373,6 @@
       LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
           << " failures";
     }
-    SwapSemiSpaces();
     gc->SwapBitmaps();
   }
   if (verify_pre_sweeping_rosalloc_) {
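
The new slow paths rely on the reserve region sized by kAllocationStackReserveSize: the overflowing object is first pushed past the growth limit with AtomicPushBackIgnoreGrowthLimit, so heap verification still sees it as live while the sticky GC runs, and only then is the normal push retried. A single-threaded sketch of a stack with such a reserve (illustrative, not ART's atomic implementation):

    #include <cstddef>
    #include <vector>

    // A stack with a soft growth limit plus a reserve. Normal pushes stop
    // at the limit; reserve pushes may use the full capacity. The reserve
    // exists only so overflowing objects stay reachable during an
    // internally triggered GC.
    class ReserveStack {
     public:
      ReserveStack(size_t limit, size_t reserve)
          : limit_(limit), data_(limit + reserve), top_(0) {}

      bool PushBack(void* obj) {  // Fails once the growth limit is hit.
        if (top_ >= limit_) return false;
        data_[top_++] = obj;
        return true;
      }

      bool PushBackIgnoreGrowthLimit(void* obj) {  // May use the reserve.
        if (top_ >= data_.size()) return false;
        data_[top_++] = obj;
        return true;
      }

     private:
      size_t limit_;
      std::vector<void*> data_;
      size_t top_;
    };
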
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 887b17e..e568b36 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -408,7 +408,7 @@
 
   // Implements java.lang.Runtime.freeMemory.
   size_t GetFreeMemory() const {
-    return GetTotalMemory() - num_bytes_allocated_.LoadSequentiallyConsistent();
+    return GetMaxMemory() - num_bytes_allocated_.LoadSequentiallyConsistent();
   }
 
   // get the space that corresponds to an object's address. Current implementation searches all
@@ -698,6 +698,10 @@
   // Push an object onto the allocation stack.
   void PushOnAllocationStack(Thread* self, mirror::Object** obj)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
   // sweep GC, false for other GC types.
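
The GetFreeMemory fix matters because the current footprint (GetTotalMemory) can be well below the growth limit (GetMaxMemory), so subtracting allocated bytes from the footprint under-reports how much can still be allocated. A worked example with assumed figures:

    growth limit (max) = 512 MB, current footprint (total) = 128 MB, allocated = 96 MB
    old: free = total - allocated = 128 - 96 =  32 MB
    new: free = max   - allocated = 512 - 96 = 416 MB
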
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 45fee14..3d35c00 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -53,8 +53,7 @@
 
   std::vector<std::string> arg_vector;
 
-  std::string dex2oat(GetAndroidRoot());
-  dex2oat += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
+  std::string dex2oat(Runtime::Current()->GetCompilerExecutable());
   arg_vector.push_back(dex2oat);
 
   std::string image_option_string("--image=");
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index e63cc39..54a63f0 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -141,8 +141,10 @@
 size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
   MutexLock mu(self, lock_);
   MemMaps::iterator found = mem_maps_.find(ptr);
-  CHECK(found != mem_maps_.end()) << "Attempted to free large object" << ptr
-      << "which was not live";
+  if (UNLIKELY(found == mem_maps_.end())) {
+    Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
+  }
   DCHECK_GE(num_bytes_allocated_, found->second->Size());
   size_t allocation_size = found->second->Size();
   num_bytes_allocated_ -= allocation_size;
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index 23c67ff..f733584 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -24,6 +24,10 @@
 class LargeObjectSpaceTest : public SpaceTest {
  public:
   void LargeObjectTest();
+
+  static constexpr size_t kNumThreads = 10;
+  static constexpr size_t kNumIterations = 1000;
+  void RaceTest();
 };
 
 
@@ -89,11 +93,64 @@
   }
 }
 
+class AllocRaceTask : public Task {
+ public:
+  AllocRaceTask(size_t id, size_t iterations, size_t size, LargeObjectSpace* los) :
+    id_(id), iterations_(iterations), size_(size), los_(los) {}
+
+  void Run(Thread* self) {
+    for (size_t i = 0; i < iterations_; ++i) {
+      size_t alloc_size;
+      mirror::Object* ptr = los_->Alloc(self, size_, &alloc_size, nullptr);
+
+      NanoSleep((id_ + 3) * 1000);  // Sleep for (id + 3) microseconds.
+
+      los_->Free(self, ptr);
+    }
+  }
+
+  virtual void Finalize() {
+    delete this;
+  }
+
+ private:
+  size_t id_;
+  size_t iterations_;
+  size_t size_;
+  LargeObjectSpace* los_;
+};
+
+void LargeObjectSpaceTest::RaceTest() {
+  for (size_t los_type = 0; los_type < 2; ++los_type) {
+    LargeObjectSpace* los = nullptr;
+    if (los_type == 0) {
+      los = space::LargeObjectMapSpace::Create("large object space");
+    } else {
+      los = space::FreeListSpace::Create("large object space", nullptr, 128 * MB);
+    }
+
+    Thread* self = Thread::Current();
+    ThreadPool thread_pool("Large object space test thread pool", kNumThreads);
+    for (size_t i = 0; i < kNumThreads; ++i) {
+      thread_pool.AddTask(self, new AllocRaceTask(i, kNumIterations, 16 * KB, los));
+    }
+
+    thread_pool.StartWorkers(self);
+
+    thread_pool.Wait(self, true, false);
+
+    delete los;
+  }
+}
 
 TEST_F(LargeObjectSpaceTest, LargeObjectTest) {
   LargeObjectTest();
 }
 
+TEST_F(LargeObjectSpaceTest, RaceTest) {
+  RaceTest();
+}
+
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index e710409..57ed0bd 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -50,12 +50,12 @@
     CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End())));
     live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
         StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
-        Begin(), Capacity()));
+        Begin(), NonGrowthLimitCapacity()));
     DCHECK(live_bitmap_.get() != nullptr) << "could not create allocspace live bitmap #"
         << bitmap_index;
     mark_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
         StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
-        Begin(), Capacity()));
+        Begin(), NonGrowthLimitCapacity()));
     DCHECK(live_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
         << bitmap_index;
   }
@@ -218,10 +218,12 @@
 
 void MallocSpace::Dump(std::ostream& os) const {
   os << GetType()
-      << " begin=" << reinterpret_cast<void*>(Begin())
-      << ",end=" << reinterpret_cast<void*>(End())
-      << ",size=" << PrettySize(Size()) << ",capacity=" << PrettySize(Capacity())
-      << ",name=\"" << GetName() << "\"]";
+     << " begin=" << reinterpret_cast<void*>(Begin())
+     << ",end=" << reinterpret_cast<void*>(End())
+     << ",limit=" << reinterpret_cast<void*>(Limit())
+     << ",size=" << PrettySize(Size()) << ",capacity=" << PrettySize(Capacity())
+     << ",non_growth_limit_capacity=" << PrettySize(NonGrowthLimitCapacity())
+     << ",name=\"" << GetName() << "\"]";
 }
 
 void MallocSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
diff --git a/runtime/handle.h b/runtime/handle.h
index 3127864..b70f651 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -53,29 +53,48 @@
     reference_->Assign(reference);
     return old;
   }
-  jobject ToJObject() const ALWAYS_INLINE {
+  jobject ToJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+    if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) {
+      // Special case so that we work with NullHandles.
+      return nullptr;
+    }
     return reinterpret_cast<jobject>(reference_);
   }
 
- private:
+ protected:
   StackReference<T>* reference_;
 
   template<typename S>
   explicit Handle(StackReference<S>* reference)
       : reference_(reinterpret_cast<StackReference<T>*>(reference)) {
   }
-
   template<typename S>
   explicit Handle(const Handle<S>& handle)
       : reference_(reinterpret_cast<StackReference<T>*>(handle.reference_)) {
   }
 
+  StackReference<T>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+    return reference_;
+  }
+
+ private:
+  friend class BuildGenericJniFrameVisitor;
   template<class S> friend class Handle;
   friend class HandleScope;
   template<class S> friend class HandleWrapper;
   template<size_t kNumReferences> friend class StackHandleScope;
 };
 
+template<class T>
+class NullHandle : public Handle<T> {
+ public:
+  NullHandle() : Handle<T>(&null_ref_) {
+  }
+
+ private:
+  StackReference<T> null_ref_;
+};
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_HANDLE_H_
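
NullHandle lets callers pass a known-null reference where a Handle<T> is expected without creating a StackHandleScope first; the interpreter_common.cc hunk later in this change uses it for exactly that. The call shape, before and after:

    // Before: a scope was needed just to wrap nullptr.
    //   StackHandleScope<1> hs(self);
    //   auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
    //   Class* found = class_linker->FindClass(self, descriptor.c_str(), class_loader);

    // After: a stack-allocated null handle, no scope bookkeeping. The
    // ToJObject() special case added above keeps null handles safe to convert.
    Class* found = class_linker->FindClass(
        self, descriptor.c_str(), NullHandle<mirror::ClassLoader>());
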
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index f2e059d..8ff7086 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -51,20 +51,12 @@
     return header_size + data_size;
   }
 
-  // Get the size of the handle scope for the number of entries, with padding added for potential alignment.
-  static size_t GetAlignedHandleScopeSize(uint32_t num_references) {
-    size_t handle_scope_size = SizeOf(num_references);
-    return RoundUp(handle_scope_size, 8);
-  }
-
-  // Get the size of the handle scope for the number of entries, with padding added for potential alignment.
-  static size_t GetAlignedHandleScopeSizeTarget(size_t pointer_size, uint32_t num_references) {
+  // Returns the size of a HandleScope containing num_references handles.
+  static size_t SizeOf(size_t pointer_size, uint32_t num_references) {
     // Assume that the layout is packed.
     size_t header_size = pointer_size + sizeof(number_of_references_);
-    // This assumes there is no layout change between 32 and 64b.
     size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
-    size_t handle_scope_size = header_size + data_size;
-    return RoundUp(handle_scope_size, 8);
+    return header_size + data_size;
   }
 
   // Link to previous HandleScope or null.
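
With the packed layout, both SizeOf overloads now compute header plus data the same way: header = pointer_size + sizeof(number_of_references_) (the link pointer plus the count), data = num_references * sizeof(StackReference<mirror::Object>). A worked example, assuming a 4-byte count and 4-byte stack references:

    32-bit target, 10 references: header = 4 + 4 = 8,  data = 10 * 4 = 40, SizeOf = 48 bytes
    64-bit target, 10 references: header = 8 + 4 = 12, data = 10 * 4 = 40, SizeOf = 52 bytes

The removed GetAlignedHandleScopeSize* helpers rounded up to 8 here; the caller in art_method-inl.h now does its own alignment (see the frame-size hunk below).
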
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 42a9757..790f4d0 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -71,12 +71,16 @@
   return true;
 }
 
+template<ReadBarrierOption kReadBarrierOption>
 inline mirror::Object* IndirectReferenceTable::Get(IndirectRef iref) const {
   if (!GetChecked(iref)) {
     return kInvalidIndirectRefObject;
   }
-  mirror::Object* obj = table_[ExtractIndex(iref)];
+  mirror::Object** root = &table_[ExtractIndex(iref)];
+  mirror::Object* obj = *root;
   if (LIKELY(obj != kClearedJniWeakGlobal)) {
+    // Neither the read barrier nor VerifyObject can handle kClearedJniWeakGlobal.
+    obj = ReadBarrier::BarrierForWeakRoot<mirror::Object, kReadBarrierOption>(root);
     VerifyObject(obj);
   }
   return obj;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 432481b..756ac96 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -266,11 +266,22 @@
 
 void IndirectReferenceTable::Dump(std::ostream& os) const {
   os << kind_ << " table dump:\n";
-  ReferenceTable::Table entries(table_, table_ + Capacity());
-  // Remove NULLs.
-  for (int i = entries.size() - 1; i >= 0; --i) {
-    if (entries[i] == NULL) {
-      entries.erase(entries.begin() + i);
+  ReferenceTable::Table entries;
+  for (size_t i = 0; i < Capacity(); ++i) {
+    mirror::Object** root = &table_[i];
+    mirror::Object* obj = *root;
+    if (UNLIKELY(obj == nullptr)) {
+      // Remove NULLs.
+    } else if (UNLIKELY(obj == kClearedJniWeakGlobal)) {
+      // ReferenceTable::Dump() will handle kClearedJniWeakGlobal
+      // while the read barrier won't.
+      entries.push_back(obj);
+    } else {
+      // We need a read barrier if this is a weak global table. Since this
+      // is for debugging, where performance isn't a top priority, we
+      // unconditionally enable the read barrier, which is conservative.
+      obj = ReadBarrier::BarrierForWeakRoot<mirror::Object, kWithReadBarrier>(root);
+      entries.push_back(obj);
     }
   }
   ReferenceTable::Dump(os, entries);
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 5015410..5b3ed68 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -263,14 +263,16 @@
    *
    * Returns kInvalidIndirectRefObject if iref is invalid.
    */
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       ALWAYS_INLINE;
 
   // Synchronized get which reads a reference, acquiring a lock if necessary.
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
                                   IndirectRef iref) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return Get(iref);
+    return Get<kReadBarrierOption>(iref);
   }
 
   /*
@@ -366,7 +368,9 @@
   std::unique_ptr<MemMap> table_mem_map_;
   // Mem map where we store the extended debugging info.
   std::unique_ptr<MemMap> slot_mem_map_;
-  /* bottom of the stack */
+  // Bottom of the stack. If this is a JNI weak global table, do not
+  // directly access the object references in it, as they are weak roots.
+  // Use Get(), which has a read barrier.
   mirror::Object** table_;
   /* bit mask, ORed into all irefs */
   IndirectRefKind kind_;
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 817d104..f12043e 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -82,10 +82,25 @@
   // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
 }
 
-mirror::String* InternTable::Lookup(Table& table, mirror::String* s, int32_t hash_code) {
+mirror::String* InternTable::LookupStrong(mirror::String* s, int32_t hash_code) {
+  return Lookup<kWithoutReadBarrier>(&strong_interns_, s, hash_code);
+}
+
+mirror::String* InternTable::LookupWeak(mirror::String* s, int32_t hash_code) {
+  // Weak interns need a read barrier because they are weak roots.
+  return Lookup<kWithReadBarrier>(&weak_interns_, s, hash_code);
+}
+
+template<ReadBarrierOption kReadBarrierOption>
+mirror::String* InternTable::Lookup(Table* table, mirror::String* s, int32_t hash_code) {
+  CHECK_EQ(table == &weak_interns_, kReadBarrierOption == kWithReadBarrier)
+      << "Only weak_interns_ needs a read barrier.";
   Locks::intern_table_lock_->AssertHeld(Thread::Current());
-  for (auto it = table.find(hash_code), end = table.end(); it != end; ++it) {
-    mirror::String* existing_string = it->second;
+  for (auto it = table->lower_bound(hash_code), end = table->end();
+       it != end && it->first == hash_code; ++it) {
+    mirror::String** weak_root = &it->second;
+    mirror::String* existing_string =
+        ReadBarrier::BarrierForWeakRoot<mirror::String, kReadBarrierOption>(weak_root);
     if (existing_string->Equals(s)) {
       return existing_string;
     }
@@ -114,18 +129,29 @@
   return s;
 }
 
+void InternTable::RemoveStrong(mirror::String* s, int32_t hash_code) {
+  Remove<kWithoutReadBarrier>(&strong_interns_, s, hash_code);
+}
+
 void InternTable::RemoveWeak(mirror::String* s, int32_t hash_code) {
   Runtime* runtime = Runtime::Current();
   if (runtime->IsActiveTransaction()) {
     runtime->RecordWeakStringRemoval(s, hash_code);
   }
-  Remove(weak_interns_, s, hash_code);
+  Remove<kWithReadBarrier>(&weak_interns_, s, hash_code);
 }
 
-void InternTable::Remove(Table& table, mirror::String* s, int32_t hash_code) {
-  for (auto it = table.find(hash_code), end = table.end(); it != end; ++it) {
-    if (it->second == s) {
-      table.erase(it);
+template<ReadBarrierOption kReadBarrierOption>
+void InternTable::Remove(Table* table, mirror::String* s, int32_t hash_code) {
+  CHECK_EQ(table == &weak_interns_, kReadBarrierOption == kWithReadBarrier)
+      << "Only weak_interns_ needs a read barrier.";
+  for (auto it = table->lower_bound(hash_code), end = table->end();
+       it != end && it->first == hash_code; ++it) {
+    mirror::String** weak_root = &it->second;
+    mirror::String* existing_string =
+        ReadBarrier::BarrierForWeakRoot<mirror::String, kReadBarrierOption>(weak_root);
+    if (existing_string == s) {
+      table->erase(it);
       return;
     }
   }
@@ -142,11 +168,11 @@
 }
 void InternTable::RemoveStrongFromTransaction(mirror::String* s, int32_t hash_code) {
   DCHECK(!Runtime::Current()->IsActiveTransaction());
-  Remove(strong_interns_, s, hash_code);
+  RemoveStrong(s, hash_code);
 }
 void InternTable::RemoveWeakFromTransaction(mirror::String* s, int32_t hash_code) {
   DCHECK(!Runtime::Current()->IsActiveTransaction());
-  Remove(weak_interns_, s, hash_code);
+  RemoveWeak(s, hash_code);
 }
 
 static mirror::String* LookupStringFromImage(mirror::String* s)
@@ -200,7 +226,7 @@
 
   if (is_strong) {
     // Check the strong table for a match.
-    mirror::String* strong = Lookup(strong_interns_, s, hash_code);
+    mirror::String* strong = LookupStrong(s, hash_code);
     if (strong != NULL) {
       return strong;
     }
@@ -212,7 +238,7 @@
     }
 
     // There is no match in the strong table, check the weak table.
-    mirror::String* weak = Lookup(weak_interns_, s, hash_code);
+    mirror::String* weak = LookupWeak(s, hash_code);
     if (weak != NULL) {
       // A match was found in the weak table. Promote to the strong table.
       RemoveWeak(weak, hash_code);
@@ -225,7 +251,7 @@
   }
 
   // Check the strong table for a match.
-  mirror::String* strong = Lookup(strong_interns_, s, hash_code);
+  mirror::String* strong = LookupStrong(s, hash_code);
   if (strong != NULL) {
     return strong;
   }
@@ -235,7 +261,7 @@
     return InsertWeak(image, hash_code);
   }
   // Check the weak table for a match.
-  mirror::String* weak = Lookup(weak_interns_, s, hash_code);
+  mirror::String* weak = LookupWeak(s, hash_code);
   if (weak != NULL) {
     return weak;
   }
@@ -270,13 +296,14 @@
 
 bool InternTable::ContainsWeak(mirror::String* s) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  const mirror::String* found = Lookup(weak_interns_, s, s->GetHashCode());
+  const mirror::String* found = LookupWeak(s, s->GetHashCode());
   return found == s;
 }
 
 void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
   for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
+    // This does not need a read barrier because it is called by the GC.
     mirror::Object* object = it->second;
     mirror::Object* new_object = callback(object, arg);
     if (new_object == nullptr) {
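
Lookup and Remove now start at lower_bound(hash_code) and stop as soon as the key changes, instead of iterating from find() toward end(), which kept scanning entries past the matching hash. A self-contained sketch of the bounded multimap scan (std::multimap here stands in for InternTable::Table):

    #include <cstdint>
    #include <map>
    #include <string>

    // Visit exactly the entries sharing one key: begin at lower_bound(key)
    // and stop when the key changes (or at end()).
    const std::string* Lookup(const std::multimap<int32_t, std::string>& table,
                              int32_t hash, const std::string& wanted) {
      for (auto it = table.lower_bound(hash);
           it != table.end() && it->first == hash; ++it) {
        if (it->second == wanted) {
          return &it->second;
        }
      }
      return nullptr;
    }

equal_range(hash) would express the same bound in a single call.
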
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 47d5e09..3df2aeb 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -79,15 +79,26 @@
       LOCKS_EXCLUDED(Locks::intern_table_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  mirror::String* Lookup(Table& table, mirror::String* s, int32_t hash_code)
+  mirror::String* LookupStrong(mirror::String* s, int32_t hash_code)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::String* LookupWeak(mirror::String* s, int32_t hash_code)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  mirror::String* Lookup(Table* table, mirror::String* s, int32_t hash_code)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   mirror::String* InsertStrong(mirror::String* s, int32_t hash_code)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   mirror::String* InsertWeak(mirror::String* s, int32_t hash_code)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
-  void RemoveWeak(mirror::String* s, int32_t hash_code)
+  void RemoveStrong(mirror::String* s, int32_t hash_code)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
-  void Remove(Table& table, mirror::String* s, int32_t hash_code)
+  void RemoveWeak(mirror::String* s, int32_t hash_code)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  void Remove(Table* table, mirror::String* s, int32_t hash_code)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
 
   // Transaction rollback access.
@@ -96,8 +107,10 @@
   mirror::String* InsertWeakFromTransaction(mirror::String* s, int32_t hash_code)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   void RemoveStrongFromTransaction(mirror::String* s, int32_t hash_code)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   void RemoveWeakFromTransaction(mirror::String* s, int32_t hash_code)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   friend class Transaction;
 
@@ -107,6 +120,9 @@
   Table strong_interns_ GUARDED_BY(Locks::intern_table_lock_);
   std::vector<std::pair<int32_t, mirror::String*>> new_strong_intern_roots_
       GUARDED_BY(Locks::intern_table_lock_);
+  // Since weak_interns_ contains weak roots, it needs a read barrier.
+  // Do not directly access the strings in it; use the functions that
+  // contain read barriers.
   Table weak_interns_ GUARDED_BY(Locks::intern_table_lock_);
 };
 
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 478c74c..9cfba8d 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -33,7 +33,7 @@
     DCHECK_GE(length, 0);
     mirror::Class* element_class = reinterpret_cast<Object*>(args[0])->AsClass();
     Runtime* runtime = Runtime::Current();
-    mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(self, element_class);
+    mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(self, &element_class);
     DCHECK(array_class != nullptr);
     gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
     result->SetL(mirror::Array::Alloc<true>(self, array_class, length,
@@ -524,16 +524,17 @@
   ArtMethod* method = shadow_frame->GetMethod();
   // Ensure static methods are initialized.
   if (method->IsStatic()) {
-    StackHandleScope<1> hs(self);
-    Handle<Class> declaringClass(hs.NewHandle(method->GetDeclaringClass()));
-    if (UNLIKELY(!declaringClass->IsInitializing())) {
-      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(declaringClass, true,
-                                                                            true))) {
-        DCHECK(Thread::Current()->IsExceptionPending());
+    mirror::Class* declaring_class = method->GetDeclaringClass();
+    if (UNLIKELY(!declaring_class->IsInitializing())) {
+      StackHandleScope<1> hs(self);
+      HandleWrapper<Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
+      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
+          h_declaring_class, true, true))) {
+        DCHECK(self->IsExceptionPending());
         self->PopShadowFrame();
         return;
       }
-      CHECK(declaringClass->IsInitializing());
+      CHECK(h_declaring_class->IsInitializing());
     }
   }
 
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 418aff5..63ae6fd 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -296,11 +296,9 @@
     // other variants that take more arguments should also be added.
     std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset)->AsString()->ToModifiedUtf8().c_str()));
 
-    StackHandleScope<1> hs(self);
     // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
-    auto class_loader = hs.NewHandle<ClassLoader>(nullptr);
-    Class* found = Runtime::Current()->GetClassLinker()->FindClass(self, descriptor.c_str(),
-                                                                   class_loader);
+    Class* found = Runtime::Current()->GetClassLinker()->FindClass(
+        self, descriptor.c_str(), NullHandle<mirror::ClassLoader>());
     CHECK(found != NULL) << "Class.forName failed in un-started runtime for class: "
         << PrettyDescriptor(descriptor);
     result->SetL(found);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index b42af11..029af8d 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -29,6 +29,7 @@
 #include "dex_instruction.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "gc/accounting/card_table-inl.h"
+#include "handle_scope-inl.h"
 #include "nth_caller_visitor.h"
 #include "mirror/art_field-inl.h"
 #include "mirror/art_method.h"
@@ -83,6 +84,17 @@
                               const DexFile::CodeItem* code_item,
                               ShadowFrame& shadow_frame, JValue result_register);
 
+// Workaround for b/14882674 where clang allocates stack for each ThrowLocation created by calls to
+// ShadowFrame::GetCurrentLocationForThrow(). Moving the call here prevents such allocation from
+// happening in the interpreter itself.
+static inline void ThrowNullPointerExceptionFromInterpreter(const ShadowFrame& shadow_frame)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) SOMETIMES_INLINE;
+
+static inline void ThrowNullPointerExceptionFromInterpreter(
+    const ShadowFrame& shadow_frame) {
+  ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+}
+
 static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
   ref->MonitorEnter(self);
 }
@@ -112,9 +124,10 @@
   const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
   const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
   Object* receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
-  ArtMethod* const method = FindMethodFromCode<type, do_access_check>(method_idx, receiver,
-                                                                      shadow_frame.GetMethod(),
-                                                                      self);
+  mirror::ArtMethod* sf_method = shadow_frame.GetMethod();
+  ArtMethod* const method = FindMethodFromCode<type, do_access_check>(
+      method_idx, &receiver, &sf_method, self);
+  // The shadow frame should already be pushed, so we don't need to update it.
   if (UNLIKELY(method == nullptr)) {
     CHECK(self->IsExceptionPending());
     result->SetJ(0);
@@ -348,6 +361,10 @@
     case Primitive::kPrimNot: {
       Object* reg = shadow_frame.GetVRegReference(vregA);
       if (do_assignability_check && reg != nullptr) {
+        // FieldHelper::GetType can resolve classes, use a handle wrapper which will restore the
+        // object in the destructor.
+        StackHandleScope<1> hs(self);
+        HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(&obj));
         Class* field_class = FieldHelper(f).GetType();
         if (!reg->VerifierInstanceOf(field_class)) {
           // This should never happen.
@@ -372,7 +389,8 @@
 // Handles iput-quick, iput-wide-quick and iput-object-quick instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<Primitive::Type field_type, bool transaction_active>
-static SOMETIMES_INLINE_KEYWORD bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
+static SOMETIMES_INLINE_KEYWORD bool DoIPutQuick(const ShadowFrame& shadow_frame,
+                                                 const Instruction* inst, uint16_t inst_data) {
   Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
   if (UNLIKELY(obj == nullptr)) {
     // We lost the reference to the field index so we cannot get a more
@@ -580,16 +598,10 @@
   ThrowLocation throw_location;
   mirror::Throwable* exception = self->GetException(&throw_location);
   bool clear_exception = false;
-  bool new_exception = false;
   StackHandleScope<3> hs(self);
   Handle<mirror::Class> exception_class(hs.NewHandle(exception->GetClass()));
   uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception_class, dex_pc,
-                                                                   &clear_exception,
-                                                                   &new_exception);
-  if (UNLIKELY(new_exception)) {
-    // Update the exception.
-    exception = self->GetException(&throw_location);
-  }
+                                                                   &clear_exception);
   if (found_dex_pc == DexFile::kDexNoIndex) {
     instrumentation->MethodUnwindEvent(self, this_object,
                                        shadow_frame.GetMethod(), dex_pc);
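
The b/14882674 workaround works because clang reserves a distinct stack slot in the very large interpreter function for the ThrowLocation temporary at each throw site; funneling construction through one helper that is only sometimes inlined moves that stack cost out of the interpreter frame. A generic sketch of the pattern, with illustrative names:

    // Without the helper, every call site in a huge function gets its own
    // stack slot for the large temporary; with it, the temporary lives in
    // the helper's short-lived frame instead.
    struct BigTemp { char bytes[256]; };

    __attribute__((noinline)) void ThrowAt(int site) {
      BigTemp loc = {};        // Built here, not in the caller's frame.
      (void)site; (void)loc;   // ... fill in the location and throw ...
    }
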
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index e0f9e5f..99153c8 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -234,9 +234,9 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(MOVE_EXCEPTION) {
-    Throwable* exception = self->GetException(NULL);
-    self->ClearException();
+    Throwable* exception = self->GetException(nullptr);
     shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+    self->ClearException();
     ADVANCE(1);
   }
   HANDLE_INSTRUCTION_END();
@@ -462,7 +462,7 @@
   HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
     Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
     if (UNLIKELY(obj == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       DoMonitorEnter(self, obj);
@@ -474,7 +474,7 @@
   HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
     Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
     if (UNLIKELY(obj == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       DoMonitorExit(self, obj);
@@ -516,7 +516,7 @@
   HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
     Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
     if (UNLIKELY(array == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
@@ -957,7 +957,7 @@
   HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -975,7 +975,7 @@
   HANDLE_INSTRUCTION_START(AGET_BYTE) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -993,7 +993,7 @@
   HANDLE_INSTRUCTION_START(AGET_CHAR) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1011,7 +1011,7 @@
   HANDLE_INSTRUCTION_START(AGET_SHORT) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1029,7 +1029,7 @@
   HANDLE_INSTRUCTION_START(AGET) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1047,7 +1047,7 @@
   HANDLE_INSTRUCTION_START(AGET_WIDE)  {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1065,7 +1065,7 @@
   HANDLE_INSTRUCTION_START(AGET_OBJECT) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1083,7 +1083,7 @@
   HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1102,7 +1102,7 @@
   HANDLE_INSTRUCTION_START(APUT_BYTE) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1121,7 +1121,7 @@
   HANDLE_INSTRUCTION_START(APUT_CHAR) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1140,7 +1140,7 @@
   HANDLE_INSTRUCTION_START(APUT_SHORT) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1159,7 +1159,7 @@
   HANDLE_INSTRUCTION_START(APUT) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1178,7 +1178,7 @@
   HANDLE_INSTRUCTION_START(APUT_WIDE) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
@@ -1197,7 +1197,7 @@
   HANDLE_INSTRUCTION_START(APUT_OBJECT) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
     if (UNLIKELY(a == NULL)) {
-      ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+      ThrowNullPointerExceptionFromInterpreter(shadow_frame);
       HANDLE_PENDING_EXCEPTION();
     } else {
       int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index c0275f6..5e4f5be 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -163,9 +163,9 @@
         break;
       case Instruction::MOVE_EXCEPTION: {
         PREAMBLE();
-        Throwable* exception = self->GetException(NULL);
-        self->ClearException();
+        Throwable* exception = self->GetException(nullptr);
         shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+        self->ClearException();
         inst = inst->Next_1xx();
         break;
       }
@@ -375,7 +375,7 @@
         PREAMBLE();
         Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
         if (UNLIKELY(obj == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
         } else {
           DoMonitorEnter(self, obj);
@@ -387,7 +387,7 @@
         PREAMBLE();
         Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
         if (UNLIKELY(obj == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
         } else {
           DoMonitorExit(self, obj);
@@ -429,7 +429,7 @@
         PREAMBLE();
         Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
         if (UNLIKELY(array == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
         } else {
           shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
@@ -855,7 +855,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -873,7 +873,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -891,7 +891,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -909,7 +909,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -927,7 +927,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -945,7 +945,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -963,7 +963,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -981,7 +981,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -1000,7 +1000,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -1019,7 +1019,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -1038,7 +1038,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -1057,7 +1057,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -1076,7 +1076,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
@@ -1095,7 +1095,7 @@
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == NULL)) {
-          ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+          ThrowNullPointerExceptionFromInterpreter(shadow_frame);
           HANDLE_PENDING_EXCEPTION();
           break;
         }
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6f3317d..9ca3c85 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -669,7 +669,8 @@
   }
 
   static void ExceptionClear(JNIEnv* env) {
-    static_cast<JNIEnvExt*>(env)->self->ClearException();
+    ScopedObjectAccess soa(env);
+    soa.Self()->ClearException();
   }
 
   static void ExceptionDescribe(JNIEnv* env) {
@@ -2058,7 +2059,7 @@
         return nullptr;
       }
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-      array_class = class_linker->FindArrayClass(soa.Self(), element_class);
+      array_class = class_linker->FindArrayClass(soa.Self(), &element_class);
       if (UNLIKELY(array_class == nullptr)) {
         return nullptr;
       }
@@ -2441,7 +2442,9 @@
     switch (kind) {
     case kLocal: {
       ScopedObjectAccess soa(env);
-      if (static_cast<JNIEnvExt*>(env)->locals.Get(ref) != kInvalidIndirectRefObject) {
+      // The local refs don't need a read barrier.
+      if (static_cast<JNIEnvExt*>(env)->locals.Get<kWithoutReadBarrier>(ref) !=
+          kInvalidIndirectRefObject) {
         return JNILocalRefType;
       }
       return JNIInvalidRefType;
@@ -3118,7 +3121,9 @@
   while (UNLIKELY(!allow_new_weak_globals_)) {
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
-  return weak_globals_.Get(ref);
+  // The weak globals do need a read barrier as they are weak roots.
+  mirror::Object* obj = weak_globals_.Get<kWithReadBarrier>(ref);
+  return obj;
 }
 
 void JavaVMExt::DumpReferenceTables(std::ostream& os) {
@@ -3138,7 +3143,7 @@
 }
 
 bool JavaVMExt::LoadNativeLibrary(const std::string& path,
-                                  const Handle<mirror::ClassLoader>& class_loader,
+                                  Handle<mirror::ClassLoader> class_loader,
                                   std::string* detail) {
   detail->clear();
 
@@ -3298,6 +3303,7 @@
 void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
   MutexLock mu(Thread::Current(), weak_globals_lock_);
   for (mirror::Object** entry : weak_globals_) {
+    // Since this is called by the GC, we don't need a read barrier.
     mirror::Object* obj = *entry;
     mirror::Object* new_obj = callback(obj, arg);
     if (new_obj == nullptr) {
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index 37195eb..4072da4 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -67,7 +67,7 @@
    * Returns 'true' on success. On failure, sets 'detail' to a
    * human-readable description of the error.
    */
-  bool LoadNativeLibrary(const std::string& path, const Handle<mirror::ClassLoader>& class_loader,
+  bool LoadNativeLibrary(const std::string& path, Handle<mirror::ClassLoader> class_loader,
                          std::string* detail)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -129,6 +129,9 @@
   // TODO: Make the other members of this class also private.
   // JNI weak global references.
   Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // Since weak_globals_ contains weak roots, be careful not to access
+  // the object references in it directly. Use Get() with the read
+  // barrier enabled.
   IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
   bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
   ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 552652c..f7b5737 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -42,8 +42,8 @@
 // Recursively create an array with multiple dimensions.  Elements may be
 // Objects or primitive types.
 static Array* RecursiveCreateMultiArray(Thread* self,
-                                        const Handle<Class>& array_class, int current_dimension,
-                                        const Handle<mirror::IntArray>& dimensions)
+                                        Handle<Class> array_class, int current_dimension,
+                                        Handle<mirror::IntArray> dimensions)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   int32_t array_length = dimensions->Get(current_dimension);
   StackHandleScope<1> hs(self);
@@ -73,8 +73,8 @@
   return new_array.Get();
 }
 
-Array* Array::CreateMultiArray(Thread* self, const Handle<Class>& element_class,
-                               const Handle<IntArray>& dimensions) {
+Array* Array::CreateMultiArray(Thread* self, Handle<Class> element_class,
+                               Handle<IntArray> dimensions) {
   // Verify dimensions.
   //
   // The caller is responsible for verifying that "dimArray" is non-null
@@ -93,15 +93,17 @@
 
   // Find/generate the array class.
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  mirror::Class* element_class_ptr = element_class.Get();
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> array_class(
-      hs.NewHandle(class_linker->FindArrayClass(self, element_class.Get())));
+      hs.NewHandle(class_linker->FindArrayClass(self, &element_class_ptr)));
   if (UNLIKELY(array_class.Get() == nullptr)) {
     CHECK(self->IsExceptionPending());
     return nullptr;
   }
   for (int32_t i = 1; i < dimensions->GetLength(); ++i) {
-    array_class.Assign(class_linker->FindArrayClass(self, array_class.Get()));
+    mirror::Class* array_class_ptr = array_class.Get();
+    array_class.Assign(class_linker->FindArrayClass(self, &array_class_ptr));
     if (UNLIKELY(array_class.Get() == nullptr)) {
       CHECK(self->IsExceptionPending());
       return nullptr;
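
The FindArrayClass calls above now take the element class as mirror::Class** instead of mirror::Class*: callers copy the reference into a local and pass the local's address, so the class linker can update the reference in place (for example after the class is moved). A minimal illustration of that calling convention, with a hypothetical FindArrayClassStub standing in for the real ClassLinker method:

#include <cassert>

struct Class { Class* array_form; };

// Hypothetical stand-in for ClassLinker::FindArrayClass: it may rewrite
// *element_class (e.g. to a forwarded address) before using it.
static Class* FindArrayClassStub(Class** element_class) {
  return (*element_class)->array_form;
}

int main() {
  Class array_class{nullptr};
  Class element{&array_class};
  Class* element_ptr = &element;   // Local copy of the reference...
  Class* result = FindArrayClassStub(&element_ptr);  // ...passed by address.
  assert(result == &array_class);
  return 0;
}
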
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 1b8106e..64e2317 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -38,8 +38,8 @@
                       bool fill_usable = false)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static Array* CreateMultiArray(Thread* self, const Handle<Class>& element_class,
-                                 const Handle<IntArray>& dimensions)
+  static Array* CreateMultiArray(Thread* self, Handle<Class> element_class,
+                                 Handle<IntArray> dimensions)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 39efa58..5f4619b 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -296,10 +296,16 @@
     // Generic JNI frame.
     DCHECK(IsNative());
     uint32_t handle_refs = MethodHelper(this).GetNumberOfReferenceArgsWithoutReceiver() + 1;
-    size_t scope_size = HandleScope::GetAlignedHandleScopeSize(handle_refs);
+    size_t scope_size = HandleScope::SizeOf(handle_refs);
     QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-    return QuickMethodFrameInfo(callee_info.FrameSizeInBytes() + scope_size,
-                                callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+
+    // Callee saves + handle scope + method ref + alignment
+    size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
+                                - kPointerSize  // callee-save frame stores a whole method pointer
+                                + sizeof(StackReference<mirror::ArtMethod>),
+                                kStackAlignment);
+
+    return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
   }
 
   const void* code_pointer = EntryPointToCodePointer(entry_point);
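
The new generic JNI frame size is the callee-save frame plus the handle scope, minus the full method pointer the callee-save layout would have stored, plus a 4-byte StackReference for the method, rounded up to the stack alignment. A small worked example of the arithmetic; the constants are illustrative, not ART's real values:

#include <cassert>
#include <cstddef>

// Round up to a power-of-two alignment, as in ART's utils.
static constexpr size_t RoundUp(size_t x, size_t n) {
  return (x + n - 1) & ~(n - 1);
}

int main() {
  // Illustrative 64-bit values only.
  const size_t kPointerSize = 8;
  const size_t kStackAlignment = 16;
  const size_t kStackRefSize = 4;   // sizeof(StackReference<ArtMethod>)
  const size_t callee_frame = 96;   // callee-save frame size
  const size_t scope_size = 40;     // HandleScope::SizeOf(handle_refs)

  // Callee saves + handle scope - whole method pointer + 32-bit method
  // reference, rounded up to the stack alignment.
  size_t frame = RoundUp(callee_frame + scope_size - kPointerSize + kStackRefSize,
                         kStackAlignment);
  assert(frame == 144);  // 96 + 40 - 8 + 4 = 132, rounds up to 144.
  return 0;
}
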
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index af544fd..c01fc72 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -229,8 +229,8 @@
   return 0;
 }
 
-uint32_t ArtMethod::FindCatchBlock(Handle<Class>& exception_type, uint32_t dex_pc,
-                                   bool* has_no_move_exception, bool* exc_changed) {
+uint32_t ArtMethod::FindCatchBlock(Handle<Class> exception_type, uint32_t dex_pc,
+                                   bool* has_no_move_exception) {
   MethodHelper mh(this);
   const DexFile::CodeItem* code_item = mh.GetCodeItem();
   // Set aside the exception while we resolve its type.
@@ -251,17 +251,16 @@
     }
     // Does this catch exception type apply?
     Class* iter_exception_type = mh.GetClassFromTypeIdx(iter_type_idx);
-    if (iter_exception_type == nullptr) {
-      // Now have a NoClassDefFoundError as exception.
+    if (UNLIKELY(iter_exception_type == nullptr)) {
+      // Now have a NoClassDefFoundError as exception. Ignore it in case the exception class was
+      // removed by a ProGuard-like tool.
       // Note: this is not RI behavior. RI would have failed when loading the class.
-      *exc_changed = true;
-
-      // TODO: Add old exception as suppressed.
+      self->ClearException();
+      // Delete any long jump context, as this routine is called during a stack walk that will
+      // release its in-use context at the end.
+      delete self->GetLongJumpContext();
       LOG(WARNING) << "Unresolved exception class when finding catch block: "
-        << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx);
-
-      // Return immediately.
-      return DexFile::kDexNoIndex;
+        << DescriptorToDot(mh.GetTypeDescriptorFromTypeIdx(iter_type_idx));
     } else if (iter_exception_type->IsAssignableFrom(exception_type.Get())) {
       found_dex_pc = it.GetHandlerAddress();
       break;
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 34fe0bf..f901512 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -398,11 +398,8 @@
   // Find the catch block for the given exception type and dex_pc. When a catch block is found,
   // indicates whether the found catch block is responsible for clearing the exception or whether
   // a move-exception instruction is present.
-  // In the process of finding a catch block we might trigger resolution errors. This is flagged
-  // by exc_changed, which indicates that a different exception is now stored in the thread and
-  // should be reloaded.
-  uint32_t FindCatchBlock(Handle<Class>& exception_type, uint32_t dex_pc,
-                          bool* has_no_move_exception, bool* exc_changed)
+  uint32_t FindCatchBlock(Handle<Class> exception_type, uint32_t dex_pc,
+                          bool* has_no_move_exception)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static void SetClass(Class* java_lang_reflect_ArtMethod);
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index d8591cc..b1de2b6 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -39,10 +39,8 @@
   java_lang_StackTraceElement_ = NULL;
 }
 
-StackTraceElement* StackTraceElement::Alloc(Thread* self,
-                                            Handle<String>& declaring_class,
-                                            Handle<String>& method_name,
-                                            Handle<String>& file_name,
+StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declaring_class,
+                                            Handle<String> method_name, Handle<String> file_name,
                                             int32_t line_number) {
   StackTraceElement* trace =
       down_cast<StackTraceElement*>(GetStackTraceElement()->AllocObject(self));
@@ -57,8 +55,8 @@
 }
 
 template<bool kTransactionActive>
-void StackTraceElement::Init(Handle<String>& declaring_class, Handle<String>& method_name,
-                             Handle<String>& file_name, int32_t line_number) {
+void StackTraceElement::Init(Handle<String> declaring_class, Handle<String> method_name,
+                             Handle<String> file_name, int32_t line_number) {
   SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_),
                                      declaring_class.Get());
   SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_),
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 22d9b71..e094e8b 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -46,10 +46,8 @@
     return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
   }
 
-  static StackTraceElement* Alloc(Thread* self,
-                                  Handle<String>& declaring_class,
-                                  Handle<String>& method_name,
-                                  Handle<String>& file_name,
+  static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class,
+                                  Handle<String> method_name, Handle<String> file_name,
                                   int32_t line_number)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -70,8 +68,8 @@
   int32_t line_number_;
 
   template<bool kTransactionActive>
-  void Init(Handle<String>& declaring_class, Handle<String>& method_name,
-            Handle<String>& file_name, int32_t line_number)
+  void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name,
+            int32_t line_number)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static Class* java_lang_StackTraceElement_;
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index ee719b4..1d79106 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -131,7 +131,7 @@
   return Alloc(self, array);
 }
 
-String* String::Alloc(Thread* self, const Handle<CharArray>& array) {
+String* String::Alloc(Thread* self, Handle<CharArray> array) {
   // Hold reference in case AllocObject causes GC.
   String* string = down_cast<String*>(GetJavaLangString()->AllocObject(self));
   if (LIKELY(string != nullptr)) {
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 169b671..6c3015f 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -137,7 +137,7 @@
   static String* Alloc(Thread* self, int32_t utf16_length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static String* Alloc(Thread* self, const Handle<CharArray>& array)
+  static String* Alloc(Thread* self, Handle<CharArray> array)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetArray(CharArray* new_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index f783edb..58e6dd4 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -111,7 +111,7 @@
   MutexLock mu(self, monitor_lock_);  // Uncontended mutex acquisition as monitor isn't yet public.
   CHECK(owner_ == nullptr || owner_ == self || owner_->IsSuspended());
   // Propagate the lock state.
-  LockWord lw(obj_->GetLockWord(false));
+  LockWord lw(GetObject()->GetLockWord(false));
   switch (lw.GetState()) {
     case LockWord::kThinLocked: {
       CHECK_EQ(owner_->GetThreadId(), lw.ThinLockOwner());
@@ -137,7 +137,7 @@
   }
   LockWord fat(this);
   // Publish the updated lock word, which may race with other threads.
-  bool success = obj_->CasLockWord(lw, fat);
+  bool success = GetObject()->CasLockWord(lw, fat);
   // Lock profiling.
   if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
     locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_);
@@ -226,9 +226,9 @@
     // Do this before releasing the lock so that we don't get deflated.
     ++num_waiters_;
     monitor_lock_.Unlock(self);  // Let go of locks in order.
+    self->SetMonitorEnterObject(GetObject());
     {
       ScopedThreadStateChange tsc(self, kBlocked);  // Change to blocked and give up mutator_lock_.
-      self->SetMonitorEnterObject(obj_);
       MutexLock mu2(self, monitor_lock_);  // Reacquire monitor_lock_ without mutator_lock_ for Wait.
       if (owner_ != NULL) {  // Did the owner_ give the lock up?
         monitor_contenders_.Wait(self);  // Still contended so wait.
@@ -249,8 +249,8 @@
           }
         }
       }
-      self->SetMonitorEnterObject(nullptr);
     }
+    self->SetMonitorEnterObject(nullptr);
     monitor_lock_.Lock(self);  // Reacquire locks in order.
     --num_waiters_;
   }
@@ -363,7 +363,7 @@
     // We don't own this, so we're not allowed to unlock it.
     // The JNI spec says that we should throw IllegalMonitorStateException
     // in this case.
-    FailedUnlock(obj_, self, owner, this);
+    FailedUnlock(GetObject(), self, owner, this);
     return false;
   }
   return true;
@@ -637,7 +637,7 @@
   }
 }
 
-void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object>& obj, LockWord lock_word,
+void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
                                 uint32_t hash_code) {
   DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
   uint32_t owner_thread_id = lock_word.ThinLockOwner();
@@ -895,7 +895,7 @@
     MutexLock mu(self, *thread->GetWaitMutex());
     Monitor* monitor = thread->GetWaitMonitor();
     if (monitor != nullptr) {
-      pretty_object = monitor->obj_;
+      pretty_object = monitor->GetObject();
     }
   } else if (state == kBlocked) {
     wait_message = "  - waiting to lock ";
@@ -983,7 +983,7 @@
   // Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
   // the locks held in this stack frame.
   std::vector<uint32_t> monitor_enter_dex_pcs;
-  verifier::MethodVerifier::FindLocksAtDexPc(m, stack_visitor->GetDexPc(), monitor_enter_dex_pcs);
+  verifier::MethodVerifier::FindLocksAtDexPc(m, stack_visitor->GetDexPc(), &monitor_enter_dex_pcs);
   if (monitor_enter_dex_pcs.empty()) {
     return;
   }
@@ -1101,12 +1101,13 @@
   MutexLock mu(Thread::Current(), monitor_list_lock_);
   for (auto it = list_.begin(); it != list_.end(); ) {
     Monitor* m = *it;
-    mirror::Object* obj = m->GetObject();
+    // Disable the read barrier in GetObject() as this is called by the GC.
+    mirror::Object* obj = m->GetObject<kWithoutReadBarrier>();
     // The object of a monitor can be null if we have deflated it.
     mirror::Object* new_obj = obj != nullptr ? callback(obj, arg) : nullptr;
     if (new_obj == nullptr) {
       VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
-                    << m->GetObject();
+                    << obj;
       delete m;
       it = list_.erase(it);
     } else {
diff --git a/runtime/monitor.h b/runtime/monitor.h
index bc1b2ed4..bd0e23c 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -27,6 +27,7 @@
 #include "atomic.h"
 #include "base/mutex.h"
 #include "object_callbacks.h"
+#include "read_barrier.h"
 #include "thread_state.h"
 
 namespace art {
@@ -92,8 +93,9 @@
 
   static bool IsValidLockWord(LockWord lock_word);
 
-  mirror::Object* GetObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return obj_;
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  mirror::Object* GetObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return ReadBarrier::BarrierForWeakRoot<mirror::Object, kReadBarrierOption>(&obj_);
   }
 
   void SetObject(mirror::Object* object);
@@ -114,7 +116,7 @@
     return monitor_id_;
   }
 
-  static void InflateThinLocked(Thread* self, Handle<mirror::Object>& obj, LockWord lock_word,
+  static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
                                 uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
 
   static bool Deflate(Thread* self, mirror::Object* obj)
@@ -190,7 +192,9 @@
   // Owner's recursive lock depth.
   int lock_count_ GUARDED_BY(monitor_lock_);
 
-  // What object are we part of.
+  // What object are we part of. This is a weak root. Do not access
+  // it directly; use GetObject() to read it so it will be guarded
+  // by a read barrier.
   mirror::Object* obj_;
 
   // Threads currently waiting on this monitor.
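
The GetObject() accessor above applies the read barrier by default and lets GC-side callers opt out with GetObject<kWithoutReadBarrier>(). A compilable sketch of that accessor pattern with simplified types (the barrier body is elided, as it is in the patch itself):

#include <cassert>

enum ReadBarrierOption { kWithReadBarrier, kWithoutReadBarrier };

struct Object { int value; };

// Mutator code calls GetObject() and gets the barrier by default; GC code
// calls GetObject<kWithoutReadBarrier>() because it maintains the weak
// root itself.
class MonitorSketch {
 public:
  explicit MonitorSketch(Object* obj) : obj_(obj) {}

  template <ReadBarrierOption kOption = kWithReadBarrier>
  Object* GetObject() {
    if (kOption == kWithReadBarrier) {
      // A real barrier would mark/forward the reference here.
    }
    return obj_;
  }

 private:
  Object* obj_;  // Weak root: only read through GetObject().
};

int main() {
  Object o{7};
  MonitorSketch m(&o);
  assert(m.GetObject() == &o);                       // Mutator path.
  assert(m.GetObject<kWithoutReadBarrier>() == &o);  // GC path.
  return 0;
}
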
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 2572938..a369365 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -67,7 +67,8 @@
     return nullptr;
   }
   Runtime* runtime = Runtime::Current();
-  mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(), element_class);
+  mirror::Class* array_class =
+      runtime->GetClassLinker()->FindArrayClass(soa.Self(), &element_class);
   if (UNLIKELY(array_class == nullptr)) {
     return nullptr;
   }
@@ -90,7 +91,7 @@
     return nullptr;
   }
   Runtime* runtime = Runtime::Current();
-  mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(), element_class);
+  mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(), &element_class);
   if (UNLIKELY(array_class == nullptr)) {
     return nullptr;
   }
@@ -220,7 +221,7 @@
 }
 
 // Based on ClassLinker::ResolveString.
-static void PreloadDexCachesResolveString(Handle<mirror::DexCache>& dex_cache, uint32_t string_idx,
+static void PreloadDexCachesResolveString(Handle<mirror::DexCache> dex_cache, uint32_t string_idx,
                                           StringTable& strings)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::String* string = dex_cache->GetResolvedString(string_idx);
@@ -266,8 +267,7 @@
 }
 
 // Based on ClassLinker::ResolveField.
-static void PreloadDexCachesResolveField(Handle<mirror::DexCache>& dex_cache,
-                                         uint32_t field_idx,
+static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx,
                                          bool is_static)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtField* field = dex_cache->GetResolvedField(field_idx);
@@ -295,8 +295,7 @@
 }
 
 // Based on ClassLinker::ResolveMethod.
-static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache>& dex_cache,
-                                          uint32_t method_idx,
+static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx,
                                           InvokeType invoke_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx);
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index db77437..eae4584 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -46,14 +46,14 @@
 static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementClass, jint length) {
   ScopedFastNativeObjectAccess soa(env);
   DCHECK(javaElementClass != NULL);
-  mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
   if (UNLIKELY(length < 0)) {
     ThrowNegativeArraySizeException(length);
     return NULL;
   }
+  mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
   Runtime* runtime = Runtime::Current();
   ClassLinker* class_linker = runtime->GetClassLinker();
-  mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), element_class);
+  mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), &element_class);
   if (UNLIKELY(array_class == NULL)) {
     CHECK(soa.Self()->IsExceptionPending());
     return NULL;
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 5d90f1a..e17e60a 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -52,9 +52,15 @@
     jobject internal_trace = self->CreateInternalStackTrace<false>(soa);
     trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
   } else {
-    // Suspend thread to build stack trace.
     ThreadList* thread_list = Runtime::Current()->GetThreadList();
     bool timed_out;
+
+    // Check for a valid thread id.
+    if (thin_lock_id == ThreadList::kInvalidThreadId) {
+      return nullptr;
+    }
+
+    // Suspend thread to build stack trace.
     Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
     if (thread != nullptr) {
       {
diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h
index 822aefa..606d62d 100644
--- a/runtime/native/scoped_fast_native_object_access.h
+++ b/runtime/native/scoped_fast_native_object_access.h
@@ -31,7 +31,7 @@
     SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
      : ScopedObjectAccessAlreadyRunnable(env) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
-    DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsFastNative());
+    DCHECK(Self()->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr()->IsFastNative());
     // Don't work with raw objects in non-runnable states.
     DCHECK_EQ(Self()->GetState(), kRunnable);
   }
diff --git a/runtime/oat.cc b/runtime/oat.cc
index cb9334a..4c4dddb 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
 namespace art {
 
 const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '2', '8', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '3', '1', '\0' };
 
 OatHeader::OatHeader() {
   memset(this, 0, sizeof(*this));
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 2c7fb75..c3ef5cf 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -15,6 +15,7 @@
  */
 
 #include "parsed_options.h"
+#include "utils.h"
 #ifdef HAVE_ANDROID_OS
 #include "cutils/properties.h"
 #endif
@@ -263,7 +264,7 @@
 #ifdef HAVE_ANDROID_OS
   {
     char buf[PROP_VALUE_MAX];
-    property_get("dalvik.vm.implicit_checks", buf, "none");
+    property_get("dalvik.vm.implicit_checks", buf, "null,stack");
     std::string checks(buf);
     std::vector<std::string> checkvec;
     Split(checks, ',', checkvec);
@@ -533,7 +534,7 @@
       Trace::SetDefaultClockSource(kProfilerClockSourceWall);
     } else if (option == "-Xprofile:dualclock") {
       Trace::SetDefaultClockSource(kProfilerClockSourceDual);
-    } else if (StartsWith(option, "-Xprofile:")) {
+    } else if (StartsWith(option, "-Xprofile-filename:")) {
       if (!ParseStringAfterChar(option, ':', &profile_output_filename_)) {
         return false;
       }
@@ -604,6 +605,10 @@
           return false;
         }
       }
+    } else if (StartsWith(option, "-Xcompiler:")) {
+      if (!ParseStringAfterChar(option, ':', &compiler_executable_)) {
+        return false;
+      }
     } else if (option == "-Xcompiler-option") {
       i++;
       if (i == options.size()) {
@@ -786,11 +791,12 @@
   UsageMessage(stream, "  -Xmethod-trace\n");
   UsageMessage(stream, "  -Xmethod-trace-file:filename");
   UsageMessage(stream, "  -Xmethod-trace-file-size:integervalue\n");
-  UsageMessage(stream, "  -Xprofile=filename\n");
+  UsageMessage(stream, "  -Xprofile-filename:filename\n");
   UsageMessage(stream, "  -Xprofile-period:integervalue\n");
   UsageMessage(stream, "  -Xprofile-duration:integervalue\n");
   UsageMessage(stream, "  -Xprofile-interval:integervalue\n");
-  UsageMessage(stream, "  -Xprofile-backoff:integervalue\n");
+  UsageMessage(stream, "  -Xprofile-backoff:doublevalue\n");
+  UsageMessage(stream, "  -Xcompiler:filename\n");
   UsageMessage(stream, "  -Xcompiler-option dex2oat-option\n");
   UsageMessage(stream, "  -Ximage-compiler-option dex2oat-option\n");
   UsageMessage(stream, "\n");
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index e0b0fb5..25fc12a 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -74,6 +74,7 @@
   void (*hook_exit_)(jint status);
   void (*hook_abort_)();
   std::vector<std::string> properties_;
+  std::string compiler_executable_;
   std::vector<std::string> compiler_options_;
   std::vector<std::string> image_compiler_options_;
   bool profile_;
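
With compiler_executable_ plumbed through ParsedOptions (and surfaced by Runtime::GetCompilerExecutable() below), a non-default dex2oat binary can be selected at VM startup. A hypothetical invocation, for illustration only; the classpath and main class are made up:

dalvikvm -Xcompiler:/system/bin/dex2oatd -cp /data/app/example.jar Main
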
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index 6e33f9d..5459ce3 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -193,7 +193,7 @@
 
       valid_samples += barrier_count;
 
-      ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
+      ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
 
       // Wait for the barrier to be crossed by all runnable threads.  This wait
       // is done with a timeout so that we can detect problems with the checkpoint
@@ -211,13 +211,11 @@
       // code.  Crash the process in this case.
       CHECK_LT(waitdiff_us, kWaitTimeoutUs);
 
-      self->SetState(old_state);
-
       // Update the current time.
       now_us = MicroTime();
     }
 
-    if (valid_samples > 0 && !ShuttingDown(self)) {
+    if (valid_samples > 0) {
       // After the profile has been taken, write it out.
       ScopedObjectAccess soa(self);   // Acquire the mutator lock.
       uint32_t size = profiler->WriteProfile();
@@ -335,6 +333,7 @@
   pthread_t profiler_pthread = 0U;
   {
     MutexLock trace_mu(Thread::Current(), *Locks::profiler_lock_);
+    CHECK(!shutting_down_);
     profiler = profiler_;
     shutting_down_ = true;
     profiler_pthread = profiler_pthread_;
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 8300195..b9cec40 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -16,25 +16,105 @@
 
 #include "quick_exception_handler.h"
 
-#include "catch_block_stack_visitor.h"
-#include "deoptimize_stack_visitor.h"
+#include "dex_instruction.h"
 #include "entrypoints/entrypoint_utils.h"
-#include "mirror/art_method-inl.h"
 #include "handle_scope-inl.h"
+#include "mirror/art_method-inl.h"
+#include "verifier/method_verifier.h"
 
 namespace art {
 
+static constexpr bool kDebugExceptionDelivery = false;
+static constexpr size_t kInvalidFrameId = 0xffffffff;
+
 QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimization)
   : self_(self), context_(self->GetLongJumpContext()), is_deoptimization_(is_deoptimization),
     method_tracing_active_(is_deoptimization ||
                            Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
-    handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_dex_pc_(0),
-    clear_exception_(false), handler_frame_id_(kInvalidFrameId) {
+    handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_method_(nullptr),
+    handler_dex_pc_(0), clear_exception_(false), handler_frame_id_(kInvalidFrameId) {
 }
 
+// Finds catch handler or prepares for deoptimization.
+class CatchBlockStackVisitor FINAL : public StackVisitor {
+ public:
+  CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
+                         QuickExceptionHandler* exception_handler)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : StackVisitor(self, context), self_(self), exception_(exception),
+        exception_handler_(exception_handler) {
+  }
+
+  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* method = GetMethod();
+    exception_handler_->SetHandlerFrameId(GetFrameId());
+    if (method == nullptr) {
+      // This is the upcall; we remember the frame and last pc so that we may long jump to them.
+      exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
+      exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+      uint32_t next_dex_pc;
+      mirror::ArtMethod* next_art_method;
+      bool has_next = GetNextMethodAndDexPc(&next_art_method, &next_dex_pc);
+      // Report the method that did the down call as the handler.
+      exception_handler_->SetHandlerDexPc(next_dex_pc);
+      exception_handler_->SetHandlerMethod(next_art_method);
+      if (!has_next) {
+        // No next method? Check that the exception handler is set up for the unhandled
+        // exception case.
+        DCHECK_EQ(0U, exception_handler_->GetHandlerDexPc());
+        DCHECK(nullptr == exception_handler_->GetHandlerMethod());
+      }
+      return false;  // End stack walk.
+    }
+    if (method->IsRuntimeMethod()) {
+      // Ignore callee save method.
+      DCHECK(method->IsCalleeSaveMethod());
+      return true;
+    }
+    return HandleTryItems(method);
+  }
+
+ private:
+  bool HandleTryItems(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    uint32_t dex_pc = DexFile::kDexNoIndex;
+    if (!method->IsNative()) {
+      dex_pc = GetDexPc();
+    }
+    if (dex_pc != DexFile::kDexNoIndex) {
+      bool clear_exception = false;
+      StackHandleScope<1> hs(Thread::Current());
+      Handle<mirror::Class> to_find(hs.NewHandle((*exception_)->GetClass()));
+      uint32_t found_dex_pc = method->FindCatchBlock(to_find, dex_pc, &clear_exception);
+      exception_handler_->SetClearException(clear_exception);
+      if (found_dex_pc != DexFile::kDexNoIndex) {
+        exception_handler_->SetHandlerMethod(method);
+        exception_handler_->SetHandlerDexPc(found_dex_pc);
+        exception_handler_->SetHandlerQuickFramePc(method->ToNativePc(found_dex_pc));
+        exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+        return false;  // End stack walk.
+      }
+    }
+    return true;  // Continue stack walk.
+  }
+
+  Thread* const self_;
+  // The exception we're looking for the catch block of.
+  Handle<mirror::Throwable>* exception_;
+  // The quick exception handler we're visiting for.
+  QuickExceptionHandler* const exception_handler_;
+
+  DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
+};
+
 void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
                                       mirror::Throwable* exception) {
   DCHECK(!is_deoptimization_);
+  if (kDebugExceptionDelivery) {
+    mirror::String* msg = exception->GetDetailMessage();
+    std::string str_msg(msg != nullptr ? msg->ToModifiedUtf8() : "");
+    self_->DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
+                     << ": " << str_msg << "\n");
+  }
   StackHandleScope<1> hs(self_);
   Handle<mirror::Throwable> exception_ref(hs.NewHandle(exception));
 
@@ -42,14 +122,14 @@
   CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this);
   visitor.WalkStack(true);
 
-  mirror::ArtMethod* catch_method = *handler_quick_frame_;
   if (kDebugExceptionDelivery) {
-    if (catch_method == nullptr) {
+    if (handler_quick_frame_->AsMirrorPtr() == nullptr) {
       LOG(INFO) << "Handler is upcall";
-    } else {
-      const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
-      int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
-      LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
+    }
+    if (handler_method_ != nullptr) {
+      const DexFile& dex_file = *handler_method_->GetDeclaringClass()->GetDexCache()->GetDexFile();
+      int line_number = dex_file.GetLineNumFromPC(handler_method_, handler_dex_pc_);
+      LOG(INFO) << "Handler: " << PrettyMethod(handler_method_) << " (line: " << line_number << ")";
     }
   }
   if (clear_exception_) {
@@ -62,12 +142,94 @@
   // The debugger may suspend this thread and walk its stack. Let's do this before popping
   // instrumentation frames.
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-  instrumentation->ExceptionCaughtEvent(self_, throw_location, catch_method, handler_dex_pc_,
+  instrumentation->ExceptionCaughtEvent(self_, throw_location, handler_method_, handler_dex_pc_,
                                         exception_ref.Get());
 }
 
+// Prepares deoptimization.
+class DeoptimizeStackVisitor FINAL : public StackVisitor {
+ public:
+  DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : StackVisitor(self, context), self_(self), exception_handler_(exception_handler),
+        prev_shadow_frame_(nullptr) {
+    CHECK(!self_->HasDeoptimizationShadowFrame());
+  }
+
+  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    exception_handler_->SetHandlerFrameId(GetFrameId());
+    mirror::ArtMethod* method = GetMethod();
+    if (method == nullptr) {
+      // This is the upcall; we remember the frame and last pc so that we may long jump to them.
+      exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
+      exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+      return false;  // End stack walk.
+    } else if (method->IsRuntimeMethod()) {
+      // Ignore callee save method.
+      DCHECK(method->IsCalleeSaveMethod());
+      return true;
+    } else {
+      return HandleDeoptimization(method);
+    }
+  }
+
+ private:
+  bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    MethodHelper mh(m);
+    const DexFile::CodeItem* code_item = mh.GetCodeItem();
+    CHECK(code_item != nullptr);
+    uint16_t num_regs = code_item->registers_size_;
+    uint32_t dex_pc = GetDexPc();
+    const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
+    uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
+    ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
+    StackHandleScope<2> hs(self_);
+    Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+    Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
+    verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
+                                      &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
+                                      m->GetAccessFlags(), false, true, true);
+    verifier.Verify();
+    std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
+    for (uint16_t reg = 0; reg < num_regs; ++reg) {
+      VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
+      switch (kind) {
+        case kUndefined:
+          new_frame->SetVReg(reg, 0xEBADDE09);
+          break;
+        case kConstant:
+          new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
+          break;
+        case kReferenceVReg:
+          new_frame->SetVRegReference(reg,
+                                      reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
+          break;
+        default:
+          new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+          break;
+      }
+    }
+    if (prev_shadow_frame_ != nullptr) {
+      prev_shadow_frame_->SetLink(new_frame);
+    } else {
+      self_->SetDeoptimizationShadowFrame(new_frame);
+    }
+    prev_shadow_frame_ = new_frame;
+    return true;
+  }
+
+  Thread* const self_;
+  QuickExceptionHandler* const exception_handler_;
+  ShadowFrame* prev_shadow_frame_;
+
+  DISALLOW_COPY_AND_ASSIGN(DeoptimizeStackVisitor);
+};
+
 void QuickExceptionHandler::DeoptimizeStack() {
   DCHECK(is_deoptimization_);
+  if (kDebugExceptionDelivery) {
+    self_->DumpStack(LOG(INFO) << "Deoptimizing: ");
+  }
 
   DeoptimizeStackVisitor visitor(self_, context_, this);
   visitor.WalkStack(true);
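
Both visitors moved into this file follow the same StackVisitor protocol: WalkStack() invokes VisitFrame() once per frame from the top of the stack down and stops as soon as VisitFrame() returns false. A self-contained sketch of that protocol with simplified frame data (not the real StackVisitor API):

#include <cassert>
#include <vector>

struct Frame { bool is_runtime_method; bool has_catch_handler; };

class StackVisitorSketch {
 public:
  explicit StackVisitorSketch(const std::vector<Frame>& stack) : stack_(stack) {}
  virtual ~StackVisitorSketch() = default;

  void WalkStack() {
    for (const Frame& frame : stack_) {
      if (!VisitFrame(frame)) break;  // Returning false ends the walk early.
    }
  }

  virtual bool VisitFrame(const Frame& frame) = 0;

 private:
  const std::vector<Frame>& stack_;
};

class CatchFinder final : public StackVisitorSketch {
 public:
  using StackVisitorSketch::StackVisitorSketch;

  bool VisitFrame(const Frame& frame) override {
    if (frame.is_runtime_method) return true;  // Skip callee-save frames.
    if (frame.has_catch_handler) {
      found_ = true;
      return false;  // Handler found: end the stack walk.
    }
    return true;  // Keep unwinding.
  }

  bool found_ = false;
};

int main() {
  std::vector<Frame> stack = {{true, false}, {false, false}, {false, true}};
  CatchFinder finder(stack);
  finder.WalkStack();
  assert(finder.found_);
  return 0;
}
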
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index ef3766c..a4229b3 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -19,6 +19,7 @@
 
 #include "base/logging.h"
 #include "base/mutex.h"
+#include "stack.h"  // StackReference
 
 namespace art {
 
@@ -31,9 +32,6 @@
 class ThrowLocation;
 class ShadowFrame;
 
-static constexpr bool kDebugExceptionDelivery = false;
-static constexpr size_t kInvalidFrameId = 0xffffffff;
-
 // Manages exception delivery for Quick backend. Not used by Portable backend.
 class QuickExceptionHandler {
  public:
@@ -50,7 +48,7 @@
   void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetHandlerQuickFrame(mirror::ArtMethod** handler_quick_frame) {
+  void SetHandlerQuickFrame(StackReference<mirror::ArtMethod>* handler_quick_frame) {
     handler_quick_frame_ = handler_quick_frame;
   }
 
@@ -58,6 +56,18 @@
     handler_quick_frame_pc_ = handler_quick_frame_pc;
   }
 
+  mirror::ArtMethod* GetHandlerMethod() const {
+    return handler_method_;
+  }
+
+  void SetHandlerMethod(mirror::ArtMethod* handler_quick_method) {
+    handler_method_ = handler_quick_method;
+  }
+
+  uint32_t GetHandlerDexPc() const {
+    return handler_dex_pc_;
+  }
+
   void SetHandlerDexPc(uint32_t dex_pc) {
     handler_dex_pc_ = dex_pc;
   }
@@ -77,10 +87,12 @@
   // Is method tracing active?
   const bool method_tracing_active_;
   // Quick frame with found handler or last frame if no handler found.
-  mirror::ArtMethod** handler_quick_frame_;
+  StackReference<mirror::ArtMethod>* handler_quick_frame_;
   // PC to branch to for the handler.
   uintptr_t handler_quick_frame_pc_;
-  // Associated dex PC.
+  // The handler method to report to the debugger.
+  mirror::ArtMethod* handler_method_;
+  // The handler's dex PC; zero implies an uncaught exception.
   uint32_t handler_dex_pc_;
   // Should the exception be cleared as the catch block has no move-exception?
   bool clear_exception_;
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 88e2f8f..e252b7b 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -43,6 +43,21 @@
   }
 }
 
+template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
+inline MirrorType* ReadBarrier::BarrierForWeakRoot(MirrorType** weak_root) {
+  MirrorType* ref = *weak_root;
+  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
+  if (with_read_barrier && kUseBakerReadBarrier) {
+    // To be implemented.
+    return ref;
+  } else if (with_read_barrier && kUseBrooksReadBarrier) {
+    // To be implemented.
+    return ref;
+  } else {
+    return ref;
+  }
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_READ_BARRIER_INL_H_
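
The helper above picks the barrier flavor at compile time; with every barrier flag off, all branches fold to a plain load of the weak root. A standalone sketch mirroring its shape (illustrative config flags, barrier bodies still to be implemented, as in the patch):

#include <cassert>

enum ReadBarrierOption { kWithReadBarrier, kWithoutReadBarrier };
constexpr bool kUseBakerReadBarrier = false;   // Illustrative config flags.
constexpr bool kUseBrooksReadBarrier = false;

template <typename T, ReadBarrierOption kOption = kWithReadBarrier>
T* BarrierForWeakRoot(T** weak_root) {
  T* ref = *weak_root;
  constexpr bool with_rb = (kOption == kWithReadBarrier);
  if (with_rb && kUseBakerReadBarrier) {
    return ref;  // Baker barrier would go here (to be implemented).
  } else if (with_rb && kUseBrooksReadBarrier) {
    return ref;  // Brooks barrier would go here (to be implemented).
  }
  return ref;  // No barrier configured: plain load.
}

int main() {
  int x = 1;
  int* root = &x;
  assert((BarrierForWeakRoot<int>(&root)) == &x);
  assert((BarrierForWeakRoot<int, kWithoutReadBarrier>(&root)) == &x);
  return 0;
}
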
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 73c3d43..7232a3f 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -37,6 +37,10 @@
   ALWAYS_INLINE static MirrorType* Barrier(
       mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  ALWAYS_INLINE static MirrorType* BarrierForWeakRoot(MirrorType** weak_root)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 }  // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 5ab22f6..68b10cc 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -165,6 +165,11 @@
     }
     shutting_down_ = true;
   }
+  // Shut down background profiler before the runtime exits.
+  if (profile_) {
+    BackgroundMethodSamplingProfiler::Shutdown();
+  }
+
   Trace::Shutdown();
 
   // Make sure to let the GC complete if it is running.
@@ -366,6 +371,15 @@
   return env->NewGlobalRef(system_class_loader.get());
 }
 
+std::string Runtime::GetCompilerExecutable() const {
+  if (!compiler_executable_.empty()) {
+    return compiler_executable_;
+  }
+  std::string compiler_executable(GetAndroidRoot());
+  compiler_executable += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
+  return compiler_executable;
+}
+
 bool Runtime::Start() {
   VLOG(startup) << "Runtime::Start entering";
 
@@ -532,6 +546,7 @@
   default_stack_size_ = options->stack_size_;
   stack_trace_file_ = options->stack_trace_file_;
 
+  compiler_executable_ = options->compiler_executable_;
   compiler_options_ = options->compiler_options_;
   image_compiler_options_ = options->image_compiler_options_;
 
@@ -548,9 +563,20 @@
     GetInstrumentation()->ForceInterpretOnly();
   }
 
-  if (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck |
-        ParsedOptions::kExplicitNullCheck |
-        ParsedOptions::kExplicitStackOverflowCheck) || kEnableJavaStackTraceHandler) {
+  bool implicit_checks_supported = false;
+  switch (kRuntimeISA) {
+    case kArm:
+    case kThumb2:
+      implicit_checks_supported = true;
+      break;
+    default:
+      break;
+  }
+
+  if (implicit_checks_supported &&
+      (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck |
+          ParsedOptions::kExplicitNullCheck |
+          ParsedOptions::kExplicitStackOverflowCheck) || kEnableJavaStackTraceHandler)) {
     fault_manager.Init();
 
     // These need to be in a specific order.  The null point check handler must be
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 55c3878..afb5aa7 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -104,6 +104,8 @@
     return is_explicit_gc_disabled_;
   }
 
+  std::string GetCompilerExecutable() const;
+
   const std::vector<std::string>& GetCompilerOptions() const {
     return compiler_options_;
   }
@@ -490,6 +492,7 @@
   bool is_concurrent_gc_enabled_;
   bool is_explicit_gc_disabled_;
 
+  std::string compiler_executable_;
   std::vector<std::string> compiler_options_;
   std::vector<std::string> image_compiler_options_;
 
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index 190db60..bf3a15e 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -65,6 +65,9 @@
   iterator find(const K& k) { return map_.find(k); }
   const_iterator find(const K& k) const { return map_.find(k); }
 
+  iterator lower_bound(const K& k) { return map_.lower_bound(k); }
+  const_iterator lower_bound(const K& k) const { return map_.lower_bound(k); }
+
   size_type count(const K& k) const { return map_.count(k); }
 
   // Note that unlike std::map's operator[], this doesn't return a reference to the value.
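
lower_bound on an ordered map returns the first entry whose key is not less than the query, which is what range-style lookups need. The semantics match std::map, which SafeMap wraps:

#include <cassert>
#include <map>

int main() {
  std::map<int, const char*> m = {{10, "a"}, {20, "b"}, {30, "c"}};
  auto it = m.lower_bound(15);
  assert(it != m.end() && it->first == 20);  // First key >= 15.
  assert(m.lower_bound(20)->first == 20);    // Exact keys are hit too.
  return 0;
}
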
diff --git a/runtime/stack.cc b/runtime/stack.cc
index be1fba4..ef09816 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -95,6 +95,13 @@
   DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
 }
 
+StackVisitor::StackVisitor(Thread* thread, Context* context, size_t num_frames)
+    : thread_(thread), cur_shadow_frame_(NULL),
+      cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(num_frames), cur_depth_(0),
+      context_(context) {
+  DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+}
+
 uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
   if (cur_shadow_frame_ != NULL) {
     return cur_shadow_frame_->GetDexPC();
@@ -205,16 +212,16 @@
 }
 
 uintptr_t StackVisitor::GetReturnPc() const {
-  mirror::ArtMethod** sp = GetCurrentQuickFrame();
+  byte* sp = reinterpret_cast<byte*>(GetCurrentQuickFrame());
   DCHECK(sp != NULL);
-  byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
+  byte* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
   return *reinterpret_cast<uintptr_t*>(pc_addr);
 }
 
 void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
-  mirror::ArtMethod** sp = GetCurrentQuickFrame();
+  byte* sp = reinterpret_cast<byte*>(GetCurrentQuickFrame());
   CHECK(sp != NULL);
-  byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
+  byte* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
   *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
 }
 
@@ -223,7 +230,7 @@
     explicit NumFramesVisitor(Thread* thread)
         : StackVisitor(thread, NULL), frames(0) {}
 
-    virtual bool VisitFrame() {
+    bool VisitFrame() OVERRIDE {
       frames++;
       return true;
     }
@@ -235,12 +242,47 @@
   return visitor.frames;
 }
 
+bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc) {
+  struct HasMoreFramesVisitor : public StackVisitor {
+    explicit HasMoreFramesVisitor(Thread* thread, size_t num_frames, size_t frame_height)
+        : StackVisitor(thread, nullptr, num_frames), frame_height_(frame_height),
+          found_frame_(false), has_more_frames_(false), next_method_(nullptr), next_dex_pc_(0) {
+    }
+
+    bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      if (found_frame_) {
+        mirror::ArtMethod* method = GetMethod();
+        if (method != nullptr && !method->IsRuntimeMethod()) {
+          has_more_frames_ = true;
+          next_method_ = method;
+          next_dex_pc_ = GetDexPc();
+          return false;  // End stack walk once next method is found.
+        }
+      } else if (GetFrameHeight() == frame_height_) {
+        found_frame_ = true;
+      }
+      return true;
+    }
+
+    size_t frame_height_;
+    bool found_frame_;
+    bool has_more_frames_;
+    mirror::ArtMethod* next_method_;
+    uint32_t next_dex_pc_;
+  };
+  HasMoreFramesVisitor visitor(thread_, GetNumFrames(), GetFrameHeight());
+  visitor.WalkStack(true);
+  *next_method = visitor.next_method_;
+  *next_dex_pc = visitor.next_dex_pc_;
+  return visitor.has_more_frames_;
+}
+
 void StackVisitor::DescribeStack(Thread* thread) {
   struct DescribeStackVisitor : public StackVisitor {
     explicit DescribeStackVisitor(Thread* thread)
         : StackVisitor(thread, NULL) {}
 
-    virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
       LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
       return true;
     }
@@ -307,7 +349,7 @@
     if (cur_quick_frame_ != NULL) {  // Handle quick stack frames.
       // Can't be both a shadow and a quick fragment.
       DCHECK(current_fragment->GetTopShadowFrame() == NULL);
-      mirror::ArtMethod* method = *cur_quick_frame_;
+      mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
       while (method != NULL) {
         SanityCheckFrame();
         bool should_continue = VisitFrame();
@@ -352,9 +394,9 @@
         }
         cur_quick_frame_pc_ = return_pc;
         byte* next_frame = reinterpret_cast<byte*>(cur_quick_frame_) + frame_size;
-        cur_quick_frame_ = reinterpret_cast<mirror::ArtMethod**>(next_frame);
+        cur_quick_frame_ = reinterpret_cast<StackReference<mirror::ArtMethod>*>(next_frame);
         cur_depth_++;
-        method = *cur_quick_frame_;
+        method = cur_quick_frame_->AsMirrorPtr();
       }
     } else if (cur_shadow_frame_ != NULL) {
       do {
diff --git a/runtime/stack.h b/runtime/stack.h
index 2e32f51..fabdd4f 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -429,11 +429,11 @@
     return link_;
   }
 
-  mirror::ArtMethod** GetTopQuickFrame() const {
+  StackReference<mirror::ArtMethod>* GetTopQuickFrame() const {
     return top_quick_frame_;
   }
 
-  void SetTopQuickFrame(mirror::ArtMethod** top) {
+  void SetTopQuickFrame(StackReference<mirror::ArtMethod>* top) {
     DCHECK(top_shadow_frame_ == NULL);
     top_quick_frame_ = top;
   }
@@ -491,7 +491,7 @@
  private:
   ManagedStack* link_;
   ShadowFrame* top_shadow_frame_;
-  mirror::ArtMethod** top_quick_frame_;
+  StackReference<mirror::ArtMethod>* top_quick_frame_;
   uintptr_t top_quick_frame_pc_;
 };
 
@@ -512,17 +512,7 @@
     if (cur_shadow_frame_ != nullptr) {
       return cur_shadow_frame_->GetMethod();
     } else if (cur_quick_frame_ != nullptr) {
-      return *cur_quick_frame_;
-    } else {
-      return nullptr;
-    }
-  }
-
-  mirror::ArtMethod** GetMethodAddress() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    if (cur_shadow_frame_ != nullptr) {
-      return cur_shadow_frame_->GetMethodAddress();
-    } else if (cur_quick_frame_ != nullptr) {
-      return cur_quick_frame_;
+      return cur_quick_frame_->AsMirrorPtr();
     } else {
       return nullptr;
     }
@@ -567,6 +557,10 @@
     return num_frames_;
   }
 
+  // Get the method and dex pc immediately after the one that's currently being visited.
+  bool GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   uint32_t GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -578,7 +572,8 @@
   void SetGPR(uint32_t reg, uintptr_t value);
 
   // This is a fast-path for getting/setting values in a quick frame.
-  uint32_t* GetVRegAddr(mirror::ArtMethod** cur_quick_frame, const DexFile::CodeItem* code_item,
+  uint32_t* GetVRegAddr(StackReference<mirror::ArtMethod>* cur_quick_frame,
+                        const DexFile::CodeItem* code_item,
                         uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
                         uint16_t vreg) const {
     int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
@@ -679,7 +674,7 @@
     return cur_quick_frame_pc_;
   }
 
-  mirror::ArtMethod** GetCurrentQuickFrame() const {
+  StackReference<mirror::ArtMethod>* GetCurrentQuickFrame() const {
     return cur_quick_frame_;
   }
 
@@ -688,7 +683,7 @@
   }
 
   HandleScope* GetCurrentHandleScope() const {
-    mirror::ArtMethod** sp = GetCurrentQuickFrame();
+    StackReference<mirror::ArtMethod>* sp = GetCurrentQuickFrame();
     ++sp;  // Skip Method*; handle scope comes next;
     return reinterpret_cast<HandleScope*>(sp);
   }
@@ -700,13 +695,17 @@
   static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
+  // Private constructor for use when num_frames_ has already been computed.
+  StackVisitor(Thread* thread, Context* context, size_t num_frames)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   instrumentation::InstrumentationStackFrame& GetInstrumentationStackFrame(uint32_t depth) const;
 
   void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   Thread* const thread_;
   ShadowFrame* cur_shadow_frame_;
-  mirror::ArtMethod** cur_quick_frame_;
+  StackReference<mirror::ArtMethod>* cur_quick_frame_;
   uintptr_t cur_quick_frame_pc_;
   // Lazily computed, number of frames in the stack.
   size_t num_frames_;
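
The cur_quick_frame_ slot now holds a StackReference<mirror::ArtMethod>, a 32-bit compressed reference read back through AsMirrorPtr(), so the quick frame layout stays the same on 32- and 64-bit targets. A sketch of the idea; the low-32-bit encoding shown is an assumption for illustration, not the exact ART representation:

#include <cstdint>

// Sketch: a frame slot that is always 4 bytes wide, regardless of the
// native pointer size. The compressed form here is assumed to be the low
// 32 bits of the address.
template <typename T>
class StackReference {
 public:
  T* AsMirrorPtr() const {
    return reinterpret_cast<T*>(static_cast<uintptr_t>(ref_));
  }
  void Assign(T* ptr) {
    ref_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
  }
 private:
  uint32_t ref_ = 0;
};

int main() {
  static_assert(sizeof(StackReference<int>) == sizeof(uint32_t),
                "a frame slot stays 4 bytes on 32- and 64-bit targets");
  // The pointer round trip is exact when addresses fit in 32 bits, so we
  // only demonstrate the size property here to stay portable.
  return 0;
}
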
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 41cfc58..758944c 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1253,7 +1253,8 @@
   // The "kinds" below are sorted by the frequency we expect to encounter them.
   if (kind == kLocal) {
     IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
-    result = locals.Get(ref);
+    // Local references do not need a read barrier.
+    result = locals.Get<kWithoutReadBarrier>(ref);
   } else if (kind == kHandleScopeOrInvalid) {
     // TODO: make stack indirect reference table lookup more efficient.
     // Check if this is a local reference in the handle scope.
@@ -1266,7 +1267,9 @@
     }
   } else if (kind == kGlobal) {
     JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
-    result = vm->globals.SynchronizedGet(const_cast<Thread*>(this), &vm->globals_lock, ref);
+    // Strong global references do not need a read barrier.
+    result = vm->globals.SynchronizedGet<kWithoutReadBarrier>(
+        const_cast<Thread*>(this), &vm->globals_lock, ref);
   } else {
     DCHECK_EQ(kind, kWeakGlobal);
     result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
@@ -1816,7 +1819,6 @@
   QUICK_ENTRY_POINT_INFO(pCmplDouble)
   QUICK_ENTRY_POINT_INFO(pCmplFloat)
   QUICK_ENTRY_POINT_INFO(pFmod)
-  QUICK_ENTRY_POINT_INFO(pSqrt)
   QUICK_ENTRY_POINT_INFO(pL2d)
   QUICK_ENTRY_POINT_INFO(pFmodf)
   QUICK_ENTRY_POINT_INFO(pL2f)
@@ -1865,16 +1867,6 @@
   // resolution.
   ClearException();
   bool is_deoptimization = (exception == GetDeoptimizationException());
-  if (kDebugExceptionDelivery) {
-    if (!is_deoptimization) {
-      mirror::String* msg = exception->GetDetailMessage();
-      std::string str_msg(msg != nullptr ? msg->ToModifiedUtf8() : "");
-      DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
-                << ": " << str_msg << "\n");
-    } else {
-      DumpStack(LOG(INFO) << "Deoptimizing: ");
-    }
-  }
   QuickExceptionHandler exception_handler(this, is_deoptimization);
   if (is_deoptimization) {
     exception_handler.DeoptimizeStack();
@@ -2010,9 +2002,14 @@
 
  private:
   void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    mirror::ArtMethod** method_addr = GetMethodAddress();
-    visitor_(reinterpret_cast<mirror::Object**>(method_addr), 0 /*ignored*/, this);
-    mirror::ArtMethod* m = *method_addr;
+    StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
+    mirror::ArtMethod* m = cur_quick_frame->AsMirrorPtr();
+    mirror::ArtMethod* old_method = m;
+    visitor_(reinterpret_cast<mirror::Object**>(&m), 0 /*ignored*/, this);
+    if (m != old_method) {
+      cur_quick_frame->Assign(m);
+    }
+
     // Process register map (which native and runtime methods don't have)
     if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
       const uint8_t* native_gc_map = m->GetNativeGcMap();
@@ -2033,7 +2030,7 @@
         const VmapTable vmap_table(m->GetVmapTable(code_pointer));
         QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
         // For all dex registers in the bitmap
-        mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
+        StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
         DCHECK(cur_quick_frame != nullptr);
         for (size_t reg = 0; reg < num_regs; ++reg) {
           // Does this register hold a reference?
diff --git a/runtime/thread.h b/runtime/thread.h
index 62fa323..6569a96 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -121,7 +121,7 @@
   // of the stack (lowest memory).  The higher portion of the memory
   // is protected against reads and the lower is available for use while
   // throwing the StackOverflow exception.
-  static constexpr size_t kStackOverflowProtectedSize = 32 * KB;
+  static constexpr size_t kStackOverflowProtectedSize = 16 * KB;
   static constexpr size_t kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
     kStackOverflowReservedBytes;
 
@@ -326,7 +326,7 @@
     tlsPtr_.throw_location = throw_location;
   }
 
-  void ClearException() {
+  void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     tlsPtr_.exception = nullptr;
     tlsPtr_.throw_location.Clear();
   }
@@ -345,7 +345,7 @@
 
   ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetTopOfStack(mirror::ArtMethod** top_method, uintptr_t pc) {
+  void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method, uintptr_t pc) {
     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
     tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
   }
@@ -396,11 +396,11 @@
   // Convert a jobject into a Object*
   mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  mirror::Object* GetMonitorEnterObject() const {
+  mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return tlsPtr_.monitor_enter_object;
   }
 
-  void SetMonitorEnterObject(mirror::Object* obj) {
+  void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     tlsPtr_.monitor_enter_object = obj;
   }
 
@@ -1045,9 +1045,6 @@
     // A cached pthread_t for the pthread underlying this Thread*.
     pthread_t pthread_self;
 
-    // Support for Mutex lock hierarchy bug detection.
-    BaseMutex* held_mutexes[kLockLevelCount];
-
     // If no_thread_suspension_ is > 0, what is causing that assertion.
     const char* last_no_thread_suspension_cause;
 
@@ -1074,6 +1071,9 @@
     // Thread-local allocation stack data/routines.
     mirror::Object** thread_local_alloc_stack_top;
     mirror::Object** thread_local_alloc_stack_end;
+
+    // Support for Mutex lock hierarchy bug detection.
+    BaseMutex* held_mutexes[kLockLevelCount];
   } tlsPtr_;
 
   // Guards the 'interrupted_' and 'wait_monitor_' members.
@@ -1090,6 +1090,7 @@
   friend class Dbg;  // For SetStateUnsafe.
   friend class gc::collector::SemiSpace;  // For getting stack traces.
   friend class Runtime;  // For CreatePeer.
+  friend class QuickExceptionHandler;  // For dumping the stack.
   friend class ScopedThreadStateChange;
   friend class SignalCatcher;  // For SetStateUnsafe.
   friend class StubTest;  // For accessing entrypoints.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 8046500..388c9b4 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -40,8 +40,7 @@
 namespace art {
 
 ThreadList::ThreadList()
-    : allocated_ids_lock_("allocated thread ids lock"),
-      suspend_all_count_(0), debug_suspend_all_count_(0),
+    : suspend_all_count_(0), debug_suspend_all_count_(0),
       thread_exit_cond_("thread exit condition variable", *Locks::thread_list_lock_) {
   CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1)));
 }
@@ -849,7 +848,7 @@
 }
 
 uint32_t ThreadList::AllocThreadId(Thread* self) {
-  MutexLock mu(self, allocated_ids_lock_);
+  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
   for (size_t i = 0; i < allocated_ids_.size(); ++i) {
     if (!allocated_ids_[i]) {
       allocated_ids_.set(i);
@@ -861,7 +860,7 @@
 }
 
 void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
-  MutexLock mu(self, allocated_ids_lock_);
+  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
   --id;  // Zero is reserved to mean "invalid".
   DCHECK(allocated_ids_[id]) << id;
   allocated_ids_.reset(id);
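The id allocator these two hunks touch is a first-fit bitset scan; the change only swaps its guard from a ThreadList member mutex to the global Locks::allocated_thread_ids_lock_. A standalone sketch of the same allocation scheme, with std::mutex standing in for ART's Mutex:

#include <bitset>
#include <cstdint>
#include <mutex>

constexpr size_t kMaxThreadId = 0xFFFF;

std::mutex allocated_thread_ids_lock;
std::bitset<kMaxThreadId> allocated_ids;

// Returns a 1-based thin-lock thread id, or 0 if the table is exhausted.
uint32_t AllocThreadId() {
  std::lock_guard<std::mutex> mu(allocated_thread_ids_lock);
  for (size_t i = 0; i < allocated_ids.size(); ++i) {
    if (!allocated_ids[i]) {
      allocated_ids.set(i);
      return i + 1;  // Zero is reserved to mean "invalid".
    }
  }
  return 0;
}

void ReleaseThreadId(uint32_t id) {
  std::lock_guard<std::mutex> mu(allocated_thread_ids_lock);
  allocated_ids.reset(id - 1);  // Map the 1-based id back to its bit.
}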
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index a574340..d46987a 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -132,7 +132,7 @@
 
  private:
   uint32_t AllocThreadId(Thread* self);
-  void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(allocated_ids_lock_);
+  void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(Locks::allocated_thread_ids_lock_);
 
   bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
   bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
@@ -151,8 +151,7 @@
       LOCKS_EXCLUDED(Locks::thread_list_lock_,
                      Locks::thread_suspend_count_lock_);
 
-  mutable Mutex allocated_ids_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(allocated_ids_lock_);
+  std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(Locks::allocated_thread_ids_lock_);
 
   // The actual list of all threads.
   std::list<Thread*> list_ GUARDED_BY(Locks::thread_list_lock_);
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 6fd86c8..7859126 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -147,7 +147,9 @@
       DCHECK(s != nullptr);
     }
 
-    void Undo(InternTable* intern_table) EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+    void Undo(InternTable* intern_table)
+        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
     void VisitRoots(RootCallback* callback, void* arg);
 
    private:
@@ -169,7 +171,8 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void UndoInternStringTableModifications()
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(log_lock_);
+      EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void VisitObjectLogs(RootCallback* callback, void* arg)
       EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
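The SHARED_LOCKS_REQUIRED/EXCLUSIVE_LOCKS_REQUIRED annotations being added throughout this change are macros over Clang's thread-safety-analysis attributes, so missing-lock bugs surface as -Wthread-safety warnings at compile time. A minimal self-contained example of the underlying attributes (raw __attribute__ spellings rather than ART's macros):

// Compile with: clang++ -Wthread-safety -fsyntax-only example.cc
class __attribute__((capability("mutex"))) Mutex {
 public:
  void Lock() __attribute__((acquire_capability())) {}
  void Unlock() __attribute__((release_capability())) {}
};

Mutex log_lock;
int log_size __attribute__((guarded_by(log_lock)));

// Callers must hold log_lock; calling this without it, or touching log_size
// directly, produces a compile-time warning instead of a latent race.
void AppendToLog() __attribute__((requires_capability(log_lock))) {
  ++log_size;
}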
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 4863b83..b5c07aa 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -121,8 +121,8 @@
 }
 
 MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
-                                                        Handle<mirror::DexCache>& dex_cache,
-                                                        Handle<mirror::ClassLoader>& class_loader,
+                                                        Handle<mirror::DexCache> dex_cache,
+                                                        Handle<mirror::ClassLoader> class_loader,
                                                         const DexFile::ClassDef* class_def,
                                                         bool allow_soft_failures,
                                                         std::string* error) {
@@ -151,7 +151,8 @@
     previous_direct_method_idx = method_idx;
     InvokeType type = it.GetMethodInvokeType(*class_def);
     mirror::ArtMethod* method =
-        linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader, NULL, type);
+        linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader,
+                              NullHandle<mirror::ArtMethod>(), type);
     if (method == NULL) {
       DCHECK(Thread::Current()->IsExceptionPending());
       // We couldn't resolve the method, but continue regardless.
@@ -165,7 +166,8 @@
                                                       it.GetMethodCodeItem(),
                                                       method,
                                                       it.GetMemberAccessFlags(),
-                                                      allow_soft_failures);
+                                                      allow_soft_failures,
+                                                      false);
     if (result != kNoFailure) {
       if (result == kHardFailure) {
         hard_fail = true;
@@ -193,7 +195,8 @@
     previous_virtual_method_idx = method_idx;
     InvokeType type = it.GetMethodInvokeType(*class_def);
     mirror::ArtMethod* method =
-        linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader, NULL, type);
+        linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader,
+                              NullHandle<mirror::ArtMethod>(), type);
     if (method == NULL) {
       DCHECK(Thread::Current()->IsExceptionPending());
       // We couldn't resolve the method, but continue regardless.
@@ -207,7 +210,8 @@
                                                       it.GetMethodCodeItem(),
                                                       method,
                                                       it.GetMemberAccessFlags(),
-                                                      allow_soft_failures);
+                                                      allow_soft_failures,
+                                                      false);
     if (result != kNoFailure) {
       if (result == kHardFailure) {
         hard_fail = true;
@@ -232,38 +236,40 @@
 
 MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
                                                          const DexFile* dex_file,
-                                                         Handle<mirror::DexCache>& dex_cache,
-                                                         Handle<mirror::ClassLoader>& class_loader,
+                                                         Handle<mirror::DexCache> dex_cache,
+                                                         Handle<mirror::ClassLoader> class_loader,
                                                          const DexFile::ClassDef* class_def,
                                                          const DexFile::CodeItem* code_item,
                                                          mirror::ArtMethod* method,
                                                          uint32_t method_access_flags,
-                                                         bool allow_soft_failures) {
+                                                         bool allow_soft_failures,
+                                                         bool need_precise_constants) {
   MethodVerifier::FailureKind result = kNoFailure;
   uint64_t start_ns = NanoTime();
 
-  MethodVerifier verifier_(dex_file, &dex_cache, &class_loader, class_def, code_item,
-                           method_idx, method, method_access_flags, true, allow_soft_failures);
-  if (verifier_.Verify()) {
+  MethodVerifier verifier(dex_file, &dex_cache, &class_loader, class_def, code_item,
+                           method_idx, method, method_access_flags, true, allow_soft_failures,
+                           need_precise_constants);
+  if (verifier.Verify()) {
     // Verification completed, however failures may be pending that didn't cause the verification
     // to hard fail.
-    CHECK(!verifier_.have_pending_hard_failure_);
-    if (verifier_.failures_.size() != 0) {
+    CHECK(!verifier.have_pending_hard_failure_);
+    if (verifier.failures_.size() != 0) {
       if (VLOG_IS_ON(verifier)) {
-          verifier_.DumpFailures(VLOG_STREAM(verifier) << "Soft verification failures in "
+          verifier.DumpFailures(VLOG_STREAM(verifier) << "Soft verification failures in "
                                 << PrettyMethod(method_idx, *dex_file) << "\n");
       }
       result = kSoftFailure;
     }
   } else {
     // Bad method data.
-    CHECK_NE(verifier_.failures_.size(), 0U);
-    CHECK(verifier_.have_pending_hard_failure_);
-    verifier_.DumpFailures(LOG(INFO) << "Verification error in "
+    CHECK_NE(verifier.failures_.size(), 0U);
+    CHECK(verifier.have_pending_hard_failure_);
+    verifier.DumpFailures(LOG(INFO) << "Verification error in "
                                     << PrettyMethod(method_idx, *dex_file) << "\n");
     if (gDebugVerify) {
-      std::cout << "\n" << verifier_.info_messages_.str();
-      verifier_.Dump(std::cout);
+      std::cout << "\n" << verifier.info_messages_.str();
+      verifier.Dump(std::cout);
     }
     result = kHardFailure;
   }
@@ -277,14 +283,14 @@
 
 void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_idx,
                                          const DexFile* dex_file,
-                                         Handle<mirror::DexCache>& dex_cache,
-                                         Handle<mirror::ClassLoader>& class_loader,
+                                         Handle<mirror::DexCache> dex_cache,
+                                         Handle<mirror::ClassLoader> class_loader,
                                          const DexFile::ClassDef* class_def,
                                          const DexFile::CodeItem* code_item,
                                          mirror::ArtMethod* method,
                                          uint32_t method_access_flags) {
   MethodVerifier verifier(dex_file, &dex_cache, &class_loader, class_def, code_item,
-                          dex_method_idx, method, method_access_flags, true, true);
+                          dex_method_idx, method, method_access_flags, true, true, true);
   verifier.Verify();
   verifier.DumpFailures(os);
   os << verifier.info_messages_.str();
@@ -296,7 +302,8 @@
                                const DexFile::ClassDef* class_def,
                                const DexFile::CodeItem* code_item, uint32_t dex_method_idx,
                                mirror::ArtMethod* method, uint32_t method_access_flags,
-                               bool can_load_classes, bool allow_soft_failures)
+                               bool can_load_classes, bool allow_soft_failures,
+                               bool need_precise_constants)
     : reg_types_(can_load_classes),
       work_insn_idx_(-1),
       dex_method_idx_(dex_method_idx),
@@ -317,6 +324,7 @@
       monitor_enter_count_(0),
       can_load_classes_(can_load_classes),
       allow_soft_failures_(allow_soft_failures),
+      need_precise_constants_(need_precise_constants),
       has_check_casts_(false),
       has_virtual_or_interface_invokes_(false) {
   Runtime::Current()->AddMethodVerifier(this);
@@ -329,16 +337,16 @@
 }
 
 void MethodVerifier::FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
-                                      std::vector<uint32_t>& monitor_enter_dex_pcs) {
+                                      std::vector<uint32_t>* monitor_enter_dex_pcs) {
   MethodHelper mh(m);
   StackHandleScope<2> hs(Thread::Current());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
   MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
                           mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), false,
-                          true);
+                          true, false);
   verifier.interesting_dex_pc_ = dex_pc;
-  verifier.monitor_enter_dex_pcs_ = &monitor_enter_dex_pcs;
+  verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs;
   verifier.FindLocksAtDexPc();
 }
 
@@ -354,14 +362,14 @@
 }
 
 mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
-                                                        uint32_t dex_pc) {
+                                                           uint32_t dex_pc) {
   MethodHelper mh(m);
   StackHandleScope<2> hs(Thread::Current());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
   MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
                           mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
-                          true);
+                          true, false);
   return verifier.FindAccessedFieldAtDexPc(dex_pc);
 }
 
@@ -392,7 +400,7 @@
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
   MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
                           mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
-                          true);
+                          true, false);
   return verifier.FindInvokedMethodAtDexPc(dex_pc);
 }
 
@@ -1436,9 +1444,6 @@
   std::unique_ptr<RegisterLine> branch_line;
   std::unique_ptr<RegisterLine> fallthrough_line;
 
-  // We need precise constant types only for deoptimization which happens at runtime.
-  const bool need_precise_constant = !Runtime::Current()->IsCompiler();
-
   switch (inst->Opcode()) {
     case Instruction::NOP:
       /*
@@ -1590,25 +1595,25 @@
     case Instruction::CONST_4: {
       int32_t val = static_cast<int32_t>(inst->VRegB_11n() << 28) >> 28;
       work_line_->SetRegisterType(inst->VRegA_11n(),
-                                  DetermineCat1Constant(val, need_precise_constant));
+                                  DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST_16: {
       int16_t val = static_cast<int16_t>(inst->VRegB_21s());
       work_line_->SetRegisterType(inst->VRegA_21s(),
-                                  DetermineCat1Constant(val, need_precise_constant));
+                                  DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST: {
       int32_t val = inst->VRegB_31i();
       work_line_->SetRegisterType(inst->VRegA_31i(),
-                                  DetermineCat1Constant(val, need_precise_constant));
+                                  DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST_HIGH16: {
       int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
       work_line_->SetRegisterType(inst->VRegA_21h(),
-                                  DetermineCat1Constant(val, need_precise_constant));
+                                  DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
       /* could be long or double; resolved upon use */
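The Handle<mirror::DexCache> and Handle<mirror::ClassLoader> parameters above changed from non-const references to pass-by-value. A Handle is only a view of a slot inside a StackHandleScope, so copying it is cheap, and by-value parameters also accept temporaries such as NullHandle<mirror::ArtMethod>(). A simplified sketch of that shape (types reduced; not ART's definition):

template <typename T>
class Handle {
 public:
  explicit Handle(T** slot) : slot_(slot) {}
  // Copies alias the same GC-visible slot, so passing by value is cheap.
  T* Get() const { return *slot_; }
 private:
  T** slot_;  // Owned by an enclosing StackHandleScope, not by the Handle.
};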
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 495d3c5..a23e80d 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -142,15 +142,15 @@
   /* Verify a class. Returns "kNoFailure" on success. */
   static FailureKind VerifyClass(mirror::Class* klass, bool allow_soft_failures, std::string* error)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static FailureKind VerifyClass(const DexFile* dex_file, Handle<mirror::DexCache>& dex_cache,
-                                 Handle<mirror::ClassLoader>& class_loader,
+  static FailureKind VerifyClass(const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
+                                 Handle<mirror::ClassLoader> class_loader,
                                  const DexFile::ClassDef* class_def,
                                  bool allow_soft_failures, std::string* error)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static void VerifyMethodAndDump(std::ostream& os, uint32_t method_idx, const DexFile* dex_file,
-                                  Handle<mirror::DexCache>& dex_cache,
-                                  Handle<mirror::ClassLoader>& class_loader,
+                                  Handle<mirror::DexCache> dex_cache,
+                                  Handle<mirror::ClassLoader> class_loader,
                                   const DexFile::ClassDef* class_def,
                                   const DexFile::CodeItem* code_item,
                                   mirror::ArtMethod* method, uint32_t method_access_flags)
@@ -185,7 +185,7 @@
   // Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding
   // to the locks held at 'dex_pc' in method 'm'.
   static void FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
-                               std::vector<uint32_t>& monitor_enter_dex_pcs)
+                               std::vector<uint32_t>* monitor_enter_dex_pcs)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns the accessed field corresponding to the quick instruction's field
@@ -208,7 +208,8 @@
   MethodVerifier(const DexFile* dex_file, Handle<mirror::DexCache>* dex_cache,
                  Handle<mirror::ClassLoader>* class_loader, const DexFile::ClassDef* class_def,
                  const DexFile::CodeItem* code_item, uint32_t method_idx, mirror::ArtMethod* method,
-                 uint32_t access_flags, bool can_load_classes, bool allow_soft_failures)
+                 uint32_t access_flags, bool can_load_classes, bool allow_soft_failures,
+                 bool need_precise_constants)
           SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   ~MethodVerifier();
@@ -255,12 +256,12 @@
    *      for code flow problems.
    */
   static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file,
-                                  Handle<mirror::DexCache>& dex_cache,
-                                  Handle<mirror::ClassLoader>& class_loader,
+                                  Handle<mirror::DexCache> dex_cache,
+                                  Handle<mirror::ClassLoader> class_loader,
                                   const DexFile::ClassDef* class_def_idx,
                                   const DexFile::CodeItem* code_item,
                                   mirror::ArtMethod* method, uint32_t method_access_flags,
-                                  bool allow_soft_failures)
+                                  bool allow_soft_failures, bool need_precise_constants)
           SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -672,6 +673,12 @@
   // running and the verifier is called from the class linker.
   const bool allow_soft_failures_;
 
+  // An optimization: instead of generating a unique RegType for every constant, we use imprecise
+  // constants that cover a range of values. This isn't good enough for deoptimization, which
+  // avoids reloading a constant from a register, because the dex instruction set loses the
+  // notion of whether a value belongs in a floating-point or a general-purpose register file.
+  const bool need_precise_constants_;
+
   // Indicates the method being verified contains at least one check-cast or aput-object
   // instruction. Aput-object operations implicitly check for array-store exceptions, similar to
   // check-cast.
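The ambiguity the new need_precise_constants_ field addresses is concrete: a dex `const` loads an untyped 32-bit pattern, so only a precise constant type tells a deoptimizing runtime whether the value belongs in a core or a floating-point register. A small standalone illustration of the ambiguity:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint32_t bits = 0x3F800000;  // One bit pattern a dex `const` might load.

  int32_t as_int;
  float as_float;
  std::memcpy(&as_int, &bits, sizeof(bits));    // 1065353216
  std::memcpy(&as_float, &bits, sizeof(bits));  // 1.0f

  // Same bits, two register files: without precise constant tracking the
  // verifier cannot tell the deoptimizer which interpretation to restore.
  std::printf("%d vs %.1f\n", as_int, as_float);
  return 0;
}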
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 8df1e5d..e24c920 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -929,7 +929,7 @@
     }
     mirror::Class* common_elem = ClassJoin(s_ct, t_ct);
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-    mirror::Class* array_class = class_linker->FindArrayClass(Thread::Current(), common_elem);
+    mirror::Class* array_class = class_linker->FindArrayClass(Thread::Current(), &common_elem);
     DCHECK(array_class != NULL);
     return array_class;
   } else {
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index 841c01a..c02f310 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -50,13 +50,14 @@
   return true;
 }
 
-MemMap* ZipEntry::ExtractToMemMap(const char* entry_filename, std::string* error_msg) {
+MemMap* ZipEntry::ExtractToMemMap(const char* zip_filename, const char* entry_filename,
+                                  std::string* error_msg) {
   std::string name(entry_filename);
   name += " extracted in memory from ";
-  name += entry_filename;
+  name += zip_filename;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
-                                             NULL, GetUncompressedLength(),
-                                             PROT_READ | PROT_WRITE, false, error_msg));
+                                                   NULL, GetUncompressedLength(),
+                                                   PROT_READ | PROT_WRITE, false, error_msg));
   if (map.get() == nullptr) {
     DCHECK(!error_msg->empty());
     return nullptr;
diff --git a/runtime/zip_archive.h b/runtime/zip_archive.h
index c0e2f2f..865af51 100644
--- a/runtime/zip_archive.h
+++ b/runtime/zip_archive.h
@@ -37,7 +37,8 @@
 class ZipEntry {
  public:
   bool ExtractToFile(File& file, std::string* error_msg);
-  MemMap* ExtractToMemMap(const char* entry_filename, std::string* error_msg);
+  MemMap* ExtractToMemMap(const char* zip_filename, const char* entry_filename,
+                          std::string* error_msg);
   virtual ~ZipEntry();
 
   uint32_t GetUncompressedLength();
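With the extra zip_filename parameter, the MemMap's debug name finally reads "classes.dex extracted in memory from /path/app.apk" instead of repeating the entry name twice, as the old code did. A hypothetical call site under the new signature:

std::string error_msg;
std::unique_ptr<MemMap> map(
    zip_entry->ExtractToMemMap("/data/app/example.apk", "classes.dex", &error_msg));
if (map.get() == nullptr) {
  // error_msg describes the failure (e.g. the anonymous mmap failing).
}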
diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk
new file mode 100644
index 0000000..cb1778d
--- /dev/null
+++ b/sigchainlib/Android.mk
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+include art/build/Android.common.mk
+
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
+LOCAL_SRC_FILES := sigchain.cc
+LOCAL_MODULE:= libsigchain
+LOCAL_SHARED_LIBRARIES += liblog libdl
+LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+include $(BUILD_SHARED_LIBRARY)
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
new file mode 100644
index 0000000..26e7d31
--- /dev/null
+++ b/sigchainlib/sigchain.cc
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/log.h>
+#include <dlfcn.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+namespace art {
+
+class SignalAction {
+ public:
+  SignalAction() : claimed_(false) {
+  }
+
+  // Claim the signal and keep the action specified.
+  void Claim(const struct sigaction& action) {
+    action_ = action;
+    claimed_ = true;
+  }
+
+  // Unclaim the signal and restore the old action.
+  void Unclaim(int signal) {
+    claimed_ = false;
+    sigaction(signal, &action_, NULL);        // Restore old action.
+  }
+
+  // Get the action associated with this signal.
+  const struct sigaction& GetAction() const {
+    return action_;
+  }
+
+  // Is the signal claimed?
+  bool IsClaimed() const {
+    return claimed_;
+  }
+
+  // Change the recorded action to that specified.
+  void SetAction(const struct sigaction& action) {
+    action_ = action;
+  }
+
+ private:
+  struct sigaction action_;     // Action to be performed.
+  bool claimed_;                // Whether signal is claimed or not.
+};
+
+// User's signal handlers
+static SignalAction user_sigactions[_NSIG];
+
+static void log(const char* format, ...) {
+  char buf[256];
+  va_list ap;
+  va_start(ap, format);
+  vsnprintf(buf, sizeof(buf), format, ap);
+  __android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf);
+  va_end(ap);
+}
+
+static void CheckSignalValid(int signal) {
+  if (signal <= 0 || signal >= _NSIG) {
+    log("Invalid signal %d", signal);
+    abort();
+  }
+}
+
+// Claim a signal chain for a particular signal.
+void ClaimSignalChain(int signal, struct sigaction* oldaction) {
+  CheckSignalValid(signal);
+  user_sigactions[signal].Claim(*oldaction);
+}
+
+void UnclaimSignalChain(int signal) {
+  CheckSignalValid(signal);
+
+  user_sigactions[signal].Unclaim(signal);
+}
+
+// Invoke the user's signal handler.
+void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
+  // Check the arguments.
+  CheckSignalValid(sig);
+
+  // The signal must have been claimed in order to get here.  Check it.
+  if (!user_sigactions[sig].IsClaimed()) {
+    abort();
+  }
+
+  const struct sigaction& action = user_sigactions[sig].GetAction();
+
+  // Only deliver the signal if the signal was not masked out.
+  if (sigismember(&action.sa_mask, sig)) {
+    return;
+  }
+  if ((action.sa_flags & SA_SIGINFO) == 0) {
+    if (action.sa_handler != NULL) {
+      action.sa_handler(sig);
+    }
+  } else {
+    if (action.sa_sigaction != NULL) {
+      action.sa_sigaction(sig, info, context);
+    }
+  }
+}
+
+extern "C" {
+// These functions are C linkage since they replace the functions in libc.
+
+int sigaction(int signal, const struct sigaction* new_action, struct sigaction* old_action) {
+  // If this signal has been claimed as a signal chain, record the user's
+  // action but don't pass it on to the kernel.
+  // Note that we check that the signal number is in range here.  An out-of-range signal
+  // number should behave exactly as it does with the libc sigaction.
+  if (signal > 0 && signal < _NSIG && user_sigactions[signal].IsClaimed()) {
+    if (old_action != NULL) {
+      *old_action = user_sigactions[signal].GetAction();
+    }
+    if (new_action != NULL) {
+      user_sigactions[signal].SetAction(*new_action);
+    }
+    return 0;
+  }
+
+  // We only get here if the signal chain has not been claimed.  We want
+  // to pass the sigaction on to the kernel via the real sigaction in libc.
+
+  void* linked_sigaction_sym = dlsym(RTLD_NEXT, "sigaction");
+  if (linked_sigaction_sym == nullptr) {
+    log("Unable to find next sigaction in signal chain");
+    abort();
+  }
+
+  typedef int (*SigAction)(int, const struct sigaction*, struct sigaction*);
+  SigAction linked_sigaction = reinterpret_cast<SigAction>(linked_sigaction_sym);
+  return linked_sigaction(signal, new_action, old_action);
+}
+
+
+int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
+  const sigset_t* new_set_ptr = bionic_new_set;
+  sigset_t tmpset;
+  if (bionic_new_set != NULL) {
+    tmpset = *bionic_new_set;
+
+    if (how == SIG_BLOCK) {
+      // Don't allow claimed signals in the mask.  If a signal chain has been claimed
+      // we can't allow the user to block that signal.
+      for (int i = 0 ; i < _NSIG; ++i) {
+        if (user_sigactions[i].IsClaimed() && sigismember(&tmpset, i)) {
+          sigdelset(&tmpset, i);
+        }
+      }
+    }
+    new_set_ptr = &tmpset;
+  }
+
+  void* linked_sigprocmask_sym = dlsym(RTLD_NEXT, "sigprocmask");
+  if (linked_sigprocmask_sym == nullptr) {
+    log("Unable to find next sigprocmask in signal chain");
+    abort();
+  }
+
+  typedef int (*SigProcMask)(int how, const sigset_t*, sigset_t*);
+  SigProcMask linked_sigprocmask = reinterpret_cast<SigProcMask>(linked_sigprocmask_sym);
+  return linked_sigprocmask(how, new_set_ptr, bionic_old_set);
+}
+}   // extern "C"
+}   // namespace art
+
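Taken together, the three entry points exported here let a runtime sit first in line for a signal while preserving whatever handler the application installed. A hypothetical fault handler wiring them up might look like this (not ART's fault-handler code; the function names below are invented for the sketch):

#include <signal.h>
#include "sigchain.h"

static void RuntimeFaultHandler(int sig, siginfo_t* info, void* context) {
  // Hypothetical: if the fault is one the runtime generated on purpose
  // (implicit null check, stack-overflow probe), handle it and return here.
  // Anything unrecognized is deferred to the application's handler.
  art::InvokeUserSignalHandler(sig, info, context);
}

void InstallRuntimeFaultHandler() {
  struct sigaction action;
  action.sa_sigaction = RuntimeFaultHandler;
  sigemptyset(&action.sa_mask);
  action.sa_flags = SA_SIGINFO | SA_ONSTACK;
  struct sigaction old_action;
  // Not yet claimed, so this reaches the real libc sigaction.
  sigaction(SIGSEGV, &action, &old_action);
  // Record the displaced handler; InvokeUserSignalHandler will chain to it,
  // and the interposed sigaction() above will route later installs here too.
  art::ClaimSignalChain(SIGSEGV, &old_action);
}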
diff --git a/compiler/dex/bit_vector_block_iterator.cc b/sigchainlib/sigchain.h
similarity index 65%
rename from compiler/dex/bit_vector_block_iterator.cc
rename to sigchainlib/sigchain.h
index 32d7d71..f6f2253 100644
--- a/compiler/dex/bit_vector_block_iterator.cc
+++ b/sigchainlib/sigchain.h
@@ -14,19 +14,16 @@
  * limitations under the License.
  */
 
-#include "bit_vector_block_iterator.h"
-#include "mir_graph.h"
+#ifndef ART_SIGCHAINLIB_SIGCHAIN_H_
+#define ART_SIGCHAINLIB_SIGCHAIN_H_
 
+#include <signal.h>
+
 namespace art {
 
-BasicBlock* BitVectorBlockIterator::Next() {
-  int idx = internal_iterator_.Next();
+void ClaimSignalChain(int signal, struct sigaction* oldaction);
+void UnclaimSignalChain(int signal);
+void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context);
 
-  if (idx == -1) {
-    return nullptr;
-  }
+}   // namespace art
 
-  return mir_graph_->GetBasicBlock(idx);
-}
-
-}  // namespace art
+#endif  // ART_SIGCHAINLIB_SIGCHAIN_H_
diff --git a/test/079-phantom/src/Bitmap.java b/test/079-phantom/src/Bitmap.java
index 9d03cbd..85eb3cc 100644
--- a/test/079-phantom/src/Bitmap.java
+++ b/test/079-phantom/src/Bitmap.java
@@ -29,6 +29,7 @@
             new ReferenceQueue<PhantomWrapper>();
     private static BitmapWatcher sWatcher = new BitmapWatcher(sPhantomQueue);
     static {
+        sWatcher.setDaemon(true);
         sWatcher.start();
     };
 
diff --git a/test/111-unresolvable-exception/expected.txt b/test/111-unresolvable-exception/expected.txt
index 052dd74..f8a1e96 100644
--- a/test/111-unresolvable-exception/expected.txt
+++ b/test/111-unresolvable-exception/expected.txt
@@ -1 +1 @@
-Caught class java.lang.NoClassDefFoundError
+Got expected exception.
diff --git a/test/111-unresolvable-exception/src/Main.java b/test/111-unresolvable-exception/src/Main.java
index ba07ee1..adeb0a2 100644
--- a/test/111-unresolvable-exception/src/Main.java
+++ b/test/111-unresolvable-exception/src/Main.java
@@ -32,7 +32,7 @@
         throw new RuntimeException();  // Trigger exception handling.
       } catch (TestException e) {      // This handler will have an unresolvable class.
       } catch (Exception e) {          // General-purpose handler
-        System.out.println("Should not get here!");
+        System.out.println("Got expected exception.");
       }
     }
 
diff --git a/test/Android.mk b/test/Android.mk
index 8caa033..c15259c 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -46,6 +46,7 @@
 	HelloWorld \
 	InterfaceTest \
 	JniTest \
+	SignalTest \
 	NativeAllocations \
 	ParallelGC \
 	ReferenceMap \
diff --git a/test/SignalTest/SignalTest.java b/test/SignalTest/SignalTest.java
new file mode 100644
index 0000000..7f15aea
--- /dev/null
+++ b/test/SignalTest/SignalTest.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class SignalTest {
+    private static native void initSignalTest();
+    private static native void terminateSignalTest();
+    private static native int testSignal();
+
+    private static void stackOverflow() {
+        stackOverflow();
+    }
+
+    public static void main(String[] args) {
+        System.loadLibrary("arttest");
+
+        System.out.println("init signal test");
+        initSignalTest();
+        try {
+            Object o = null;
+            int hash = o.hashCode();
+
+            // Should never get here.
+            System.out.println("hash: " + hash);
+            throw new AssertionError();
+        } catch (NullPointerException e) {
+            System.out.println("Caught NullPointerException");
+        }
+        try {
+            stackOverflow();
+
+            // Should never get here.
+            throw new AssertionError();
+        } catch (StackOverflowError e) {
+            System.out.println("Caught StackOverflowError");
+        }
+
+        // Test that a signal in native code works.  This will return
+        // the value 1234 if the signal is caught.
+        int x = testSignal();
+        if (x != 1234) {
+            throw new AssertionError();
+        }
+
+        terminateSignalTest();
+        System.out.println("Signal test OK");
+    }
+}
diff --git a/test/SignalTest/signaltest.cc b/test/SignalTest/signaltest.cc
new file mode 100644
index 0000000..b84e395
--- /dev/null
+++ b/test/SignalTest/signaltest.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <signal.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "jni.h"
+
+#ifdef __arm__
+#include <sys/ucontext.h>
+#endif
+
+static void signalhandler(int sig, siginfo_t* info, void* context) {
+  printf("signal caught\n");
+#ifdef __arm__
+  // On ARM we do a more exhaustive test to make sure the signal
+  // context is OK.
+  // We can do this because we know that the instruction causing
+  // the signal is 2 bytes long (thumb mov instruction).  On
+  // other architectures this is more difficult.
+  // TODO: we could do this on other architectures too if necessary, it's just harder.
+  struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
+  struct sigcontext* sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+  sc->arm_pc += 2;          // Skip instruction causing segv.
+#endif
+}
+
+static struct sigaction oldaction;
+
+extern "C" JNIEXPORT void JNICALL Java_SignalTest_initSignalTest(JNIEnv*, jclass) {
+  struct sigaction action;
+  action.sa_sigaction = signalhandler;
+  sigemptyset(&action.sa_mask);
+  action.sa_flags = SA_SIGINFO | SA_ONSTACK;
+#if !defined(__mips__)
+  action.sa_restorer = nullptr;
+#endif
+
+  sigaction(SIGSEGV, &action, &oldaction);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_SignalTest_terminateSignalTest(JNIEnv*, jclass) {
+  sigaction(SIGSEGV, &oldaction, nullptr);
+}
+
+// Prevent the compiler from being a smart-alec and optimizing out the
+// assignment to nullptr.
+char* p = nullptr;
+
+extern "C" JNIEXPORT jint JNICALL Java_SignalTest_testSignal(JNIEnv*, jclass) {
+#ifdef __arm__
+  // On ARM we cause a real SEGV.
+  *p = 'a';
+#else
+  // On other architectures we simulate SEGV.
+  kill(getpid(), SIGSEGV);
+#endif
+  return 1234;
+}
+