Merge "Integrate bisection search with javafuzz"
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 93e310e..6b7dc09 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -60,8 +60,8 @@
 # Do you want to test the optimizing compiler with graph coloring register allocation?
 ART_TEST_OPTIMIZING_GRAPH_COLOR ?= $(ART_TEST_FULL)
 
-# Do we want to test a PIC-compiled core image?
-ART_TEST_PIC_IMAGE ?= $(ART_TEST_FULL)
+# Do we want to test a non-PIC-compiled core image?
+ART_TEST_NPIC_IMAGE ?= $(ART_TEST_FULL)
 
 # Do we want to test PIC-compiled tests ("apps")?
 ART_TEST_PIC_TEST ?= $(ART_TEST_FULL)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 76ee06e..12b50b8 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -90,16 +90,16 @@
 ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup
 
 # The elf writer test has dependencies on core.oat.
-ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
-ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_default_no-pic_64) $(TARGET_CORE_IMAGE_default_no-pic_32)
+ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_optimizing_no-pic_64) $(HOST_CORE_IMAGE_optimizing_no-pic_32)
+ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_optimizing_no-pic_64) $(TARGET_CORE_IMAGE_optimizing_no-pic_32)
 
 ART_GTEST_dex2oat_environment_tests_HOST_DEPS := \
-  $(HOST_CORE_IMAGE_default_no-pic_64) \
-  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_CORE_IMAGE_optimizing_pic_64) \
+  $(HOST_CORE_IMAGE_optimizing_pic_32) \
   $(HOST_OUT_EXECUTABLES)/patchoatd
 ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \
-  $(TARGET_CORE_IMAGE_default_no-pic_64) \
-  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  $(TARGET_CORE_IMAGE_optimizing_pic_64) \
+  $(TARGET_CORE_IMAGE_optimizing_pic_32) \
   $(TARGET_OUT_EXECUTABLES)/patchoatd
 
 ART_GTEST_oat_file_assistant_test_HOST_DEPS := \
@@ -114,62 +114,62 @@
   $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
 
 # TODO: document why this is needed.
-ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
+ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_optimizing_no-pic_64) $(HOST_CORE_IMAGE_optimizing_no-pic_32)
 
 # The dexdump test requires an image and the dexdump utility.
 # TODO: rename into dexdump when migration completes
 ART_GTEST_dexdump_test_HOST_DEPS := \
-  $(HOST_CORE_IMAGE_default_no-pic_64) \
-  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_CORE_IMAGE_optimizing_no-pic_64) \
+  $(HOST_CORE_IMAGE_optimizing_no-pic_32) \
   $(HOST_OUT_EXECUTABLES)/dexdump2
 ART_GTEST_dexdump_test_TARGET_DEPS := \
-  $(TARGET_CORE_IMAGE_default_no-pic_64) \
-  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  $(TARGET_CORE_IMAGE_optimizing_no-pic_64) \
+  $(TARGET_CORE_IMAGE_optimizing_no-pic_32) \
   dexdump2
 
 # The dexlayout test requires an image and the dexlayout utility.
 # TODO: rename into dexdump when migration completes
 ART_GTEST_dexlayout_test_HOST_DEPS := \
-  $(HOST_CORE_IMAGE_default_no-pic_64) \
-  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_CORE_IMAGE_optimizing_no-pic_64) \
+  $(HOST_CORE_IMAGE_optimizing_no-pic_32) \
   $(HOST_OUT_EXECUTABLES)/dexlayout \
   $(HOST_OUT_EXECUTABLES)/dexdump2
 ART_GTEST_dexlayout_test_TARGET_DEPS := \
-  $(TARGET_CORE_IMAGE_default_no-pic_64) \
-  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  $(TARGET_CORE_IMAGE_optimizing_no-pic_64) \
+  $(TARGET_CORE_IMAGE_optimizing_no-pic_32) \
   dexlayout \
   dexdump2
 
 # The dexlist test requires an image and the dexlist utility.
 ART_GTEST_dexlist_test_HOST_DEPS := \
-  $(HOST_CORE_IMAGE_default_no-pic_64) \
-  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_CORE_IMAGE_optimizing_no-pic_64) \
+  $(HOST_CORE_IMAGE_optimizing_no-pic_32) \
   $(HOST_OUT_EXECUTABLES)/dexlist
 ART_GTEST_dexlist_test_TARGET_DEPS := \
-  $(TARGET_CORE_IMAGE_default_no-pic_64) \
-  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  $(TARGET_CORE_IMAGE_optimizing_no-pic_64) \
+  $(TARGET_CORE_IMAGE_optimizing_no-pic_32) \
   dexlist
 
 # The imgdiag test has dependencies on core.oat since it needs to load it during the test.
 # For the host, also add the installed tool (in the base size, that should suffice). For the
 # target, just the module is fine, the sync will happen late enough.
 ART_GTEST_imgdiag_test_HOST_DEPS := \
-  $(HOST_CORE_IMAGE_default_no-pic_64) \
-  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_CORE_IMAGE_optimizing_no-pic_64) \
+  $(HOST_CORE_IMAGE_optimizing_no-pic_32) \
   $(HOST_OUT_EXECUTABLES)/imgdiagd
 ART_GTEST_imgdiag_test_TARGET_DEPS := \
-  $(TARGET_CORE_IMAGE_default_no-pic_64) \
-  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  $(TARGET_CORE_IMAGE_optimizing_no-pic_64) \
+  $(TARGET_CORE_IMAGE_optimizing_no-pic_32) \
   imgdiagd
 
 # Oatdump test requires an image and oatfile to dump.
 ART_GTEST_oatdump_test_HOST_DEPS := \
-  $(HOST_CORE_IMAGE_default_no-pic_64) \
-  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_CORE_IMAGE_optimizing_no-pic_64) \
+  $(HOST_CORE_IMAGE_optimizing_no-pic_32) \
   $(HOST_OUT_EXECUTABLES)/oatdumpd
 ART_GTEST_oatdump_test_TARGET_DEPS := \
-  $(TARGET_CORE_IMAGE_default_no-pic_64) \
-  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  $(TARGET_CORE_IMAGE_optimizing_no-pic_64) \
+  $(TARGET_CORE_IMAGE_optimizing_no-pic_32) \
   oatdump
 
 # Profile assistant tests requires profman utility.
@@ -214,6 +214,8 @@
   runtime/base/stringprintf_test.cc \
   runtime/base/time_utils_test.cc \
   runtime/base/timing_logger_test.cc \
+  runtime/base/transform_array_ref_test.cc \
+  runtime/base/transform_iterator_test.cc \
   runtime/base/variant_map_test.cc \
   runtime/base/unix_file/fd_file_test.cc \
   runtime/class_linker_test.cc \
@@ -308,9 +310,7 @@
   compiler/utils/intrusive_forward_list_test.cc \
   compiler/utils/string_reference_test.cc \
   compiler/utils/swap_space_test.cc \
-  compiler/utils/test_dex_file_builder_test.cc \
-  compiler/utils/transform_array_ref_test.cc \
-  compiler/utils/transform_iterator_test.cc \
+  compiler/utils/test_dex_file_builder_test.cc
 
 COMPILER_GTEST_COMMON_SRC_FILES_all := \
   compiler/jni/jni_cfi_test.cc \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 884f698..c4887e6 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -37,7 +37,7 @@
 endif
 
 # Use dex2oat debug version for better error reporting
-# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks.
+# $(1): compiler - optimizing, interpreter or interpreter-access-checks.
 # $(2): pic/no-pic
 # $(3): 2ND_ or undefined, 2ND_ for 32-bit host builds.
 # $(4): wrapper, e.g., valgrind.
@@ -53,13 +53,9 @@
   core_pic_infix :=
   core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
 
-  ifeq ($(1),default)
-    core_compile_options += --compiler-backend=Quick
-  endif
   ifeq ($(1),optimizing)
     core_compile_options += --compiler-backend=Optimizing
     core_dex2oat_dependency := $(DEX2OAT)
-    core_infix := -optimizing
   endif
   ifeq ($(1),interpreter)
     core_compile_options += --compiler-filter=interpret-only
@@ -69,24 +65,16 @@
     core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail
     core_infix := -interp-ac
   endif
-  ifeq ($(1),jit)
-    core_compile_options += --compiler-filter=verify-at-runtime
-    core_infix := -jit
-  endif
-  ifeq ($(1),default)
-    # Default has no infix, no compile options.
-  endif
-  ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),)
+  ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),)
     #Technically this test is not precise, but hopefully good enough.
-    $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing)
+    $$(error found $(1) expected interpreter, interpreter-access-checks, or optimizing)
   endif
 
   ifeq ($(2),pic)
     core_compile_options += --compile-pic
-    core_pic_infix := -pic
   endif
   ifeq ($(2),no-pic)
-    # No change for non-pic
+    core_pic_infix := -npic
   endif
   ifneq ($(filter-out pic no-pic,$(2)),)
     # Technically this test is not precise, but hopefully good enough.
@@ -148,7 +136,7 @@
   core_pic_infix :=
 endef  # create-core-oat-host-rules
 
-# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks.
+# $(1): compiler - optimizing, interpreter or interpreter-access-checks.
 # $(2): wrapper.
 # $(3): dex2oat suffix.
 # $(4): multi-image.
@@ -162,24 +150,18 @@
   endif
 endef
 
-$(eval $(call create-core-oat-host-rule-combination,default,,,false))
 $(eval $(call create-core-oat-host-rule-combination,optimizing,,,false))
 $(eval $(call create-core-oat-host-rule-combination,interpreter,,,false))
 $(eval $(call create-core-oat-host-rule-combination,interp-ac,,,false))
-$(eval $(call create-core-oat-host-rule-combination,jit,,,false))
-$(eval $(call create-core-oat-host-rule-combination,default,,,true))
 $(eval $(call create-core-oat-host-rule-combination,optimizing,,,true))
 $(eval $(call create-core-oat-host-rule-combination,interpreter,,,true))
 $(eval $(call create-core-oat-host-rule-combination,interp-ac,,,true))
-$(eval $(call create-core-oat-host-rule-combination,jit,,,true))
 
 valgrindHOST_CORE_IMG_OUTS :=
 valgrindHOST_CORE_OAT_OUTS :=
-$(eval $(call create-core-oat-host-rule-combination,default,valgrind,32,false))
 $(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32,false))
 $(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32,false))
 $(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32,false))
-$(eval $(call create-core-oat-host-rule-combination,jit,valgrind,32,false))
 
 valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS)
 
@@ -193,15 +175,11 @@
   core_pic_infix :=
   core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
 
-  ifeq ($(1),default)
-    core_compile_options += --compiler-backend=Quick
-  endif
   ifeq ($(1),optimizing)
     core_compile_options += --compiler-backend=Optimizing
     # With the optimizing compiler, we want to rerun dex2oat whenever there is
     # a dex2oat change to catch regressions early.
     core_dex2oat_dependency := $(DEX2OAT)
-    core_infix := -optimizing
   endif
   ifeq ($(1),interpreter)
     core_compile_options += --compiler-filter=interpret-only
@@ -211,24 +189,16 @@
     core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail
     core_infix := -interp-ac
   endif
-  ifeq ($(1),jit)
-    core_compile_options += --compiler-filter=verify-at-runtime
-    core_infix := -jit
-  endif
-  ifeq ($(1),default)
-    # Default has no infix, no compile options.
-  endif
-  ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),)
+  ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),)
     # Technically this test is not precise, but hopefully good enough.
-    $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing)
+    $$(error found $(1) expected interpreter, interpreter-access-checks, or optimizing)
   endif
 
   ifeq ($(2),pic)
     core_compile_options += --compile-pic
-    core_pic_infix := -pic
   endif
   ifeq ($(2),no-pic)
-    # No change for non-pic
+    core_pic_infix := -npic
   endif
   ifneq ($(filter-out pic no-pic,$(2)),)
     #Technically this test is not precise, but hopefully good enough.
@@ -283,7 +253,7 @@
   core_pic_infix :=
 endef  # create-core-oat-target-rules
 
-# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks.
+# $(1): compiler - optimizing, interpreter or interpreter-access-checks.
 # $(2): wrapper.
 # $(3): dex2oat suffix.
 define create-core-oat-target-rule-combination
@@ -296,19 +266,15 @@
   endif
 endef
 
-$(eval $(call create-core-oat-target-rule-combination,default,,))
 $(eval $(call create-core-oat-target-rule-combination,optimizing,,))
 $(eval $(call create-core-oat-target-rule-combination,interpreter,,))
 $(eval $(call create-core-oat-target-rule-combination,interp-ac,,))
-$(eval $(call create-core-oat-target-rule-combination,jit,,))
 
 valgrindTARGET_CORE_IMG_OUTS :=
 valgrindTARGET_CORE_OAT_OUTS :=
-$(eval $(call create-core-oat-target-rule-combination,default,valgrind,32))
 $(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32))
 $(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32))
 $(eval $(call create-core-oat-target-rule-combination,interp-ac,valgrind,32))
-$(eval $(call create-core-oat-target-rule-combination,jit,valgrind,32))
 
 valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS)
 
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 2a81804..1a87448 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -23,10 +23,10 @@
 #include <vector>
 
 #include "arch/instruction_set.h"
+#include "base/array_ref.h"
 #include "base/bit_utils.h"
 #include "base/length_prefixed_array.h"
 #include "method_reference.h"
-#include "utils/array_ref.h"
 
 namespace art {
 
diff --git a/compiler/debug/dwarf/headers.h b/compiler/debug/dwarf/headers.h
index 146d9fd..28f1084 100644
--- a/compiler/debug/dwarf/headers.h
+++ b/compiler/debug/dwarf/headers.h
@@ -19,13 +19,13 @@
 
 #include <cstdint>
 
+#include "base/array_ref.h"
 #include "debug/dwarf/debug_frame_opcode_writer.h"
 #include "debug/dwarf/debug_info_entry_writer.h"
 #include "debug/dwarf/debug_line_opcode_writer.h"
 #include "debug/dwarf/dwarf_constants.h"
 #include "debug/dwarf/register.h"
 #include "debug/dwarf/writer.h"
-#include "utils/array_ref.h"
 
 namespace art {
 namespace dwarf {
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 5bfdd16..d1c10a9 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -18,6 +18,7 @@
 
 #include <vector>
 
+#include "base/array_ref.h"
 #include "debug/dwarf/dwarf_constants.h"
 #include "debug/elf_compilation_unit.h"
 #include "debug/elf_debug_frame_writer.h"
@@ -29,7 +30,6 @@
 #include "debug/method_debug_info.h"
 #include "elf_builder.h"
 #include "linker/vector_output_stream.h"
-#include "utils/array_ref.h"
 
 namespace art {
 namespace debug {
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index b0542c7..07f7229 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -19,11 +19,11 @@
 
 #include <vector>
 
+#include "base/array_ref.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "debug/dwarf/dwarf_constants.h"
 #include "elf_builder.h"
-#include "utils/array_ref.h"
 
 namespace art {
 class OatHeader;
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
index 8674abf..124b5a6 100644
--- a/compiler/driver/compiled_method_storage.h
+++ b/compiler/driver/compiled_method_storage.h
@@ -20,9 +20,9 @@
 #include <iosfwd>
 #include <memory>
 
+#include "base/array_ref.h"
 #include "base/length_prefixed_array.h"
 #include "base/macros.h"
-#include "utils/array_ref.h"
 #include "utils/dedupe_set.h"
 #include "utils/swap_space.h"
 
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c197ff9..a149c07 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -26,6 +26,7 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/array_ref.h"
 #include "base/bit_vector.h"
 #include "base/enums.h"
 #include "base/stl_util.h"
@@ -67,7 +68,6 @@
 #include "thread_pool.h"
 #include "trampolines/trampoline_compiler.h"
 #include "transaction.h"
-#include "utils/array_ref.h"
 #include "utils/dex_cache_arrays_layout-inl.h"
 #include "utils/swap_space.h"
 #include "verifier/method_verifier.h"
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index fbc1edd..ee21efa 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -24,6 +24,7 @@
 
 #include "arch/instruction_set.h"
 #include "base/arena_allocator.h"
+#include "base/array_ref.h"
 #include "base/bit_utils.h"
 #include "base/mutex.h"
 #include "base/timing_logger.h"
@@ -39,7 +40,6 @@
 #include "runtime.h"
 #include "safe_map.h"
 #include "thread_pool.h"
-#include "utils/array_ref.h"
 #include "utils/dex_cache_arrays_layout.h"
 
 namespace art {
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 7f2e193..02831c9 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -21,13 +21,13 @@
 
 #include "arch/instruction_set.h"
 #include "arch/mips/instruction_set_features_mips.h"
+#include "base/array_ref.h"
 #include "base/bit_utils.h"
 #include "base/casts.h"
 #include "base/unix_file/fd_file.h"
 #include "elf_utils.h"
 #include "leb128.h"
 #include "linker/error_delaying_output_stream.h"
-#include "utils/array_ref.h"
 
 namespace art {
 
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index c9ea0083..f8f9102 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -22,10 +22,10 @@
 #include <string>
 #include <vector>
 
+#include "base/array_ref.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "os.h"
-#include "utils/array_ref.h"
 
 namespace art {
 
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 3d89146..f541d8f 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,11 +18,11 @@
 #define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
 
 #include "base/arena_object.h"
+#include "base/array_ref.h"
 #include "base/enums.h"
 #include "handle_scope.h"
 #include "primitive.h"
 #include "thread.h"
-#include "utils/array_ref.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/linker/arm64/relative_patcher_arm64.h b/compiler/linker/arm64/relative_patcher_arm64.h
index 48ad105..a4a8018 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.h
+++ b/compiler/linker/arm64/relative_patcher_arm64.h
@@ -17,8 +17,8 @@
 #ifndef ART_COMPILER_LINKER_ARM64_RELATIVE_PATCHER_ARM64_H_
 #define ART_COMPILER_LINKER_ARM64_RELATIVE_PATCHER_ARM64_H_
 
+#include "base/array_ref.h"
 #include "linker/arm/relative_patcher_arm_base.h"
-#include "utils/array_ref.h"
 
 namespace art {
 namespace linker {
diff --git a/compiler/linker/relative_patcher.h b/compiler/linker/relative_patcher.h
index a22b9f2..15e955b 100644
--- a/compiler/linker/relative_patcher.h
+++ b/compiler/linker/relative_patcher.h
@@ -21,9 +21,9 @@
 
 #include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
+#include "base/array_ref.h"
 #include "base/macros.h"
 #include "method_reference.h"
-#include "utils/array_ref.h"
 
 namespace art {
 
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index d21f33e..304b31c 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -19,6 +19,7 @@
 
 #include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
+#include "base/array_ref.h"
 #include "base/macros.h"
 #include "compiled_method.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
@@ -31,7 +32,6 @@
 #include "method_reference.h"
 #include "oat.h"
 #include "oat_quick_method_header.h"
-#include "utils/array_ref.h"
 #include "vector_output_stream.h"
 
 namespace art {
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 77525f1..dd7d699 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -21,6 +21,7 @@
 #include <cstddef>
 #include <memory>
 
+#include "base/array_ref.h"
 #include "base/dchecked_vector.h"
 #include "linker/relative_patcher.h"  // For linker::RelativePatcherTargetProvider.
 #include "mem_map.h"
@@ -29,7 +30,6 @@
 #include "oat.h"
 #include "os.h"
 #include "safe_map.h"
-#include "utils/array_ref.h"
 
 namespace art {
 
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index ce9d7e6..38a2410 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -556,10 +556,10 @@
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
 
-  void GenerateNop();
+  void GenerateNop() OVERRIDE;
 
-  void GenerateImplicitNullCheck(HNullCheck* instruction);
-  void GenerateExplicitNullCheck(HNullCheck* instruction);
+  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
 
  private:
   Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
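
The OVERRIDE annotations added in this header (and in the other code generator headers further down) are more than style: ART's OVERRIDE macro expands to the C++11 override specifier, so a declaration that no longer matches a base-class virtual becomes a compile error instead of silently introducing an unrelated function. A minimal standalone sketch, not ART code:

```
struct CodeGeneratorBase {
  virtual ~CodeGeneratorBase() = default;
  virtual void GenerateNop() {}
};

struct CodeGeneratorSketch : CodeGeneratorBase {
  void GenerateNop() override {}        // OK: overrides the base virtual.
  // void GenerateNop(int) override {}  // Would not compile: overrides nothing.
};

int main() {
  CodeGeneratorSketch gen;
  gen.GenerateNop();
  return 0;
}
```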
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c00ab56..599185a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -448,7 +448,7 @@
   }
 
   const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
-  bool IsFatal() const { return is_fatal_; }
+  bool IsFatal() const OVERRIDE { return is_fatal_; }
 
  private:
   const bool is_fatal_;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f0d7910..f1dc7ee 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -644,10 +644,10 @@
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
 
-  void GenerateNop();
+  void GenerateNop() OVERRIDE;
 
-  void GenerateImplicitNullCheck(HNullCheck* instruction);
-  void GenerateExplicitNullCheck(HNullCheck* instruction);
+  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
 
  private:
   using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::aarch64::Literal<uint64_t>*>;
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 956a466..a42374f 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -329,10 +329,10 @@
 
   void SetupBlockedRegisters() const OVERRIDE;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   void ClobberRA() {
     clobbered_ra_ = true;
   }
@@ -363,7 +363,7 @@
 
   void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
 
-  void MoveConstant(Location destination, int32_t value);
+  void MoveConstant(Location destination, int32_t value) OVERRIDE;
 
   void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
 
@@ -375,7 +375,7 @@
 
   ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
 
-  bool NeedsTwoRegisters(Primitive::Type type) const {
+  bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
     return type == Primitive::kPrimLong;
   }
 
@@ -403,9 +403,9 @@
     UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
   }
 
-  void GenerateNop();
-  void GenerateImplicitNullCheck(HNullCheck* instruction);
-  void GenerateExplicitNullCheck(HNullCheck* instruction);
+  void GenerateNop() OVERRIDE;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
 
   // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
   // and boot image strings. The only difference is the interpretation of the offset_or_index.
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 3910530..2dd409a 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -285,10 +285,10 @@
 
   void SetupBlockedRegisters() const OVERRIDE;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
 
   void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
   void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
@@ -327,7 +327,7 @@
 
   ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
 
-  bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const { return false; }
+  bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
@@ -353,9 +353,9 @@
     UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
   }
 
-  void GenerateNop();
-  void GenerateImplicitNullCheck(HNullCheck* instruction);
-  void GenerateExplicitNullCheck(HNullCheck* instruction);
+  void GenerateNop() OVERRIDE;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
 
  private:
   // Labels for each block that will be compiled.
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index e225098..04a0a3d 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -561,9 +561,9 @@
     }
   }
 
-  void GenerateNop();
-  void GenerateImplicitNullCheck(HNullCheck* instruction);
-  void GenerateExplicitNullCheck(HNullCheck* instruction);
+  void GenerateNop() OVERRIDE;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
 
   // When we don't know the proper offset for the value, we use kDummy32BitOffset.
   // The correct value will be inserted when processing Assembler fixups.
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d939083..693d0b8 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -533,9 +533,9 @@
     }
   }
 
-  void GenerateNop();
-  void GenerateImplicitNullCheck(HNullCheck* instruction);
-  void GenerateExplicitNullCheck(HNullCheck* instruction);
+  void GenerateNop() OVERRIDE;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
 
   // When we don't know the proper offset for the value, we use kDummy32BitOffset.
   // We will fix this up in the linker later to have the right value.
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index d9347f6..070cbb3 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -1007,17 +1007,7 @@
 }
 
 TEST_F(CodegenTest, ComparisonsLong) {
-  // TODO: make MIPS work for long
-  if (kRuntimeISA == kMips || kRuntimeISA == kMips64) {
-    return;
-  }
-
   for (CodegenTargetConfig target_config : GetTargetConfigs()) {
-    if ((target_config.GetInstructionSet() == kMips) ||
-        (target_config.GetInstructionSet() == kMips64)) {
-      continue;
-    }
-
     for (int64_t i = -1; i <= 1; i++) {
       for (int64_t j = -1; j <= 1; j++) {
         for (int cond = kCondFirst; cond <= kCondLast; cond++) {
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index e1bde7c..aa3f268 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -16,7 +16,7 @@
 
 #include "dead_code_elimination.h"
 
-#include "utils/array_ref.h"
+#include "base/array_ref.h"
 #include "base/bit_vector-inl.h"
 #include "ssa_phi_elimination.h"
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 8f37236..9cfa89b 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -460,6 +460,113 @@
   return kAnalysisSuccess;
 }
 
+static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) {
+  return first_loop == second_loop;
+}
+
+static bool IsLoop(HLoopInformation* info) {
+  return info != nullptr;
+}
+
+static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) {
+  return (inner != outer)
+      && (inner != nullptr)
+      && (outer != nullptr)
+      && inner->IsIn(*outer);
+}
+
+// Helper method to update work list for linear order.
+static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) {
+  HLoopInformation* block_loop = block->GetLoopInformation();
+  auto insert_pos = worklist->rbegin();  // insert_pos.base() will be the actual position.
+  for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) {
+    HBasicBlock* current = *insert_pos;
+    HLoopInformation* current_loop = current->GetLoopInformation();
+    if (InSameLoop(block_loop, current_loop)
+        || !IsLoop(current_loop)
+        || IsInnerLoop(current_loop, block_loop)) {
+      // The block can be processed immediately.
+      break;
+    }
+  }
+  worklist->insert(insert_pos.base(), block);
+}
+
+// Helper method to validate linear order.
+static bool IsLinearOrderWellFormed(const HGraph& graph) {
+  for (HBasicBlock* header : graph.GetBlocks()) {
+    if (header == nullptr || !header->IsLoopHeader()) {
+      continue;
+    }
+    HLoopInformation* loop = header->GetLoopInformation();
+    size_t num_blocks = loop->GetBlocks().NumSetBits();
+    size_t found_blocks = 0u;
+    for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
+      HBasicBlock* current = it.Current();
+      if (loop->Contains(*current)) {
+        found_blocks++;
+        if (found_blocks == 1u && current != header) {
+          // First block is not the header.
+          return false;
+        } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
+          // Last block is not a back edge.
+          return false;
+        }
+      } else if (found_blocks != 0u && found_blocks != num_blocks) {
+        // Blocks are not adjacent.
+        return false;
+      }
+    }
+    DCHECK_EQ(found_blocks, num_blocks);
+  }
+  return true;
+}
+
+void HGraph::Linearize() {
+  // Create a reverse post ordering with the following properties:
+  // - Blocks in a loop are consecutive,
+  // - Back-edge is the last block before loop exits.
+
+  // (1): Record the number of forward predecessors for each block. This is to
+  //      ensure the resulting order is reverse post order. We could use the
+  //      current reverse post order in the graph, but it would require making
+  //      order queries to a GrowableArray, which is not the best data structure
+  //      for it.
+  ArenaVector<uint32_t> forward_predecessors(blocks_.size(),
+                                             arena_->Adapter(kArenaAllocSsaLiveness));
+  for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
+    HBasicBlock* block = it.Current();
+    size_t number_of_forward_predecessors = block->GetPredecessors().size();
+    if (block->IsLoopHeader()) {
+      number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
+    }
+    forward_predecessors[block->GetBlockId()] = number_of_forward_predecessors;
+  }
+
+  // (2): Following a worklist approach, first start with the entry block, and
+  //      iterate over the successors. When all non-back edge predecessors of a
+  //      successor block are visited, the successor block is added in the worklist
+  //      following an order that satisfies the requirements to build our linear graph.
+  linear_order_.reserve(GetReversePostOrder().size());
+  ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocSsaLiveness));
+  worklist.push_back(GetEntryBlock());
+  do {
+    HBasicBlock* current = worklist.back();
+    worklist.pop_back();
+    linear_order_.push_back(current);
+    for (HBasicBlock* successor : current->GetSuccessors()) {
+      int block_id = successor->GetBlockId();
+      size_t number_of_remaining_predecessors = forward_predecessors[block_id];
+      if (number_of_remaining_predecessors == 1) {
+        AddToListForLinearization(&worklist, successor);
+      }
+      forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
+    }
+  } while (!worklist.empty());
+
+  DCHECK(HasIrreducibleLoops() || IsLinearOrderWellFormed(*this));
+}
+
 void HLoopInformation::Dump(std::ostream& os) {
   os << "header: " << header_->GetBlockId() << std::endl;
   os << "pre header: " << GetPreHeader()->GetBlockId() << std::endl;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 119b62a..7c3ca5c 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -24,7 +24,9 @@
 #include "base/arena_bit_vector.h"
 #include "base/arena_containers.h"
 #include "base/arena_object.h"
+#include "base/array_ref.h"
 #include "base/stl_util.h"
+#include "base/transform_array_ref.h"
 #include "dex_file.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "handle.h"
@@ -35,9 +37,7 @@
 #include "mirror/class.h"
 #include "offsets.h"
 #include "primitive.h"
-#include "utils/array_ref.h"
 #include "utils/intrusive_forward_list.h"
-#include "utils/transform_array_ref.h"
 
 namespace art {
 
@@ -365,6 +365,13 @@
   // is a throw-catch loop, i.e. the header is a catch block.
   GraphAnalysisResult AnalyzeLoops() const;
 
+  // Computes the linear order (should be called before using HLinearOrderIterator).
+  // Linearizes the graph such that:
+  // (1): a block is always after its dominator,
+  // (2): blocks of loops are contiguous.
+  // This creates a natural and efficient ordering when visualizing live ranges.
+  void Linearize();
+
   // Iterate over blocks to compute try block membership. Needs reverse post
   // order and loop information.
   void ComputeTryBlockInformation();
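
With Linearize() now a public HGraph method, any pass that needs the linear order calls it before iterating, exactly as SsaLivenessAnalysis::Analyze() does further down. A hedged usage sketch (hypothetical helper, not code from this change):

```
#include "nodes.h"

namespace art {

inline void VisitLinearOrder(HGraph* graph) {
  graph->Linearize();  // Populates the linear order; loop blocks become contiguous runs.
  for (HLinearOrderIterator it(*graph); !it.Done(); it.Advance()) {
    HBasicBlock* block = it.Current();
    // Per-block work goes here, e.g. numbering instructions for liveness.
    (void)block;
  }
}

}  // namespace art
```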
diff --git a/compiler/optimizing/register_allocation_resolver.h b/compiler/optimizing/register_allocation_resolver.h
index a70ceae..d48b1a0 100644
--- a/compiler/optimizing/register_allocation_resolver.h
+++ b/compiler/optimizing/register_allocation_resolver.h
@@ -18,9 +18,9 @@
 #define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
 
 #include "base/arena_containers.h"
+#include "base/array_ref.h"
 #include "base/value_object.h"
 #include "primitive.h"
-#include "utils/array_ref.h"
 
 namespace art {
 
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index a4d52d7..9ce34aa 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -23,119 +23,11 @@
 namespace art {
 
 void SsaLivenessAnalysis::Analyze() {
-  LinearizeGraph();
+  graph_->Linearize();
   NumberInstructions();
   ComputeLiveness();
 }
 
-static bool IsLoop(HLoopInformation* info) {
-  return info != nullptr;
-}
-
-static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) {
-  return first_loop == second_loop;
-}
-
-static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) {
-  return (inner != outer)
-      && (inner != nullptr)
-      && (outer != nullptr)
-      && inner->IsIn(*outer);
-}
-
-static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) {
-  HLoopInformation* block_loop = block->GetLoopInformation();
-  auto insert_pos = worklist->rbegin();  // insert_pos.base() will be the actual position.
-  for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) {
-    HBasicBlock* current = *insert_pos;
-    HLoopInformation* current_loop = current->GetLoopInformation();
-    if (InSameLoop(block_loop, current_loop)
-        || !IsLoop(current_loop)
-        || IsInnerLoop(current_loop, block_loop)) {
-      // The block can be processed immediately.
-      break;
-    }
-  }
-  worklist->insert(insert_pos.base(), block);
-}
-
-static bool IsLinearOrderWellFormed(const HGraph& graph) {
-  for (HBasicBlock* header : graph.GetBlocks()) {
-    if (header == nullptr || !header->IsLoopHeader()) {
-      continue;
-    }
-
-    HLoopInformation* loop = header->GetLoopInformation();
-    size_t num_blocks = loop->GetBlocks().NumSetBits();
-    size_t found_blocks = 0u;
-
-    for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
-      HBasicBlock* current = it.Current();
-      if (loop->Contains(*current)) {
-        found_blocks++;
-        if (found_blocks == 1u && current != header) {
-          // First block is not the header.
-          return false;
-        } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
-          // Last block is not a back edge.
-          return false;
-        }
-      } else if (found_blocks != 0u && found_blocks != num_blocks) {
-        // Blocks are not adjacent.
-        return false;
-      }
-    }
-    DCHECK_EQ(found_blocks, num_blocks);
-  }
-
-  return true;
-}
-
-void SsaLivenessAnalysis::LinearizeGraph() {
-  // Create a reverse post ordering with the following properties:
-  // - Blocks in a loop are consecutive,
-  // - Back-edge is the last block before loop exits.
-
-  // (1): Record the number of forward predecessors for each block. This is to
-  //      ensure the resulting order is reverse post order. We could use the
-  //      current reverse post order in the graph, but it would require making
-  //      order queries to a GrowableArray, which is not the best data structure
-  //      for it.
-  ArenaVector<uint32_t> forward_predecessors(graph_->GetBlocks().size(),
-                                             graph_->GetArena()->Adapter(kArenaAllocSsaLiveness));
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
-    size_t number_of_forward_predecessors = block->GetPredecessors().size();
-    if (block->IsLoopHeader()) {
-      number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
-    }
-    forward_predecessors[block->GetBlockId()] = number_of_forward_predecessors;
-  }
-
-  // (2): Following a worklist approach, first start with the entry block, and
-  //      iterate over the successors. When all non-back edge predecessors of a
-  //      successor block are visited, the successor block is added in the worklist
-  //      following an order that satisfies the requirements to build our linear graph.
-  graph_->linear_order_.reserve(graph_->GetReversePostOrder().size());
-  ArenaVector<HBasicBlock*> worklist(graph_->GetArena()->Adapter(kArenaAllocSsaLiveness));
-  worklist.push_back(graph_->GetEntryBlock());
-  do {
-    HBasicBlock* current = worklist.back();
-    worklist.pop_back();
-    graph_->linear_order_.push_back(current);
-    for (HBasicBlock* successor : current->GetSuccessors()) {
-      int block_id = successor->GetBlockId();
-      size_t number_of_remaining_predecessors = forward_predecessors[block_id];
-      if (number_of_remaining_predecessors == 1) {
-        AddToListForLinearization(&worklist, successor);
-      }
-      forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
-    }
-  } while (!worklist.empty());
-
-  DCHECK(graph_->HasIrreducibleLoops() || IsLinearOrderWellFormed(*graph_));
-}
-
 void SsaLivenessAnalysis::NumberInstructions() {
   int ssa_index = 0;
   size_t lifetime_position = 0;
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 0be1611..b62bf4e 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1186,12 +1186,6 @@
   static constexpr const char* kLivenessPassName = "liveness";
 
  private:
-  // Linearize the graph so that:
-  // (1): a block is always after its dominator,
-  // (2): blocks of loops are contiguous.
-  // This creates a natural and efficient ordering when visualizing live ranges.
-  void LinearizeGraph();
-
   // Give an SSA number to each instruction that defines a value used by another instruction,
   // and setup the lifetime information of each instruction and block.
   void NumberInstructions();
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index ebdfc98..f5ccf40 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2018,6 +2018,45 @@
   return adjustment;
 }
 
+bool Thumb2Assembler::Fixup::IsCandidateForEmitEarly() const {
+  DCHECK(size_ == original_size_);
+  if (target_ == kUnresolved) {
+    return false;
+  }
+  // GetOffset() does not depend on current_code_size for branches, only for literals.
+  constexpr uint32_t current_code_size = 0u;
+  switch (GetSize()) {
+    case kBranch16Bit:
+      return IsInt(cond_ != AL ? 9 : 12, GetOffset(current_code_size));
+    case kBranch32Bit:
+      // We don't support conditional branches beyond +-1MiB
+      // or unconditional branches beyond +-16MiB.
+      return true;
+
+    case kCbxz16Bit:
+      return IsUint<7>(GetOffset(current_code_size));
+    case kCbxz32Bit:
+      return IsInt<9>(GetOffset(current_code_size));
+    case kCbxz48Bit:
+      // We don't support conditional branches beyond +-1MiB.
+      return true;
+
+    case kLiteral1KiB:
+    case kLiteral4KiB:
+    case kLiteral64KiB:
+    case kLiteral1MiB:
+    case kLiteralFar:
+    case kLiteralAddr1KiB:
+    case kLiteralAddr4KiB:
+    case kLiteralAddr64KiB:
+    case kLiteralAddrFar:
+    case kLongOrFPLiteral1KiB:
+    case kLongOrFPLiteral64KiB:
+    case kLongOrFPLiteralFar:
+      return false;
+  }
+}
+
 uint32_t Thumb2Assembler::Fixup::AdjustSizeIfNeeded(uint32_t current_code_size) {
   uint32_t old_code_size = current_code_size;
   switch (GetSize()) {
@@ -3343,6 +3382,30 @@
 
 void Thumb2Assembler::Bind(Label* label) {
   BindLabel(label, buffer_.Size());
+
+  // Try to emit some Fixups now to reduce the memory needed during the branch fixup later.
+  while (!fixups_.empty() && fixups_.back().IsCandidateForEmitEarly()) {
+    const Fixup& last_fixup = fixups_.back();
+    // Fixups are ordered by location, so the candidate can surely be emitted if it is
+    // a forward branch. If it's a backward branch, it may go over any number of other
+    // fixups. We could check for any number of emit early candidates but we want this
+    // heuristic to be quick, so check just one.
+    uint32_t target = last_fixup.GetTarget();
+    if (target < last_fixup.GetLocation() &&
+        fixups_.size() >= 2u &&
+        fixups_[fixups_.size() - 2u].GetLocation() >= target) {
+      const Fixup& prev_fixup = fixups_[fixups_.size() - 2u];
+      if (!prev_fixup.IsCandidateForEmitEarly()) {
+        break;
+      }
+      uint32_t min_target = std::min(target, prev_fixup.GetTarget());
+      if (fixups_.size() >= 3u && fixups_[fixups_.size() - 3u].GetLocation() >= min_target) {
+        break;
+      }
+    }
+    last_fixup.Emit(&buffer_, buffer_.Size());
+    fixups_.pop_back();
+  }
 }
 
 
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 13f3bec..917c947 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -22,11 +22,11 @@
 #include <vector>
 
 #include "base/arena_containers.h"
+#include "base/array_ref.h"
 #include "base/logging.h"
 #include "constants_arm.h"
 #include "utils/arm/managed_register_arm.h"
 #include "utils/arm/assembler_arm.h"
-#include "utils/array_ref.h"
 #include "offsets.h"
 
 namespace art {
@@ -573,6 +573,10 @@
       return location_;
     }
 
+    uint32_t GetTarget() const {
+      return target_;
+    }
+
     uint32_t GetAdjustment() const {
       return adjustment_;
     }
@@ -592,6 +596,11 @@
       target_ = target;
     }
 
+    // Branches with bound targets that are in range can be emitted early.
+    // However, the caller still needs to check if the branch doesn't go over
+    // another Fixup that's not ready to be emitted.
+    bool IsCandidateForEmitEarly() const;
+
     // Check if the current size is OK for current location_, target_ and adjustment_.
     // If not, increase the size. Return the size increase, 0 if unchanged.
     // If the target if after this Fixup, also add the difference to adjustment_,
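
The new IsCandidateForEmitEarly() boils down to a signed-range check on the already-known branch offset: the 16-bit encodings qualify only if the offset fits their immediate (9 bits for a conditional branch, 12 bits unconditional), while the wider encodings always or never qualify as listed in the switch above. A standalone sketch of that check (hypothetical helper, ignoring Thumb2 alignment and encoding details):

```
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the IsInt(bits, offset) check used above.
bool FitsSignedImmediate(int bits, int32_t offset) {
  const int32_t limit = int32_t{1} << (bits - 1);
  return offset >= -limit && offset < limit;
}

int main() {
  std::cout << FitsSignedImmediate(9, 200) << "\n";    // 1: conditional branch stays 16-bit.
  std::cout << FitsSignedImmediate(9, 600) << "\n";    // 0: would need the 32-bit form.
  std::cout << FitsSignedImmediate(12, -1500) << "\n"; // 1: unconditional 16-bit branch fits.
  return 0;
}
```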
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index a03dd74..14d29c4 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -314,11 +314,21 @@
       CHECK(src.IsCoreRegister()) << src;
       ___ Mov(dst.AsVIXLRegister(), src.AsVIXLRegister());
     } else if (dst.IsDRegister()) {
-      CHECK(src.IsDRegister()) << src;
-      ___ Vmov(F64, dst.AsVIXLDRegister(), src.AsVIXLDRegister());
+      if (src.IsDRegister()) {
+        ___ Vmov(F64, dst.AsVIXLDRegister(), src.AsVIXLDRegister());
+      } else {
+        // VMOV Dn, Rlo, Rhi (Dn = {Rlo, Rhi})
+        CHECK(src.IsRegisterPair()) << src;
+        ___ Vmov(dst.AsVIXLDRegister(), src.AsVIXLRegisterPairLow(), src.AsVIXLRegisterPairHigh());
+      }
     } else if (dst.IsSRegister()) {
-      CHECK(src.IsSRegister()) << src;
-      ___ Vmov(F32, dst.AsVIXLSRegister(), src.AsVIXLSRegister());
+      if (src.IsSRegister()) {
+        ___ Vmov(F32, dst.AsVIXLSRegister(), src.AsVIXLSRegister());
+      } else {
+        // VMOV Sn, Rn  (Sn = Rn)
+        CHECK(src.IsCoreRegister()) << src;
+        ___ Vmov(dst.AsVIXLSRegister(), src.AsVIXLRegister());
+      }
     } else {
       CHECK(dst.IsRegisterPair()) << dst;
       CHECK(src.IsRegisterPair()) << src;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index b616057..314ff8c 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -24,6 +24,7 @@
 #include "arm/constants_arm.h"
 #include "base/arena_allocator.h"
 #include "base/arena_object.h"
+#include "base/array_ref.h"
 #include "base/enums.h"
 #include "base/logging.h"
 #include "base/macros.h"
@@ -33,7 +34,6 @@
 #include "memory_region.h"
 #include "mips/constants_mips.h"
 #include "offsets.h"
-#include "utils/array_ref.h"
 #include "x86/constants_x86.h"
 #include "x86_64/constants_x86_64.h"
 
diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc
index 60a891d..4c0979e 100644
--- a/compiler/utils/dedupe_set_test.cc
+++ b/compiler/utils/dedupe_set_test.cc
@@ -20,10 +20,10 @@
 #include <cstdio>
 #include <vector>
 
+#include "base/array_ref.h"
 #include "dedupe_set-inl.h"
 #include "gtest/gtest.h"
 #include "thread-inl.h"
-#include "utils/array_ref.h"
 
 namespace art {
 
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index 6f45bd6..0119ae9 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -22,12 +22,12 @@
 #include "arch/instruction_set.h"
 #include "base/arena_allocator.h"
 #include "base/arena_object.h"
+#include "base/array_ref.h"
 #include "base/enums.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "managed_register.h"
 #include "offsets.h"
-#include "utils/array_ref.h"
 
 namespace art {
 
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 9738784..114986b 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -20,6 +20,7 @@
 #include <vector>
 
 #include "base/arena_containers.h"
+#include "base/array_ref.h"
 #include "base/bit_utils.h"
 #include "base/enums.h"
 #include "base/macros.h"
@@ -27,7 +28,6 @@
 #include "globals.h"
 #include "managed_register_x86.h"
 #include "offsets.h"
-#include "utils/array_ref.h"
 #include "utils/assembler.h"
 
 namespace art {
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 3f07ede..015584c 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -21,10 +21,10 @@
 
 #include "assembler_x86.h"
 #include "base/arena_containers.h"
+#include "base/array_ref.h"
 #include "base/enums.h"
 #include "base/macros.h"
 #include "offsets.h"
-#include "utils/array_ref.h"
 #include "utils/jni_macro_assembler.h"
 
 namespace art {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index fdd3aa9..acad86d 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -20,13 +20,13 @@
 #include <vector>
 
 #include "base/arena_containers.h"
+#include "base/array_ref.h"
 #include "base/bit_utils.h"
 #include "base/macros.h"
 #include "constants_x86_64.h"
 #include "globals.h"
 #include "managed_register_x86_64.h"
 #include "offsets.h"
-#include "utils/array_ref.h"
 #include "utils/assembler.h"
 #include "utils/jni_macro_assembler.h"
 
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index cc4e57c..9107f3c 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -21,10 +21,10 @@
 
 #include "assembler_x86_64.h"
 #include "base/arena_containers.h"
+#include "base/array_ref.h"
 #include "base/enums.h"
 #include "base/macros.h"
 #include "offsets.h"
-#include "utils/array_ref.h"
 #include "utils/assembler.h"
 #include "utils/jni_macro_assembler.h"
 
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 1dd9132..1296bcf 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2782,6 +2782,8 @@
     return EXIT_FAILURE;
   }
 
+  VLOG(compiler) << "Running dex2oat (parent PID = " << getppid() << ")";
+
   bool result;
   if (dex2oat->IsImage()) {
     result = CompileImage(*dex2oat);
diff --git a/dexlist/dexlist_test.cc b/dexlist/dexlist_test.cc
index 9a65ba6..da1dd7f 100644
--- a/dexlist/dexlist_test.cc
+++ b/dexlist/dexlist_test.cc
@@ -43,11 +43,7 @@
   // Runs test with given arguments.
   bool Exec(const std::vector<std::string>& args, std::string* error_msg) {
     std::string file_path = GetTestAndroidRoot();
-    if (IsHost()) {
-      file_path += "/bin/dexlist";
-    } else {
-      file_path += "/xbin/dexlist";
-    }
+    file_path += "/bin/dexlist";
     EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
     std::vector<std::string> exec_argv = { file_path };
     exec_argv.insert(exec_argv.end(), args.begin(), args.end());
diff --git a/compiler/utils/array_ref.h b/runtime/base/array_ref.h
similarity index 97%
rename from compiler/utils/array_ref.h
rename to runtime/base/array_ref.h
index 8dc9ab4..00b9bad 100644
--- a/compiler/utils/array_ref.h
+++ b/runtime/base/array_ref.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_UTILS_ARRAY_REF_H_
-#define ART_COMPILER_UTILS_ARRAY_REF_H_
+#ifndef ART_RUNTIME_BASE_ARRAY_REF_H_
+#define ART_RUNTIME_BASE_ARRAY_REF_H_
 
 #include <type_traits>
 #include <vector>
@@ -197,4 +197,4 @@
 }  // namespace art
 
 
-#endif  // ART_COMPILER_UTILS_ARRAY_REF_H_
+#endif  // ART_RUNTIME_BASE_ARRAY_REF_H_
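
With the rename, ArrayRef is available runtime-wide as base/array_ref.h; its use is unchanged: a non-owning view over contiguous data. A small usage sketch (hypothetical function, assuming ArrayRef's existing std::vector constructor):

```
#include <cstddef>
#include <cstdint>
#include <vector>

#include "base/array_ref.h"

namespace art {

// Sums a code buffer without copying it.
inline size_t SumBytes(ArrayRef<const uint8_t> code) {
  size_t total = 0u;
  for (uint8_t byte : code) {
    total += byte;
  }
  return total;
}

}  // namespace art

// Usage: std::vector<uint8_t> code = ...; size_t s = art::SumBytes(art::ArrayRef<const uint8_t>(code));
```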
diff --git a/compiler/utils/transform_array_ref.h b/runtime/base/transform_array_ref.h
similarity index 96%
rename from compiler/utils/transform_array_ref.h
rename to runtime/base/transform_array_ref.h
index a6da34f..a4e0bc2 100644
--- a/compiler/utils/transform_array_ref.h
+++ b/runtime/base/transform_array_ref.h
@@ -14,13 +14,13 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_UTILS_TRANSFORM_ARRAY_REF_H_
-#define ART_COMPILER_UTILS_TRANSFORM_ARRAY_REF_H_
+#ifndef ART_RUNTIME_BASE_TRANSFORM_ARRAY_REF_H_
+#define ART_RUNTIME_BASE_TRANSFORM_ARRAY_REF_H_
 
 #include <type_traits>
 
-#include "utils/array_ref.h"
-#include "utils/transform_iterator.h"
+#include "base/array_ref.h"
+#include "base/transform_iterator.h"
 
 namespace art {
 
@@ -193,4 +193,4 @@
 
 }  // namespace art
 
-#endif  // ART_COMPILER_UTILS_TRANSFORM_ARRAY_REF_H_
+#endif  // ART_RUNTIME_BASE_TRANSFORM_ARRAY_REF_H_
diff --git a/compiler/utils/transform_array_ref_test.cc b/runtime/base/transform_array_ref_test.cc
similarity index 99%
rename from compiler/utils/transform_array_ref_test.cc
rename to runtime/base/transform_array_ref_test.cc
index 8d71fd7..494dbb2 100644
--- a/compiler/utils/transform_array_ref_test.cc
+++ b/runtime/base/transform_array_ref_test.cc
@@ -19,7 +19,7 @@
 
 #include "gtest/gtest.h"
 
-#include "utils/transform_array_ref.h"
+#include "base/transform_array_ref.h"
 
 namespace art {
 
diff --git a/compiler/utils/transform_iterator.h b/runtime/base/transform_iterator.h
similarity index 97%
rename from compiler/utils/transform_iterator.h
rename to runtime/base/transform_iterator.h
index 3bc9046..9c8f822 100644
--- a/compiler/utils/transform_iterator.h
+++ b/runtime/base/transform_iterator.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_UTILS_TRANSFORM_ITERATOR_H_
-#define ART_COMPILER_UTILS_TRANSFORM_ITERATOR_H_
+#ifndef ART_RUNTIME_BASE_TRANSFORM_ITERATOR_H_
+#define ART_RUNTIME_BASE_TRANSFORM_ITERATOR_H_
 
 #include <iterator>
 #include <type_traits>
@@ -175,4 +175,4 @@
 
 }  // namespace art
 
-#endif  // ART_COMPILER_UTILS_TRANSFORM_ITERATOR_H_
+#endif  // ART_RUNTIME_BASE_TRANSFORM_ITERATOR_H_
diff --git a/compiler/utils/transform_iterator_test.cc b/runtime/base/transform_iterator_test.cc
similarity index 99%
rename from compiler/utils/transform_iterator_test.cc
rename to runtime/base/transform_iterator_test.cc
index 57ff0a6..a85dda8 100644
--- a/compiler/utils/transform_iterator_test.cc
+++ b/runtime/base/transform_iterator_test.cc
@@ -22,7 +22,7 @@
 
 #include "gtest/gtest.h"
 
-#include "utils/transform_iterator.h"
+#include "base/transform_iterator.h"
 
 namespace art {
 
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index dba0a81..5bc4e88 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -591,9 +591,9 @@
   if (IsHost()) {
     const char* host_dir = getenv("ANDROID_HOST_OUT");
     CHECK(host_dir != nullptr);
-    location = StringPrintf("%s/framework/core.%s", host_dir, suffix);
+    location = StringPrintf("%s/framework/core-npic.%s", host_dir, suffix);
   } else {
-    location = StringPrintf("/data/art-test/core.%s", suffix);
+    location = StringPrintf("/data/art-test/core-npic.%s", suffix);
   }
 
   return location;
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index d6957fc..e12fd28 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -153,17 +153,17 @@
 ifeq ($(ART_TEST_JNI_FORCECOPY),true)
   JNI_TYPES += forcecopy
 endif
-IMAGE_TYPES := image
+IMAGE_TYPES := picimage
 ifeq ($(ART_TEST_RUN_TEST_NO_IMAGE),true)
   IMAGE_TYPES += no-image
 endif
 ifeq ($(ART_TEST_RUN_TEST_MULTI_IMAGE),true)
-  IMAGE_TYPES := multiimage
+  IMAGE_TYPES := multipicimage
 endif
-ifeq ($(ART_TEST_PIC_IMAGE),true)
-  IMAGE_TYPES += picimage
+ifeq ($(ART_TEST_NPIC_IMAGE),true)
+  IMAGE_TYPES += npicimage
   ifeq ($(ART_TEST_RUN_TEST_MULTI_IMAGE),true)
-    IMAGE_TYPES := multipicimage
+    IMAGE_TYPES := multinpicimage
   endif
 endif
 PICTEST_TYPES := npictest
@@ -896,6 +896,11 @@
   ifeq ($(4),regalloc_gc)
     # Graph coloring tests share the image_suffix with optimizing tests.
     image_suffix := optimizing
+  else
+    ifeq ($(4),jit)
+      # JIT tests share the image_suffix with interpreter tests.
+      image_suffix := interpreter
+    endif
   endif
   ifeq ($(9),no-image)
     test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES
@@ -907,8 +912,9 @@
       prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_$(13))
     endif
   else
-    ifeq ($(9),image)
+    ifeq ($(9),npicimage)
       test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
+      run_test_options += --npic-image
       # Add the core dependency.
       ifeq ($(1),host)
         prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_$(13))
@@ -918,16 +924,15 @@
     else
       ifeq ($(9),picimage)
         test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
-        run_test_options += --pic-image
         ifeq ($(1),host)
           prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_$(13))
         else
           prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_pic_$(13))
         endif
       else
-        ifeq ($(9),multiimage)
+        ifeq ($(9),multinpicimage)
           test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
-          run_test_options += --multi-image
+          run_test_options += --npic-image --multi-image
                 ifeq ($(1),host)
                         prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_multi_$(13))
                 else
@@ -936,7 +941,7 @@
         else
           ifeq ($(9),multipicimage)
             test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
-                        run_test_options += --pic-image --multi-image
+                        run_test_options += --multi-image
                         ifeq ($(1),host)
                         prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_multi_$(13))
                         else
diff --git a/test/run-test b/test/run-test
index 4c29420..8fb2adf 100755
--- a/test/run-test
+++ b/test/run-test
@@ -132,7 +132,7 @@
 android_root="/system"
 # By default we will use optimizing.
 image_args=""
-image_suffix="-optimizing"
+image_suffix=""
 
 while true; do
     if [ "x$1" = "x--host" ]; then
@@ -180,8 +180,8 @@
     elif [ "x$1" = "x--no-image" ]; then
         have_image="no"
         shift
-    elif [ "x$1" = "x--pic-image" ]; then
-        pic_image_suffix="-pic"
+    elif [ "x$1" = "x--npic-image" ]; then
+        pic_image_suffix="-npic"
         shift
     elif [ "x$1" = "x--multi-image" ]; then
         multi_image_suffix="-multi"
@@ -258,11 +258,10 @@
         shift
     elif [ "x$1" = "x--jit" ]; then
         image_args="--jit"
-        image_suffix="-jit"
+        image_suffix="-interpreter"
         shift
     elif [ "x$1" = "x--optimizing" ]; then
         image_args="-Xcompiler-option --compiler-backend=Optimizing"
-        image_suffix="-optimizing"
         shift
     elif [ "x$1" = "x--no-verify" ]; then
         run_args="${run_args} --no-verify"
@@ -608,8 +607,8 @@
         echo "    --dex2oat-swap        Use a dex2oat swap file."
         echo "    --instruction-set-features [string]"
         echo "                          Set instruction-set-features for compilation."
-        echo "    --pic-image           Use an image compiled with position independent code for the"
-        echo "                          boot class path."
+        echo "    --npic-image          Use an image compiled with non-position independent code "
+        echo "                          for the boot class path."
         echo "    --multi-image         Use a set of images compiled with dex2oat multi-image for"
         echo "                          the boot class path."
         echo "    --pic-test            Compile the test code position independent."
@@ -678,7 +677,7 @@
 # Tests named '<number>-checker-*' will also have their CFGs verified with
 # Checker when compiled with Optimizing on host.
 if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
-  if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$USE_JACK" = "true" ]; then
+  if [ "$runtime" = "art" -a "$image_suffix" = "" -a "$USE_JACK" = "true" ]; then
     # Optimizing has read barrier support for certain architectures
     # only. On other architectures, compiling is disabled when read
     # barriers are enabled, meaning that we do not produce a CFG file
diff --git a/tools/art b/tools/art
index 1a3bba7..1394a46 100644
--- a/tools/art
+++ b/tools/art
@@ -100,7 +100,7 @@
   $invoke_with $ANDROID_ROOT/bin/$DALVIKVM $lib \
     -XXlib:$LIBART \
     -Xnorelocate \
-    -Ximage:$ANDROID_ROOT/framework/core-optimizing-pic.art \
+    -Ximage:$ANDROID_ROOT/framework/core.art \
     $DEBUG_OPTION \
     "$@"
 
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 01dae43..74b0f16 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -43,7 +43,7 @@
 image_compiler_option=""
 debug="no"
 verbose="no"
-image="-Ximage:/data/art-test/core-optimizing-pic.art"
+image="-Ximage:/data/art-test/core.art"
 vm_args=""
 # By default, we run the whole JDWP test suite.
 test="org.apache.harmony.jpda.tests.share.AllTests"
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 2a6e172..01c7f20 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -95,7 +95,7 @@
   if [[ "$1" == "--mode=device" ]]; then
     vogar_args="$vogar_args --device-dir=/data/local/tmp"
     vogar_args="$vogar_args --vm-command=/data/local/tmp/system/bin/art"
-    vogar_args="$vogar_args --vm-arg -Ximage:/data/art-test/core-optimizing.art"
+    vogar_args="$vogar_args --vm-arg -Ximage:/data/art-test/core.art"
     shift
   elif [[ "$1" == "--mode=host" ]]; then
     # We explicitly give a wrong path for the image, to ensure vogar