Merge "Revert "Revert "Revert "Revert "Revert "Make sure that const-class linkage is preserved.""""""
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 1737376..b883e08 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -53,6 +53,7 @@
         "optimizing/code_generator_utils.cc",
         "optimizing/constant_folding.cc",
         "optimizing/dead_code_elimination.cc",
+        "optimizing/escape.cc",
         "optimizing/graph_checker.cc",
         "optimizing/graph_visualizer.cc",
         "optimizing/gvn.cc",
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 59e1784..a78b3da 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -572,8 +572,10 @@
 
   // We are about to use the assembler to place literals directly. Make sure we have enough
   // underlying code buffer and we have generated the jump table with right size.
-  CodeBufferCheckScope scope(codegen->GetVIXLAssembler(), num_entries * sizeof(int32_t),
-                             CodeBufferCheckScope::kCheck, CodeBufferCheckScope::kExactSize);
+  vixl::CodeBufferCheckScope scope(codegen->GetVIXLAssembler(),
+                                   num_entries * sizeof(int32_t),
+                                   vixl::CodeBufferCheckScope::kReserveBufferSpace,
+                                   vixl::CodeBufferCheckScope::kExactSize);
 
   __ Bind(&table_start_);
   const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors();
@@ -2260,10 +2262,10 @@
         masm->GetCursorAddress<vixl::aarch64::Instruction*>() - kInstructionSize;
     if (prev->IsLoadOrStore()) {
       // Make sure we emit only exactly one nop.
-      vixl::aarch64::CodeBufferCheckScope scope(masm,
-                                                kInstructionSize,
-                                                vixl::aarch64::CodeBufferCheckScope::kCheck,
-                                                vixl::aarch64::CodeBufferCheckScope::kExactSize);
+      vixl::CodeBufferCheckScope scope(masm,
+                                       kInstructionSize,
+                                       vixl::CodeBufferCheckScope::kReserveBufferSpace,
+                                       vixl::CodeBufferCheckScope::kExactSize);
       __ nop();
     }
   }
@@ -4036,7 +4038,8 @@
       vixl::aarch64::Label* label = &relative_call_patches_.back().label;
       SingleEmissionCheckScope guard(GetVIXLAssembler());
       __ Bind(label);
-      __ bl(0);  // Branch and link to itself. This will be overriden at link time.
+      // Branch and link to itself. This will be overridden at link time.
+      __ bl(static_cast<int64_t>(0));
       break;
     }
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
@@ -4167,7 +4170,7 @@
   DCHECK(reg.IsX());
   SingleEmissionCheckScope guard(GetVIXLAssembler());
   __ Bind(fixup_label);
-  __ adrp(reg, /* offset placeholder */ 0);
+  __ adrp(reg, /* offset placeholder */ static_cast<int64_t>(0));
 }
 
 void CodeGeneratorARM64::EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
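
The first two hunks above track a VIXL rename: CodeBufferCheckScope moved from the
vixl::aarch64 namespace up to vixl, and the old kCheck policy is now spelled
kReserveBufferSpace. A minimal sketch of the resulting pattern, assuming the VIXL
aarch64 headers (EmitCheckedNop is a made-up name):

    void EmitCheckedNop(vixl::aarch64::MacroAssembler* masm) {
      vixl::CodeBufferCheckScope scope(masm,
                                       vixl::aarch64::kInstructionSize,
                                       vixl::CodeBufferCheckScope::kReserveBufferSpace,
                                       vixl::CodeBufferCheckScope::kExactSize);
      masm->Nop();  // kExactSize checks that exactly this many bytes were emitted.
    }
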
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 7aea616..e399f32 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -47,6 +47,7 @@
 using helpers::InputRegisterAt;
 using helpers::InputSRegisterAt;
 using helpers::InputVRegisterAt;
+using helpers::Int32ConstantFrom;
 using helpers::LocationFrom;
 using helpers::LowRegisterFrom;
 using helpers::LowSRegisterFrom;
@@ -132,7 +133,7 @@
       vixl32::Register base = sp;
       if (stack_offset != 0) {
         base = temps.Acquire();
-        __ Add(base, sp, stack_offset);
+        __ Add(base, sp, Operand::From(stack_offset));
       }
       __ Vstm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs));
     }
@@ -180,7 +181,7 @@
       vixl32::Register base = sp;
       if (stack_offset != 0) {
         base = temps.Acquire();
-        __ Add(base, sp, stack_offset);
+        __ Add(base, sp, Operand::From(stack_offset));
       }
       __ Vldm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs));
     }
@@ -673,8 +674,8 @@
   DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold);
 
   // We are about to use the assembler to place literals directly. Make sure we have enough
-  // underlying code buffer and we have generated the jump table with right size.
-  codegen->GetVIXLAssembler()->GetBuffer().Align();
+  // underlying code buffer and we have generated the jump table with right size.
+  codegen->GetVIXLAssembler()->GetBuffer()->Align();
   AssemblerAccurateScope aas(codegen->GetVIXLAssembler(),
                              num_entries * sizeof(int32_t),
                              CodeBufferCheckScope::kMaximumSize);
@@ -701,7 +702,7 @@
     DCHECK_GT(jump_offset, std::numeric_limits<int32_t>::min());
     DCHECK_LE(jump_offset, std::numeric_limits<int32_t>::max());
 
-    bb_addresses_[i].get()->UpdateValue(jump_offset, &codegen->GetVIXLAssembler()->GetBuffer());
+    bb_addresses_[i].get()->UpdateValue(jump_offset, codegen->GetVIXLAssembler()->GetBuffer());
   }
 }
 
@@ -1667,7 +1668,20 @@
   // Set the hidden (in r12) argument. It is done here, right before a BLX to prevent other
   // instruction from clobbering it as they might use r12 as a scratch register.
   DCHECK(hidden_reg.Is(r12));
-  __ Mov(hidden_reg, invoke->GetDexMethodIndex());
+
+  {
+    // The VIXL macro assembler may clobber any of the scratch registers that are available to it,
+    // so it checks if the application is using them (by passing them to the macro assembler
+    // methods). The following application of UseScratchRegisterScope corrects VIXL's notion of
+    // what is available, and is the opposite of the standard usage: Instead of requesting a
+    // temporary location, it imposes an external constraint (i.e. a specific register is reserved
+    // for the hidden argument). Note that this works even if VIXL needs a scratch register itself
+    // (to materialize the constant), since the destination register becomes available for such use
+    // internally for the duration of the macro instruction.
+    UseScratchRegisterScope temps(GetVIXLAssembler());
+    temps.Exclude(hidden_reg);
+    __ Mov(hidden_reg, invoke->GetDexMethodIndex());
+  }
 
   {
     AssemblerAccurateScope aas(GetVIXLAssembler(),
@@ -2458,13 +2472,13 @@
   vixl32::Register dividend = InputRegisterAt(instruction, 0);
   vixl32::Register temp1 = RegisterFrom(locations->GetTemp(0));
   vixl32::Register temp2 = RegisterFrom(locations->GetTemp(1));
-  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+  int32_t imm = Int32ConstantFrom(second);
 
   int64_t magic;
   int shift;
   CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
 
-  __ Mov(temp1, magic);
+  __ Mov(temp1, Operand::From(magic));
   __ Smull(temp2, temp1, dividend, temp1);
 
   if (imm > 0 && magic < 0) {
@@ -2857,9 +2871,9 @@
     }
     // Rotate, or mov to out for zero or word size rotations.
     if (rot != 0u) {
-      __ Lsr(out_reg_hi, in_reg_hi, rot);
+      __ Lsr(out_reg_hi, in_reg_hi, Operand::From(rot));
       __ Orr(out_reg_hi, out_reg_hi, Operand(in_reg_lo, ShiftType::LSL, kArmBitsPerWord - rot));
-      __ Lsr(out_reg_lo, in_reg_lo, rot);
+      __ Lsr(out_reg_lo, in_reg_lo, Operand::From(rot));
       __ Orr(out_reg_lo, out_reg_lo, Operand(in_reg_hi, ShiftType::LSL, kArmBitsPerWord - rot));
     } else {
       __ Mov(out_reg_lo, in_reg_lo);
@@ -2874,7 +2888,7 @@
     __ And(shift_right, RegisterFrom(rhs), 0x1F);
     __ Lsrs(shift_left, RegisterFrom(rhs), 6);
     // TODO(VIXL): Check that flags are kept after "vixl32::LeaveFlags" enabled.
-    __ Rsb(shift_left, shift_right, kArmBitsPerWord);
+    __ Rsb(shift_left, shift_right, Operand::From(kArmBitsPerWord));
     __ B(cc, &shift_by_32_plus_shift_right);
 
     // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
@@ -3040,11 +3054,11 @@
           // Shift the high part
           __ Lsl(o_h, high, o_l);
           // Shift the low part and `or` what overflew on the high part
-          __ Rsb(temp, o_l, kArmBitsPerWord);
+          __ Rsb(temp, o_l, Operand::From(kArmBitsPerWord));
           __ Lsr(temp, low, temp);
           __ Orr(o_h, o_h, temp);
           // If the shift is > 32 bits, override the high part
-          __ Subs(temp, o_l, kArmBitsPerWord);
+          __ Subs(temp, o_l, Operand::From(kArmBitsPerWord));
           {
             AssemblerAccurateScope guard(GetVIXLAssembler(),
                                          3 * kArmInstrMaxSizeInBytes,
@@ -3059,11 +3073,11 @@
           // Shift the low part
           __ Lsr(o_l, low, o_h);
           // Shift the high part and `or` what underflew on the low part
-          __ Rsb(temp, o_h, kArmBitsPerWord);
+          __ Rsb(temp, o_h, Operand::From(kArmBitsPerWord));
           __ Lsl(temp, high, temp);
           __ Orr(o_l, o_l, temp);
           // If the shift is > 32 bits, override the low part
-          __ Subs(temp, o_h, kArmBitsPerWord);
+          __ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
           {
             AssemblerAccurateScope guard(GetVIXLAssembler(),
                                          3 * kArmInstrMaxSizeInBytes,
@@ -3077,10 +3091,10 @@
           __ And(o_h, second_reg, kMaxLongShiftDistance);
           // same as Shr except we use `Lsr`s and not `Asr`s
           __ Lsr(o_l, low, o_h);
-          __ Rsb(temp, o_h, kArmBitsPerWord);
+          __ Rsb(temp, o_h, Operand::From(kArmBitsPerWord));
           __ Lsl(temp, high, temp);
           __ Orr(o_l, o_l, temp);
-          __ Subs(temp, o_h, kArmBitsPerWord);
+          __ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
           {
             AssemblerAccurateScope guard(GetVIXLAssembler(),
                                          3 * kArmInstrMaxSizeInBytes,
@@ -3424,7 +3438,7 @@
     __ Add(temp, addr, offset);
     addr = temp;
   }
-  __ Ldrexd(out_lo, out_hi, addr);
+  __ Ldrexd(out_lo, out_hi, MemOperand(addr));
 }
 
 void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicStore(vixl32::Register addr,
@@ -3444,9 +3458,9 @@
   __ Bind(&fail);
   // We need a load followed by store. (The address used in a STREX instruction must
   // be the same as the address in the most recently executed LDREX instruction.)
-  __ Ldrexd(temp1, temp2, addr);
+  __ Ldrexd(temp1, temp2, MemOperand(addr));
   codegen_->MaybeRecordImplicitNullCheck(instruction);
-  __ Strexd(temp1, value_lo, value_hi, addr);
+  __ Strexd(temp1, value_lo, value_hi, MemOperand(addr));
   __ CompareAndBranchIfNonZero(temp1, &fail);
 }
 
@@ -4648,7 +4662,7 @@
   }
   GetAssembler()->LoadFromOffset(
       kLoadWord, card, tr, Thread::CardTableOffset<kArmPointerSize>().Int32Value());
-  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
+  __ Lsr(temp, object, Operand::From(gc::accounting::CardTable::kCardShift));
   __ Strb(card, MemOperand(card, temp));
   if (can_be_null) {
     __ Bind(&is_null);
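
The UseScratchRegisterScope/Exclude idiom introduced above reappears throughout
the JNI macro assembler changes below: whenever a register that VIXL considers a
scratch register (r12 in particular) carries a live value, it is temporarily
excluded from the pool. A minimal sketch of the idea against the vixl::aarch32
API (MoveToHiddenArg is a made-up name):

    void MoveToHiddenArg(vixl::aarch32::MacroAssembler* masm, uint32_t method_index) {
      // Tell VIXL that r12 holds a live value, so the macro assembler will not
      // pick it as a temporary while expanding the Mov below.
      vixl::aarch32::UseScratchRegisterScope temps(masm);
      temps.Exclude(vixl::aarch32::r12);
      masm->Mov(vixl::aarch32::r12, method_index);
    }  // The scope destructor returns r12 to the scratch pool.
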
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index 5129daf..d3623f1 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -139,9 +139,14 @@
   HConstant* instr = location.GetConstant();
   if (instr->IsIntConstant()) {
     return instr->AsIntConstant()->GetValue();
-  } else {
-    DCHECK(instr->IsNullConstant()) << instr->DebugName();
+  } else if (instr->IsNullConstant()) {
     return 0;
+  } else {
+    DCHECK(instr->IsLongConstant()) << instr->DebugName();
+    const int64_t ret = instr->AsLongConstant()->GetValue();
+    DCHECK_GE(ret, std::numeric_limits<int32_t>::min());
+    DCHECK_LE(ret, std::numeric_limits<int32_t>::max());
+    return ret;
   }
 }
 
@@ -161,7 +166,7 @@
   if (location.IsRegister()) {
     return vixl::aarch32::Operand(RegisterFrom(location, type));
   } else {
-    return vixl::aarch32::Operand(Int64ConstantFrom(location));
+    return vixl::aarch32::Operand(Int32ConstantFrom(location));
   }
 }
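
Int32ConstantFrom now also accepts a long constant, provided its value fits in
32 bits. A standalone analogue of that narrowing contract, with assert standing
in for ART's DCHECK (NarrowToInt32 is a made-up name):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    int32_t NarrowToInt32(int64_t v) {
      assert(v >= std::numeric_limits<int32_t>::min());  // rejects e.g. INT64_MIN
      assert(v <= std::numeric_limits<int32_t>::max());  // rejects e.g. 1LL << 32
      return static_cast<int32_t>(v);
    }
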
 
diff --git a/compiler/optimizing/escape.cc b/compiler/optimizing/escape.cc
new file mode 100644
index 0000000..c80e19e
--- /dev/null
+++ b/compiler/optimizing/escape.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "escape.h"
+
+#include "nodes.h"
+
+namespace art {
+
+void CalculateEscape(HInstruction* reference,
+                     bool (*no_escape)(HInstruction*, HInstruction*),
+                     /*out*/ bool* is_singleton,
+                     /*out*/ bool* is_singleton_and_non_escaping) {
+  // For references not allocated in the method, don't assume anything.
+  if (!reference->IsNewInstance() && !reference->IsNewArray()) {
+    *is_singleton = false;
+    *is_singleton_and_non_escaping = false;
+    return;
+  }
+  // Assume the best until proven otherwise.
+  *is_singleton = true;
+  *is_singleton_and_non_escaping = true;
+  // Visit all uses to determine if this reference can escape into the heap,
+  // a method call, an alias, etc.
+  for (const HUseListNode<HInstruction*>& use : reference->GetUses()) {
+    HInstruction* user = use.GetUser();
+    if (no_escape != nullptr && (*no_escape)(reference, user)) {
+      // Client supplied analysis says there is no escape.
+      continue;
+    } else if (user->IsBoundType() || user->IsNullCheck()) {
+      // BoundType shouldn't normally be necessary for an allocation. Just be conservative
+      // for the uncommon cases. Similarly, null checks are eventually eliminated for explicit
+      // allocations, but if we see one before it is simplified, assume an alias.
+      *is_singleton = false;
+      *is_singleton_and_non_escaping = false;
+      return;
+    } else if (user->IsPhi() || user->IsSelect() || user->IsInvoke() ||
+               (user->IsInstanceFieldSet() && (reference == user->InputAt(1))) ||
+               (user->IsUnresolvedInstanceFieldSet() && (reference == user->InputAt(1))) ||
+               (user->IsStaticFieldSet() && (reference == user->InputAt(1))) ||
+               (user->IsUnresolvedStaticFieldSet() && (reference == user->InputAt(0))) ||
+               (user->IsArraySet() && (reference == user->InputAt(2)))) {
+      // The reference is merged to HPhi/HSelect, passed to a callee, or stored to heap.
+      // Hence, the reference is no longer the only name that can refer to its value.
+      *is_singleton = false;
+      *is_singleton_and_non_escaping = false;
+      return;
+    } else if ((user->IsUnresolvedInstanceFieldGet() && (reference == user->InputAt(0))) ||
+               (user->IsUnresolvedInstanceFieldSet() && (reference == user->InputAt(0)))) {
+      // The field is accessed in an unresolved way. We mark the object as a non-singleton.
+      // Note that we could optimize this case and still perform some optimizations until
+      // we hit the unresolved access, but the conservative assumption is the simplest.
+      *is_singleton = false;
+      *is_singleton_and_non_escaping = false;
+      return;
+    } else if (user->IsReturn()) {
+      *is_singleton_and_non_escaping = false;
+    }
+  }
+
+  // Need for further analysis?
+  if (!*is_singleton_and_non_escaping) {
+    return;
+  }
+
+  // Look at the environment uses and if it's for HDeoptimize, it's treated the
+  // same as a return which escapes at the end of executing the compiled code.
+  // Other environment uses are fine, as long as all client optimizations that
+ * rely on this information are disabled for debuggable.
+  for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
+    HEnvironment* user = use.GetUser();
+    if (user->GetHolder()->IsDeoptimize()) {
+      *is_singleton_and_non_escaping = false;
+      break;
+    }
+  }
+}
+
+bool IsNonEscapingSingleton(HInstruction* reference,
+                            bool (*no_escape)(HInstruction*, HInstruction*)) {
+  bool is_singleton = true;
+  bool is_singleton_and_non_escaping = true;
+  CalculateEscape(reference, no_escape, &is_singleton, &is_singleton_and_non_escaping);
+  return is_singleton_and_non_escaping;
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/escape.h b/compiler/optimizing/escape.h
new file mode 100644
index 0000000..6514843
--- /dev/null
+++ b/compiler/optimizing/escape.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_ESCAPE_H_
+#define ART_COMPILER_OPTIMIZING_ESCAPE_H_
+
+namespace art {
+
+class HInstruction;
+
+/*
+ * Methods related to escape analysis, i.e. determining whether an object
+ * allocation is visible outside ('escapes') its immediate method context.
+ */
+
+/*
+ * Performs escape analysis on the given instruction, typically a reference to an
+ * allocation. The method assigns true to parameter 'is_singleton' if the reference
+ * is the only name that can refer to its value during the lifetime of the method,
+ * meaning that the reference is not aliased with something else, is not stored to
+ * heap memory, and not passed to another method. The method assigns true to parameter
+ * 'is_singleton_and_non_escaping' if the reference is a singleton and is not returned
+ * to the caller or used as an environment local of an HDeoptimize instruction.
+ *
+ * When set, the no_escape function is applied to any use of the allocation instruction
+ * prior to any built-in escape analysis. This allows clients to define better escape
+ * analysis in certain case-specific circumstances. If 'no_escape(reference, user)'
+ * returns true, the user is assumed *not* to cause any escape right away. The return
+ * value false means the client cannot provide a definite answer and built-in escape
+ * analysis is applied to the user instead.
+ */
+void CalculateEscape(HInstruction* reference,
+                     bool (*no_escape)(HInstruction*, HInstruction*),
+                     /*out*/ bool* is_singleton,
+                     /*out*/ bool* is_singleton_and_non_escaping);
+
+/*
+ * Convenience method for testing singleton and non-escaping property at once.
+ * Callers should be aware that this method invokes the full analysis at each call.
+ */
+bool IsNonEscapingSingleton(HInstruction* reference,
+                            bool (*no_escape)(HInstruction*, HInstruction*));
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_ESCAPE_H_
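
A hedged usage sketch of the no_escape hook documented above. The callback
NoEscapeForArrayLength is hypothetical; IsArrayLength and InputAt follow the
HInstruction accessors already used in escape.cc:

    // Reading an array's length does not publish the reference anywhere, so a
    // client may declare such uses non-escaping before the built-in analysis runs.
    static bool NoEscapeForArrayLength(HInstruction* reference, HInstruction* user) {
      return user->IsArrayLength() && user->InputAt(0) == reference;
    }

    bool is_singleton = false;
    bool is_singleton_and_non_escaping = false;
    CalculateEscape(reference, &NoEscapeForArrayLength,
                    &is_singleton, &is_singleton_and_non_escaping);
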
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 451abc5..17a97da 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2165,11 +2165,11 @@
   __ Cbz(dst, slow_path->GetEntryLabel());
 
   if (!length.IsConstant()) {
-    // If the length is negative, bail out.
-    __ Tbnz(WRegisterFrom(length), kWRegSize - 1, slow_path->GetEntryLabel());
-    // If the length > 32 then (currently) prefer libcore's native implementation.
+    // Merge the following two comparisons into one:
+    //   If the length is negative, bail out (delegate to libcore's native implementation).
+    //   If the length > 32 then (currently) prefer libcore's native implementation.
     __ Cmp(WRegisterFrom(length), kSystemArrayCopyCharThreshold);
-    __ B(slow_path->GetEntryLabel(), gt);
+    __ B(slow_path->GetEntryLabel(), hi);
   } else {
     // We have already checked in the LocationsBuilder for the constant case.
     DCHECK_GE(length.GetConstant()->AsIntConstant()->GetValue(), 0);
@@ -2379,11 +2379,11 @@
   if (!length.IsConstant() &&
       !optimizations.GetCountIsSourceLength() &&
       !optimizations.GetCountIsDestinationLength()) {
-    // If the length is negative, bail out.
-    __ Tbnz(WRegisterFrom(length), kWRegSize - 1, intrinsic_slow_path->GetEntryLabel());
-    // If the length >= 128 then (currently) prefer native implementation.
+    // Merge the following two comparisons into one:
+    //   If the length is negative, bail out (delegate to libcore's native implementation).
+    //   If the length >= 128 then (currently) prefer native implementation.
     __ Cmp(WRegisterFrom(length), kSystemArrayCopyThreshold);
-    __ B(intrinsic_slow_path->GetEntryLabel(), ge);
+    __ B(intrinsic_slow_path->GetEntryLabel(), hs);
   }
   // Validity checks: source.
   CheckSystemArrayCopyPosition(masm,
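
The condition-code changes above (gt to hi, ge to hs) are what allow one compare
to replace the removed Tbnz sign test: read as unsigned, a negative length
becomes a very large value and takes the same branch as an over-threshold
length. The same trick in plain C++:

    #include <cstdint>

    // Mirrors "Cmp length, threshold; B slow_path, hi" applied to a signed length.
    bool NeedsSlowPath(int32_t length, uint32_t threshold) {
      return static_cast<uint32_t>(length) > threshold;
    }
    // NeedsSlowPath(-1, 32) == true   (0xFFFFFFFFu > 32u: negative length bails out)
    // NeedsSlowPath(33, 32) == true   (over the threshold)
    // NeedsSlowPath(32, 32) == false  (within the threshold)
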
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 7a1ec9f..c8e3534 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -518,7 +518,7 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
   // Ignore upper 4B of long address.
-  __ Ldrsb(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Ldrsb(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -528,7 +528,7 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
   // Ignore upper 4B of long address.
-  __ Ldr(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Ldr(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -545,9 +545,9 @@
   vixl32::Register hi = HighRegisterFrom(invoke->GetLocations()->Out());
   if (addr.Is(lo)) {
     __ Ldr(hi, MemOperand(addr, 4));
-    __ Ldr(lo, addr);
+    __ Ldr(lo, MemOperand(addr));
   } else {
-    __ Ldr(lo, addr);
+    __ Ldr(lo, MemOperand(addr));
     __ Ldr(hi, MemOperand(addr, 4));
   }
 }
@@ -559,7 +559,7 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
   // Ignore upper 4B of long address.
-  __ Ldrsh(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Ldrsh(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
@@ -576,7 +576,7 @@
 
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
-  __ Strb(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Strb(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -585,7 +585,7 @@
 
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
-  __ Str(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Str(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -598,7 +598,7 @@
   vixl32::Register addr = LowRegisterFrom(invoke->GetLocations()->InAt(0));
   // Worst case: Control register bit SCTLR.A = 0. Then unaligned accesses throw a processor
   // exception. So we can't use ldrd as addr may be unaligned.
-  __ Str(LowRegisterFrom(invoke->GetLocations()->InAt(1)), addr);
+  __ Str(LowRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr));
   __ Str(HighRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr, 4));
 }
 
@@ -608,7 +608,7 @@
 
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
-  __ Strh(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Strh(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) {
@@ -842,8 +842,8 @@
       __ Add(temp_reg, base, offset);
       vixl32::Label loop_head;
       __ Bind(&loop_head);
-      __ Ldrexd(temp_lo, temp_hi, temp_reg);
-      __ Strexd(temp_lo, value_lo, value_hi, temp_reg);
+      __ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg));
+      __ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg));
       __ Cmp(temp_lo, 0);
       __ B(ne, &loop_head);
     } else {
@@ -1042,7 +1042,7 @@
   vixl32::Label loop_head;
   __ Bind(&loop_head);
 
-  __ Ldrex(tmp, tmp_ptr);
+  __ Ldrex(tmp, MemOperand(tmp_ptr));
 
   __ Subs(tmp, tmp, expected);
 
@@ -1052,7 +1052,7 @@
                                CodeBufferCheckScope::kMaximumSize);
 
     __ itt(eq);
-    __ strex(eq, tmp, value, tmp_ptr);
+    __ strex(eq, tmp, value, MemOperand(tmp_ptr));
     __ cmp(eq, tmp, 1);
   }
 
@@ -1220,7 +1220,7 @@
   static_assert(IsAligned<8>(kObjectAlignment),
                 "String data must be 8-byte aligned for unrolled CompareTo loop.");
 
-  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  const unsigned char_size = Primitive::ComponentSize(Primitive::kPrimChar);
   DCHECK_EQ(char_size, 2u);
 
   UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
@@ -1469,7 +1469,7 @@
   __ Bind(&loop);
   __ Ldr(out, MemOperand(str, temp1));
   __ Ldr(temp2, MemOperand(arg, temp1));
-  __ Add(temp1, temp1, sizeof(uint32_t));
+  __ Add(temp1, temp1, Operand::From(sizeof(uint32_t)));
   __ Cmp(out, temp2);
   __ B(ne, &return_false);
   // With string compression, we have compared 4 bytes, otherwise 2 chars.
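
Every Ldr/Str/Ldrexd/Strexd change in this file wraps the bare address register
in an explicit MemOperand. The two addressing forms involved, sketched against
the vixl::aarch32 API (LoadLongAt is a made-up name):

    void LoadLongAt(vixl::aarch32::MacroAssembler* masm,
                    vixl::aarch32::Register lo,
                    vixl::aarch32::Register hi,
                    vixl::aarch32::Register addr) {
      masm->Ldr(lo, vixl::aarch32::MemOperand(addr));     // [addr], zero offset
      masm->Ldr(hi, vixl::aarch32::MemOperand(addr, 4));  // [addr, #4]
    }
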
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 15e6059..edecf17 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -15,6 +15,8 @@
  */
 
 #include "load_store_elimination.h"
+
+#include "escape.h"
 #include "side_effects_analysis.h"
 
 #include <iostream>
@@ -31,70 +33,12 @@
 // whether it's a singleton, returned, etc.
 class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
  public:
-  ReferenceInfo(HInstruction* reference, size_t pos) : reference_(reference), position_(pos) {
-    is_singleton_ = true;
-    is_singleton_and_non_escaping_ = true;
-    if (!reference_->IsNewInstance() && !reference_->IsNewArray()) {
-      // For references not allocated in the method, don't assume anything.
-      is_singleton_ = false;
-      is_singleton_and_non_escaping_ = false;
-      return;
-    }
-
-    // Visit all uses to determine if this reference can spread into the heap,
-    // a method call, etc.
-    for (const HUseListNode<HInstruction*>& use : reference_->GetUses()) {
-      HInstruction* user = use.GetUser();
-      DCHECK(!user->IsNullCheck()) << "NullCheck should have been eliminated";
-      if (user->IsBoundType()) {
-        // BoundType shouldn't normally be necessary for a NewInstance.
-        // Just be conservative for the uncommon cases.
-        is_singleton_ = false;
-        is_singleton_and_non_escaping_ = false;
-        return;
-      }
-      if (user->IsPhi() || user->IsSelect() || user->IsInvoke() ||
-          (user->IsInstanceFieldSet() && (reference_ == user->InputAt(1))) ||
-          (user->IsUnresolvedInstanceFieldSet() && (reference_ == user->InputAt(1))) ||
-          (user->IsStaticFieldSet() && (reference_ == user->InputAt(1))) ||
-          (user->IsUnresolvedStaticFieldSet() && (reference_ == user->InputAt(0))) ||
-          (user->IsArraySet() && (reference_ == user->InputAt(2)))) {
-        // reference_ is merged to HPhi/HSelect, passed to a callee, or stored to heap.
-        // reference_ isn't the only name that can refer to its value anymore.
-        is_singleton_ = false;
-        is_singleton_and_non_escaping_ = false;
-        return;
-      }
-      if ((user->IsUnresolvedInstanceFieldGet() && (reference_ == user->InputAt(0))) ||
-          (user->IsUnresolvedInstanceFieldSet() && (reference_ == user->InputAt(0)))) {
-        // The field is accessed in an unresolved way. We mark the object as a non-singleton
-        // to disable load/store optimizations on it.
-        // Note that we could optimize this case and still perform some optimizations until
-        // we hit the unresolved access, but disabling is the simplest.
-        is_singleton_ = false;
-        is_singleton_and_non_escaping_ = false;
-        return;
-      }
-      if (user->IsReturn()) {
-        is_singleton_and_non_escaping_ = false;
-      }
-    }
-
-    if (!is_singleton_ || !is_singleton_and_non_escaping_) {
-      return;
-    }
-
-    // Look at Environment uses and if it's for HDeoptimize, it's treated the same
-    // as a return which escapes at the end of executing the compiled code. We don't
-    // do store elimination for singletons that escape through HDeoptimize.
-    // Other Environment uses are fine since LSE is disabled for debuggable.
-    for (const HUseListNode<HEnvironment*>& use : reference_->GetEnvUses()) {
-      HEnvironment* user = use.GetUser();
-      if (user->GetHolder()->IsDeoptimize()) {
-        is_singleton_and_non_escaping_ = false;
-        break;
-      }
-    }
+  ReferenceInfo(HInstruction* reference, size_t pos)
+      : reference_(reference),
+        position_(pos),
+        is_singleton_(true),
+        is_singleton_and_non_escaping_(true) {
+    CalculateEscape(reference_, nullptr, &is_singleton_, &is_singleton_and_non_escaping_);
   }
 
   HInstruction* GetReference() const {
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 1aaa231..c35c393 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -43,12 +43,12 @@
 }
 
 const uint8_t* ArmVIXLAssembler::CodeBufferBaseAddress() const {
-  return vixl_masm_.GetStartAddress<uint8_t*>();
+  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
 }
 
 void ArmVIXLAssembler::FinalizeInstructions(const MemoryRegion& region) {
   // Copy the instructions from the buffer.
-  MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
+  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
   region.CopyFrom(0, from);
 }
 
@@ -365,7 +365,7 @@
       if (stack_offset != 0) {
         base = temps.Acquire();
         DCHECK_EQ(regs & (1u << base.GetCode()), 0u);
-        ___ Add(base, sp, stack_offset);
+        ___ Add(base, sp, Operand::From(stack_offset));
       }
       ___ Stm(base, NO_WRITE_BACK, RegisterList(regs));
     } else {
@@ -385,7 +385,7 @@
       vixl32::Register base = sp;
       if (stack_offset != 0) {
         base = temps.Acquire();
-        ___ Add(base, sp, stack_offset);
+        ___ Add(base, sp, Operand::From(stack_offset));
       }
       ___ Ldm(base, NO_WRITE_BACK, RegisterList(regs));
     } else {
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.cc b/compiler/utils/arm/jni_macro_assembler_arm.cc
index cf7a4d1..3f425df 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm.cc
@@ -594,6 +594,41 @@
   __ b(slow->Entry(), NE);
 }
 
+std::unique_ptr<JNIMacroLabel> ArmJNIMacroAssembler::CreateLabel() {
+  return std::unique_ptr<JNIMacroLabel>(new ArmJNIMacroLabel());
+}
+
+void ArmJNIMacroAssembler::Jump(JNIMacroLabel* label) {
+  CHECK(label != nullptr);
+  __ b(ArmJNIMacroLabel::Cast(label)->AsArm());
+}
+
+void ArmJNIMacroAssembler::Jump(JNIMacroLabel* label,
+                                JNIMacroUnaryCondition condition,
+                                ManagedRegister test) {
+  CHECK(label != nullptr);
+
+  arm::Condition arm_cond;
+  switch (condition) {
+    case JNIMacroUnaryCondition::kZero:
+      arm_cond = EQ;
+      break;
+    case JNIMacroUnaryCondition::kNotZero:
+      arm_cond = NE;
+      break;
+    default:
+      LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
+      UNREACHABLE();
+  }
+  __ cmp(test.AsArm().AsCoreRegister(), ShifterOperand(0));
+  __ b(ArmJNIMacroLabel::Cast(label)->AsArm(), arm_cond);
+}
+
+void ArmJNIMacroAssembler::Bind(JNIMacroLabel* label) {
+  CHECK(label != nullptr);
+  __ Bind(ArmJNIMacroLabel::Cast(label)->AsArm());
+}
+
 #undef __
 
 void ArmExceptionSlowPath::Emit(Assembler* sasm) {
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.h b/compiler/utils/arm/jni_macro_assembler_arm.h
index 4471906..809ac8b 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm.h
@@ -25,6 +25,7 @@
 #include "base/enums.h"
 #include "base/macros.h"
 #include "utils/jni_macro_assembler.h"
+#include "utils/label.h"
 #include "offsets.h"
 
 namespace art {
@@ -159,10 +160,26 @@
 
   void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
 
+  // Create a new label that can be used with Jump/Bind calls.
+  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+  // Emit an unconditional jump to the label.
+  void Jump(JNIMacroLabel* label) OVERRIDE;
+  // Emit a conditional jump to the label by applying a unary condition test to the register.
+  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+  // Code at this offset will serve as the target for the Jump call.
+  void Bind(JNIMacroLabel* label) OVERRIDE;
+
  private:
   std::unique_ptr<ArmAssembler> asm_;
 };
 
+class ArmJNIMacroLabel FINAL : public JNIMacroLabelCommon<ArmJNIMacroLabel, art::Label, kArm> {
+ public:
+  art::Label* AsArm() {
+    return AsPlatformLabel();
+  }
+};
+
 }  // namespace arm
 }  // namespace art
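
The comments above describe the label protocol; a hedged sketch of a zero test
built on it (jni_asm and test_reg are placeholders for a JNIMacroAssembler and a
ManagedRegister):

    std::unique_ptr<JNIMacroLabel> done = jni_asm->CreateLabel();
    // if (test_reg == 0) goto done;
    jni_asm->Jump(done.get(), JNIMacroUnaryCondition::kZero, test_reg);
    //   ... code skipped when test_reg is zero ...
    jni_asm->Bind(done.get());  // done:
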
 
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index b2bbd72..f20ed0a 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -168,6 +168,8 @@
     CHECK_EQ(0u, size);
   } else if (src.IsCoreRegister()) {
     CHECK_EQ(4u, size);
+    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+    temps.Exclude(src.AsVIXLRegister());
     asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
   } else if (src.IsRegisterPair()) {
     CHECK_EQ(8u, size);
@@ -186,12 +188,16 @@
 void ArmVIXLJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
   ArmManagedRegister src = msrc.AsArm();
   CHECK(src.IsCoreRegister()) << src;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(src.AsVIXLRegister());
   asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
 }
 
 void ArmVIXLJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
   ArmManagedRegister src = msrc.AsArm();
   CHECK(src.IsCoreRegister()) << src;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(src.AsVIXLRegister());
   asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
 }
 
@@ -202,6 +208,8 @@
   ArmManagedRegister src = msrc.AsArm();
   ArmManagedRegister scratch = mscratch.AsArm();
   asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, in_off.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value() + 4);
 }
@@ -210,6 +218,8 @@
                                        FrameOffset src,
                                        ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, src.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value());
 }
@@ -220,6 +230,8 @@
                                        bool unpoison_reference) {
   ArmManagedRegister dst = dest.AsArm();
   CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(dst.AsVIXLRegister(), base.AsArm().AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord,
                       dst.AsVIXLRegister(),
                       base.AsArm().AsVIXLRegister(),
@@ -246,6 +258,8 @@
                                                      ManagedRegister scratch) {
   ArmManagedRegister mscratch = scratch.AsArm();
   CHECK(mscratch.IsCoreRegister()) << mscratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(mscratch.AsVIXLRegister());
   asm_.LoadImmediate(mscratch.AsVIXLRegister(), imm);
   asm_.StoreToOffset(kStoreWord, mscratch.AsVIXLRegister(), sp, dest.Int32Value());
 }
@@ -263,6 +277,8 @@
 void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
   ArmManagedRegister dst = m_dst.AsArm();
   CHECK(dst.IsCoreRegister()) << dst;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(dst.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, dst.AsVIXLRegister(), tr, offs.Int32Value());
 }
 
@@ -271,6 +287,8 @@
                                                     ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
 }
@@ -286,6 +304,8 @@
                                                         ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.AddConstant(scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
 }
@@ -312,6 +332,8 @@
   if (!dst.Equals(src)) {
     if (dst.IsCoreRegister()) {
       CHECK(src.IsCoreRegister()) << src;
+      UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+      temps.Exclude(dst.AsVIXLRegister());
       ___ Mov(dst.AsVIXLRegister(), src.AsVIXLRegister());
     } else if (dst.IsDRegister()) {
       if (src.IsDRegister()) {
@@ -351,6 +373,8 @@
   ArmManagedRegister temp = scratch.AsArm();
   CHECK(temp.IsCoreRegister()) << temp;
   CHECK(size == 4 || size == 8) << size;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(temp.AsVIXLRegister());
   if (size == 4) {
     asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value());
     asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value());
@@ -414,6 +438,8 @@
   ArmManagedRegister in_reg = min_reg.AsArm();
   CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
   CHECK(out_reg.IsCoreRegister()) << out_reg;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(out_reg.AsVIXLRegister());
   if (null_allowed) {
     // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
     // the address in the handle scope holding the reference.
@@ -425,6 +451,8 @@
                           handle_scope_offset.Int32Value());
       in_reg = out_reg;
     }
+
+    temps.Exclude(in_reg.AsVIXLRegister());
     ___ Cmp(in_reg.AsVIXLRegister(), 0);
 
     if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
@@ -457,6 +485,8 @@
                                                       bool null_allowed) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   if (null_allowed) {
     asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
     // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
@@ -503,6 +533,8 @@
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(base.IsCoreRegister()) << base;
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord,
                       scratch.AsVIXLRegister(),
                       base.AsVIXLRegister(),
@@ -514,6 +546,8 @@
 void ArmVIXLJNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   // Call *(*(SP + base) + offset)
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, base.Int32Value());
   asm_.LoadFromOffset(kLoadWord,
@@ -541,6 +575,8 @@
 void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
   CHECK_ALIGNED(stack_adjust, kStackAlignment);
   ArmManagedRegister scratch = m_scratch.AsArm();
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   exception_blocks_.emplace_back(
       new ArmVIXLJNIMacroAssembler::ArmException(scratch, stack_adjust));
   asm_.LoadFromOffset(kLoadWord,
@@ -598,11 +634,14 @@
   if (exception->stack_adjust_ != 0) {  // Fix up the frame.
     DecreaseFrameSize(exception->stack_adjust_);
   }
+
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(exception->scratch_.AsVIXLRegister());
   // Pass exception object as argument.
   // Don't care about preserving r0 as this won't return.
   ___ Mov(r0, exception->scratch_.AsVIXLRegister());
+  temps.Include(exception->scratch_.AsVIXLRegister());
   // TODO: check that exception->scratch_ is dead by this point.
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   vixl32::Register temp = temps.Acquire();
   ___ Ldr(temp,
           MemOperand(tr,
@@ -624,6 +663,9 @@
   } else if (dest.IsCoreRegister()) {
     CHECK(!dest.AsVIXLRegister().Is(sp)) << dest;
 
+    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+    temps.Exclude(dest.AsVIXLRegister());
+
     if (size == 1u) {
       ___ Ldrb(dest.AsVIXLRegister(), MemOperand(base, offset));
     } else {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index f91bcfa..6ed0e9b 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -40,12 +40,12 @@
 }
 
 const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
-  return vixl_masm_.GetStartAddress<uint8_t*>();
+  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
 }
 
 void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
   // Copy the instructions from the buffer.
-  MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
+  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
   region.CopyFrom(0, from);
 }
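
In both this file and assembler_arm_vixl.cc above, the const
CodeBufferBaseAddress keeps the '.' call on GetBuffer() while
FinalizeInstructions switches to '->'. That split is consistent with a
const/non-const accessor pair; a self-contained illustration (names are
placeholders, not the VIXL declarations):

    #include <cstdint>

    struct Buffer { const uint8_t* start = nullptr; };

    class BufferHolder {
     public:
      const Buffer& GetBuffer() const { return buffer_; }  // call sites use '.'
      Buffer* GetBuffer() { return &buffer_; }             // call sites use '->'
     private:
      Buffer buffer_;
    };
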
 
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 10bed13..50a1d9f 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -1753,7 +1753,10 @@
   __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
   __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
 
+  vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler());
+  temps.Exclude(R12);
   __ LoadFromOffset(kLoadWord, R0, R12, 12);  // 32-bit because of R12.
+  temps.Include(R12);
   __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);
 
   __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
@@ -1783,7 +1786,10 @@
   __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
   __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
 
+  vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler());
+  temps.Exclude(R12);
   __ StoreToOffset(kStoreWord, R0, R12, 12);  // 32-bit because of R12.
+  temps.Include(R12);
   __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);
 
   __ StoreToOffset(kStoreByte, R2, R4, 12);
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 2f154fb..1b74313 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -20,7 +20,7 @@
 #include <vector>
 
 #ifdef ART_ENABLE_CODEGEN_arm
-#include "arm/jni_macro_assembler_arm_vixl.h"
+#include "arm/jni_macro_assembler_arm.h"
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
 #include "arm64/jni_macro_assembler_arm64.h"
@@ -58,7 +58,7 @@
 #ifdef ART_ENABLE_CODEGEN_arm
     case kArm:
     case kThumb2:
-      return MacroAsm32UniquePtr(new (arena) arm::ArmVIXLJNIMacroAssembler(arena));
+      return MacroAsm32UniquePtr(new (arena) arm::ArmJNIMacroAssembler(arena, instruction_set));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
     case kMips:
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 30b708c..3347dac 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -63,9 +63,7 @@
         case kVld2Location:
         case kVld3Location:
         case kVld4Location: {
-          const uintptr_t pc_delta = disasm_->IsT32()
-              ? vixl::aarch32::kT32PcDelta
-              : vixl::aarch32::kA32PcDelta;
+          const uintptr_t pc_delta = label.GetLabel()->GetPcOffset();
           const int32_t offset = label.GetLabel()->GetLocation();
 
           os() << "[pc, #" << offset - pc_delta << "]";
@@ -77,7 +75,7 @@
       }
     }
 
-    DisassemblerStream& operator<<(const vixl::aarch32::Register reg) OVERRIDE {
+    DisassemblerStream& operator<<(vixl::aarch32::Register reg) OVERRIDE {
       if (reg.Is(tr)) {
         os() << "tr";
         return *this;
@@ -118,20 +116,11 @@
   CustomDisassembler(std::ostream& os, const DisassemblerOptions* options)
       : PrintDisassembler(&disassembler_stream_), disassembler_stream_(os, this, options) {}
 
-  void PrintPc(uint32_t prog_ctr) OVERRIDE {
+  void PrintCodeAddress(uint32_t prog_ctr) OVERRIDE {
     os() << "0x" << std::hex << std::setw(8) << std::setfill('0') << prog_ctr << ": ";
   }
 
-  bool IsT32() const {
-    return is_t32_;
-  }
-
-  void SetT32(bool is_t32) {
-    is_t32_ = is_t32;
-  }
-
  private:
-  bool is_t32_;
   CustomDisassemblerStream disassembler_stream_;
 };
 
@@ -152,7 +141,7 @@
       sizeof(unaligned_float), sizeof(unaligned_double)};
   const uintptr_t begin = reinterpret_cast<uintptr_t>(options_->base_address_);
   const uintptr_t end = reinterpret_cast<uintptr_t>(options_->end_address_);
-  uintptr_t literal_addr = RoundDown(disasm_->GetPc(), vixl::aarch32::kRegSizeInBytes) + offset;
+  uintptr_t literal_addr = RoundDown(disasm_->GetCodeAddress(), vixl::aarch32::kRegSizeInBytes) + offset;
 
   if (!options_->absolute_addresses_) {
     literal_addr += begin;
@@ -208,12 +197,14 @@
   // Remove the Thumb specifier bit; no effect if begin does not point to T32 code.
   const uintptr_t instr_ptr = reinterpret_cast<uintptr_t>(begin) & ~1;
 
-  disasm_->SetT32((reinterpret_cast<uintptr_t>(begin) & 1) != 0);
-  disasm_->JumpToPc(GetPc(instr_ptr));
+  const bool is_t32 = (reinterpret_cast<uintptr_t>(begin) & 1) != 0;
+  disasm_->SetCodeAddress(GetPc(instr_ptr));
 
-  if (disasm_->IsT32()) {
+  if (is_t32) {
     const uint16_t* const ip = reinterpret_cast<const uint16_t*>(instr_ptr);
-    next = reinterpret_cast<uintptr_t>(disasm_->DecodeT32At(ip));
+    const uint16_t* const end_address = reinterpret_cast<const uint16_t*>(
+        GetDisassemblerOptions()->end_address_);
+    next = reinterpret_cast<uintptr_t>(disasm_->DecodeT32At(ip, end_address));
   } else {
     const uint32_t* const ip = reinterpret_cast<const uint32_t*>(instr_ptr);
     next = reinterpret_cast<uintptr_t>(disasm_->DecodeA32At(ip));
@@ -230,10 +221,10 @@
   // Remove the Thumb specifier bit; no effect if begin does not point to T32 code.
   const uintptr_t base = reinterpret_cast<uintptr_t>(begin) & ~1;
 
-  disasm_->SetT32((reinterpret_cast<uintptr_t>(begin) & 1) != 0);
-  disasm_->JumpToPc(GetPc(base));
+  const bool is_t32 = (reinterpret_cast<uintptr_t>(begin) & 1) != 0;
+  disasm_->SetCodeAddress(GetPc(base));
 
-  if (disasm_->IsT32()) {
+  if (is_t32) {
     // The Thumb specifier bits cancel each other.
     disasm_->DisassembleT32Buffer(reinterpret_cast<const uint16_t*>(base), end - begin);
   } else {
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 6b21a56..1dca428 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -80,7 +80,7 @@
     gCmdLine.reset(new std::string("<unset>"));
   }
 
-#ifdef __ANDROID__
+#ifdef ART_TARGET_ANDROID
 #define INIT_LOGGING_DEFAULT_LOGGER android::base::LogdLogger()
 #else
 #define INIT_LOGGING_DEFAULT_LOGGER android::base::StderrLogger
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index d098ee2..72dbe6a 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -427,7 +427,7 @@
         if (!reg->VerifierInstanceOf(field_class.Ptr())) {
           // This should never happen.
           std::string temp1, temp2, temp3;
-          self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
+          self->ThrowNewExceptionF("Ljava/lang/InternalError;",
                                    "Put '%s' that is not instance of field '%s' in '%s'",
                                    reg->GetClass()->GetDescriptor(&temp1),
                                    field_class->GetDescriptor(&temp2),
@@ -1493,7 +1493,7 @@
             if (!o->VerifierInstanceOf(arg_type)) {
               // This should never happen.
               std::string temp1, temp2;
-              self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
+              self->ThrowNewExceptionF("Ljava/lang/InternalError;",
                                        "Invoking %s with bad arg %d, type '%s' not instance of '%s'",
                                        new_shadow_frame->GetMethod()->GetName(), shorty_pos,
                                        o->GetClass()->GetDescriptor(&temp1),
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 989b7da..22c0fe0 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -287,7 +287,7 @@
           if (!obj_result->VerifierInstanceOf(return_type)) {
             // This should never happen.
             std::string temp1, temp2;
-            self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
+            self->ThrowNewExceptionF("Ljava/lang/InternalError;",
                                      "Returning '%s' that is not instance of return type '%s'",
                                      obj_result->GetClass()->GetDescriptor(&temp1),
                                      return_type->GetDescriptor(&temp2));
@@ -577,7 +577,7 @@
         } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
           // This should never happen.
           std::string temp;
-          self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
+          self->ThrowNewExceptionF("Ljava/lang/InternalError;",
                                    "Throwing '%s' that is not instance of Throwable",
                                    exception->GetClass()->GetDescriptor(&temp));
         } else {
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index fad7d90..5574a11 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -621,8 +621,8 @@
   Thread* const self = Thread::Current();
   self->AssertThreadSuspensionIsAllowable();
   CHECK(pReq != nullptr);
+  CHECK_EQ(threadId, Dbg::GetThreadSelfId()) << "Only the current thread can suspend itself";
   /* send request and possibly suspend ourselves */
-  JDWP::ObjectId thread_self_id = Dbg::GetThreadSelfId();
   ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
   if (suspend_policy != SP_NONE) {
     AcquireJdwpTokenForEvent(threadId);
@@ -631,7 +631,7 @@
   {
     // Before suspending, we change our state to kSuspended so the debugger sees us as RUNNING.
     ScopedThreadStateChange stsc(self, kSuspended);
-    SuspendByPolicy(suspend_policy, thread_self_id);
+    SuspendByPolicy(suspend_policy, threadId);
   }
 }
 
@@ -658,13 +658,10 @@
 }
 
 void JdwpState::AcquireJdwpTokenForEvent(ObjectId threadId) {
-  CHECK_NE(Thread::Current(), GetDebugThread()) << "Expected event thread";
-  CHECK_NE(debug_thread_id_, threadId) << "Not expected debug thread";
   SetWaitForJdwpToken(threadId);
 }
 
 void JdwpState::ReleaseJdwpTokenForEvent() {
-  CHECK_NE(Thread::Current(), GetDebugThread()) << "Expected event thread";
   ClearWaitForJdwpToken();
 }
 
@@ -685,23 +682,28 @@
   /* this is held for very brief periods; contention is unlikely */
   MutexLock mu(self, jdwp_token_lock_);
 
-  CHECK_NE(jdwp_token_owner_thread_id_, threadId) << "Thread is already holding event thread lock";
+  if (jdwp_token_owner_thread_id_ == threadId) {
+    // Only the debugger thread may already hold the event token. For instance, it may trigger
+    // a CLASS_PREPARE event while processing a command that initializes a class.
+    CHECK_EQ(threadId, debug_thread_id_) << "Non-debugger thread is already holding event token";
+  } else {
+    /*
+     * If another thread is already doing stuff, wait for it.  This can
+     * go to sleep indefinitely.
+     */
 
-  /*
-   * If another thread is already doing stuff, wait for it.  This can
-   * go to sleep indefinitely.
-   */
-  while (jdwp_token_owner_thread_id_ != 0) {
-    VLOG(jdwp) << StringPrintf("event in progress (%#" PRIx64 "), %#" PRIx64 " sleeping",
-                               jdwp_token_owner_thread_id_, threadId);
-    waited = true;
-    jdwp_token_cond_.Wait(self);
-  }
+    while (jdwp_token_owner_thread_id_ != 0) {
+      VLOG(jdwp) << StringPrintf("event in progress (%#" PRIx64 "), %#" PRIx64 " sleeping",
+                                 jdwp_token_owner_thread_id_, threadId);
+      waited = true;
+      jdwp_token_cond_.Wait(self);
+    }
 
-  if (waited || threadId != debug_thread_id_) {
-    VLOG(jdwp) << StringPrintf("event token grabbed (%#" PRIx64 ")", threadId);
+    if (waited || threadId != debug_thread_id_) {
+      VLOG(jdwp) << StringPrintf("event token grabbed (%#" PRIx64 ")", threadId);
+    }
+    jdwp_token_owner_thread_id_ = threadId;
   }
-  jdwp_token_owner_thread_id_ = threadId;
 }
 
 /*
@@ -1224,14 +1226,15 @@
     VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
   }
 
-  if (thread_id == debug_thread_id_) {
+  ObjectId reported_thread_id = thread_id;
+  if (reported_thread_id == debug_thread_id_) {
     /*
      * JDWP says that, for a class prep in the debugger thread, we
      * should set thread to null and if any threads were supposed
      * to be suspended then we suspend all other threads.
      */
     VLOG(jdwp) << "  NOTE: class prepare in debugger thread!";
-    thread_id = 0;
+    reported_thread_id = 0;
     if (suspend_policy == SP_EVENT_THREAD) {
       suspend_policy = SP_ALL;
     }
@@ -1244,7 +1247,7 @@
   for (const JdwpEvent* pEvent : match_list) {
     expandBufAdd1(pReq, pEvent->eventKind);
     expandBufAdd4BE(pReq, pEvent->requestId);
-    expandBufAddObjectId(pReq, thread_id);
+    expandBufAddObjectId(pReq, reported_thread_id);
     expandBufAdd1(pReq, tag);
     expandBufAddRefTypeId(pReq, class_id);
     expandBufAddUtf8String(pReq, signature);
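
For readers tracing the token logic above: AcquireJdwpTokenForEvent/SetWaitForJdwpToken implement a classic monitor, with the one twist that the debugger thread may re-enter while it already holds the token. A minimal stand-alone sketch of the same pattern, with invented names and std::mutex/std::condition_variable standing in for ART's jdwp_token_lock_ and jdwp_token_cond_:

  #include <condition_variable>
  #include <cstdint>
  #include <cstdlib>
  #include <mutex>

  class EventTokenGate {
   public:
    void Acquire(uint64_t thread_id, uint64_t debug_thread_id) {
      std::unique_lock<std::mutex> lock(mu_);
      if (owner_ == thread_id) {
        // Only the debugger thread may re-acquire, e.g. when a CLASS_PREPARE
        // event fires while it is servicing a command.
        if (thread_id != debug_thread_id) std::abort();
        return;
      }
      // Another thread holds the token: sleep until it is released.
      cond_.wait(lock, [this] { return owner_ == 0; });
      owner_ = thread_id;
    }

    void Release() {
      std::lock_guard<std::mutex> lock(mu_);
      owner_ = 0;  // 0 plays the role of "no owner", as in the JDWP code.
      cond_.notify_all();
    }

   private:
    std::mutex mu_;
    std::condition_variable cond_;
    uint64_t owner_ = 0;
  };
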
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index f0ed237..3531852 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -428,10 +428,16 @@
           core_spill_mask,
           fp_spill_mask,
           code_size);
+      // Flush caches before we remove write permission because on some ARMv8 hardware,
+      // flushing caches requires write permission.
+      //
+      // For reference, here are kernel patches discussing this issue:
+      // https://android.googlesource.com/kernel/msm/%2B/0e7f7bcc3fc87489cda5aa6aff8ce40eed912279
+      // https://patchwork.kernel.org/patch/9047921/
+      FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
+                            reinterpret_cast<char*>(code_ptr + code_size));
     }
 
-    FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
-                          reinterpret_cast<char*>(code_ptr + code_size));
     number_of_compilations_++;
   }
   // We need to update the entry point in the runnable state for the instrumentation.
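
The ordering constraint described in the new comment can be shown in isolation: write the instructions, flush the instruction cache while the page is still writable, and only then drop write permission. A hedged sketch using plain POSIX mprotect(2) and the compiler builtin __builtin___clear_cache, not ART's actual code-cache machinery:

  #include <sys/mman.h>
  #include <cstddef>
  #include <cstring>

  // 'region' is a page-aligned, currently writable mapping of at least
  // 'size' bytes into which compiled code is being installed.
  bool CommitCode(void* region, const void* insns, size_t size) {
    std::memcpy(region, insns, size);
    // Flush while still writable: on the affected ARMv8 kernels, cache
    // maintenance on a read-only user page can fault.
    char* begin = static_cast<char*>(region);
    __builtin___clear_cache(begin, begin + size);
    // Only now remove the write permission.
    return mprotect(region, size, PROT_READ | PROT_EXEC) == 0;
  }
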
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 404e5ce..bdf8b0e 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -719,7 +719,7 @@
     dlopen_handle_ = android_dlopen_ext(absolute_path.get(), RTLD_NOW, &extinfo);
 #else
     UNUSED(oat_file_begin);
-    static_assert(!kIsTargetBuild, "host_dlopen_handles_ will leak handles");
+    static_assert(!kIsTargetBuild || kIsTargetLinux, "host_dlopen_handles_ will leak handles");
     MutexLock mu(Thread::Current(), *Locks::host_dlopen_handles_lock_);
     dlopen_handle_ = dlopen(absolute_path.get(), RTLD_NOW);
     if (dlopen_handle_ != nullptr) {
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 6197120..4d1e1ea 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -146,14 +146,13 @@
   return true;
 }
 
-OatFileAssistant::DexOptNeeded
-OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target, bool profile_changed) {
+int OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target, bool profile_changed) {
   OatFileInfo& info = GetBestInfo();
   DexOptNeeded dexopt_needed = info.GetDexOptNeeded(target, profile_changed);
-  if (dexopt_needed == kPatchOatNeeded && info.IsOatLocation()) {
-    dexopt_needed = kSelfPatchOatNeeded;
+  if (info.IsOatLocation() || dexopt_needed == kDex2OatFromScratch) {
+    return dexopt_needed;
   }
-  return dexopt_needed;
+  return -dexopt_needed;
 }
 
 // Figure out the currently specified compile filter option in the runtime.
@@ -191,13 +190,21 @@
 
   OatFileInfo& info = GetBestInfo();
   switch (info.GetDexOptNeeded(target, profile_changed)) {
-    case kNoDexOptNeeded: return kUpdateSucceeded;
-    case kDex2OatNeeded: return GenerateOatFile(error_msg);
-    case kPatchOatNeeded: return RelocateOatFile(info.Filename(), error_msg);
+    case kNoDexOptNeeded:
+      return kUpdateSucceeded;
 
-    // kSelfPatchOatNeeded will never be returned by GetDexOptNeeded for an
-    // individual OatFileInfo.
-    case kSelfPatchOatNeeded: UNREACHABLE();
+    // TODO: For now, don't bother with all the different ways we can call
+    // dex2oat to generate the oat file. Always generate the oat file as if it
+    // were kDex2OatFromScratch.
+    case kDex2OatFromScratch:
+    case kDex2OatForBootImage:
+    case kDex2OatForRelocation:
+    case kDex2OatForFilter:
+      return GenerateOatFile(error_msg);
+
+    case kPatchoatForRelocation: {
+      return RelocateOatFile(info.Filename(), error_msg);
+    }
   }
   UNREACHABLE();
 }
@@ -830,29 +837,43 @@
 OatFileAssistant::DexOptNeeded OatFileAssistant::OatFileInfo::GetDexOptNeeded(
     CompilerFilter::Filter target, bool profile_changed) {
   bool compilation_desired = CompilerFilter::IsBytecodeCompilationEnabled(target);
+  bool filter_okay = CompilerFilterIsOkay(target, profile_changed);
 
-  if (CompilerFilterIsOkay(target, profile_changed)) {
-    if (Status() == kOatUpToDate) {
-      // The oat file is in good shape as is.
-      return kNoDexOptNeeded;
-    }
-
-    if (Status() == kOatRelocationOutOfDate) {
-      if (!compilation_desired) {
-        // If no compilation is desired, then it doesn't matter if the oat
-        // file needs relocation. It's in good shape as is.
-        return kNoDexOptNeeded;
-      }
-
-      if (compilation_desired && HasPatchInfo()) {
-        // Relocate if we can.
-        return kPatchOatNeeded;
-      }
-    }
+  if (filter_okay && Status() == kOatUpToDate) {
+    // The oat file is in good shape as is.
+    return kNoDexOptNeeded;
   }
 
-  // We can only run dex2oat if there are original dex files.
-  return oat_file_assistant_->HasOriginalDexFiles() ? kDex2OatNeeded : kNoDexOptNeeded;
+  if (filter_okay && !compilation_desired && Status() == kOatRelocationOutOfDate) {
+    // If no compilation is desired, then it doesn't matter if the oat
+    // file needs relocation. It's in good shape as is.
+    return kNoDexOptNeeded;
+  }
+
+  if (filter_okay && Status() == kOatRelocationOutOfDate && HasPatchInfo()) {
+    return kPatchoatForRelocation;
+  }
+
+  if (oat_file_assistant_->HasOriginalDexFiles()) {
+    // Run dex2oat for relocation if we don't have the patch info necessary
+    // to use patchoat.
+    if (filter_okay && Status() == kOatRelocationOutOfDate) {
+      return kDex2OatForRelocation;
+    }
+
+    if (IsUseable()) {
+      return kDex2OatForFilter;
+    }
+
+    if (Status() == kOatBootImageOutOfDate) {
+      return kDex2OatForBootImage;
+    }
+
+    return kDex2OatFromScratch;
+  }
+
+  // Otherwise there is nothing we can do, even if we want to.
+  return kNoDexOptNeeded;
 }
 
 const OatFile* OatFileAssistant::OatFileInfo::GetFile() {
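
The ladder above is easier to audit when flattened into one pure function. A condensed model with invented names; the booleans stand in for CompilerFilterIsOkay(), CompilerFilter::IsBytecodeCompilationEnabled(), HasPatchInfo(), HasOriginalDexFiles() and IsUseable(), and the early !has_original_dex return is equivalent to the original's wrapping block because every outcome below it requires original dex files:

  enum DexOptNeeded {  // mirrors oat_file_assistant.h
    kNoDexOptNeeded = 0,
    kDex2OatFromScratch = 1,
    kDex2OatForBootImage = 2,
    kDex2OatForFilter = 3,
    kDex2OatForRelocation = 4,
    kPatchoatForRelocation = 5,
  };

  enum Status { kUpToDate, kRelocationOutOfDate, kBootImageOutOfDate, kCannotOpen };

  DexOptNeeded Classify(bool filter_okay, bool compilation_desired, bool has_patch_info,
                        bool has_original_dex, bool useable, Status status) {
    if (filter_okay && status == kUpToDate) return kNoDexOptNeeded;
    if (filter_okay && status == kRelocationOutOfDate && !compilation_desired)
      return kNoDexOptNeeded;  // relocation is moot if we will not run compiled code
    if (filter_okay && status == kRelocationOutOfDate && has_patch_info)
      return kPatchoatForRelocation;
    if (!has_original_dex) return kNoDexOptNeeded;  // dex2oat has no input to work with
    if (filter_okay && status == kRelocationOutOfDate) return kDex2OatForRelocation;
    if (useable) return kDex2OatForFilter;
    if (status == kBootImageOutOfDate) return kDex2OatForBootImage;
    return kDex2OatFromScratch;
  }
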
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 3a838d7..bed1edc 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -48,25 +48,33 @@
 class OatFileAssistant {
  public:
   enum DexOptNeeded {
-    // kNoDexOptNeeded - The code for this dex location is up to date and can
-    // be used as is.
+    // No dexopt should (or can) be done to update the apk/jar.
     // Matches Java: dalvik.system.DexFile.NO_DEXOPT_NEEDED = 0
     kNoDexOptNeeded = 0,
 
-    // kDex2OatNeeded - In order to make the code for this dex location up to
-    // date, dex2oat must be run on the dex file.
-    // Matches Java: dalvik.system.DexFile.DEX2OAT_NEEDED = 1
-    kDex2OatNeeded = 1,
+    // dex2oat should be run to update the apk/jar from scratch.
+    // Matches Java: dalvik.system.DexFile.DEX2OAT_FROM_SCRATCH = 1
+    kDex2OatFromScratch = 1,
 
-    // kPatchOatNeeded - In order to make the code for this dex location up to
-    // date, patchoat must be run on the odex file.
-    // Matches Java: dalvik.system.DexFile.PATCHOAT_NEEDED = 2
-    kPatchOatNeeded = 2,
+    // dex2oat should be run to update the apk/jar because the existing code
+    // is out of date with respect to the boot image.
+    // Matches Java: dalvik.system.DexFile.DEX2OAT_FOR_BOOT_IMAGE
+    kDex2OatForBootImage = 2,
 
-    // kSelfPatchOatNeeded - In order to make the code for this dex location
-    // up to date, patchoat must be run on the oat file.
-    // Matches Java: dalvik.system.DexFile.SELF_PATCHOAT_NEEDED = 3
-    kSelfPatchOatNeeded = 3,
+    // dex2oat should be run to update the apk/jar because the existing code
+    // is out of date with respect to the target compiler filter.
+    // Matches Java: dalvik.system.DexFile.DEX2OAT_FOR_FILTER
+    kDex2OatForFilter = 3,
+
+    // dex2oat should be run to update the apk/jar because the existing code
+    // is not relocated to match the boot image and does not have the
+    // necessary patch information to use patchoat.
+    // Matches Java: dalvik.system.DexFile.DEX2OAT_FOR_RELOCATION
+    kDex2OatForRelocation = 4,
+
+    // patchoat should be run to update the apk/jar.
+    // Matches Java: dalvik.system.DexFile.PATCHOAT_FOR_RELOCATION
+    kPatchoatForRelocation = 5,
   };
 
   enum OatStatus {
@@ -149,8 +157,10 @@
   // dex location that is at least as good as an oat file generated with the
   // given compiler filter. profile_changed should be true to indicate the
   // profile has recently changed for this dex location.
-  DexOptNeeded GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter,
-                               bool profile_changed = false);
+  // Returns a positive status code if the status refers to the oat file in
+  // the oat location. Returns a negative status code if the status refers to
+  // the oat file in the odex location.
+  int GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter, bool profile_changed = false);
 
   // Returns true if there is up-to-date code for this dex location,
   // irrespective of the compiler filter of the up-to-date code.
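
A caller can recover both facts from the returned int: the magnitude is the DexOptNeeded value and the sign selects the location. A minimal decoding sketch (hypothetical helper, not part of this change):

  #include <cstdlib>

  struct DecodedDexOptStatus {
    int needed;                   // one of the DexOptNeeded values above
    bool refers_to_oat_location;  // false: the odex location
  };

  DecodedDexOptStatus Decode(int status) {
    // kNoDexOptNeeded (0) and kDex2OatFromScratch can never come back
    // negative, since the oat/odex distinction carries no information there.
    return { std::abs(status), status >= 0 };
  }
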
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 1848255..5730cf2 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -306,13 +306,13 @@
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerifyAtRuntime));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeedProfile));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -356,7 +356,7 @@
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerifyAtRuntime));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -380,7 +380,7 @@
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -402,9 +402,9 @@
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeedProfile, false));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly, false));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeedProfile, true));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly, true));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -448,7 +448,7 @@
   Copy(GetMultiDexSrc2(), dex_location);
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed, false));
   EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
 }
@@ -497,9 +497,9 @@
   Copy(GetDexSrc2(), dex_location);
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerifyAtRuntime));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -522,11 +522,11 @@
                      /*with_alternate_image*/true);
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatForBootImage,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerifyAtRuntime));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatForBootImage,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatForBootImage,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -553,7 +553,7 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerifyAtRuntime));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -577,7 +577,7 @@
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerifyAtRuntime));
-  EXPECT_EQ(OatFileAssistant::kPatchOatNeeded,
+  EXPECT_EQ(-OatFileAssistant::kPatchoatForRelocation,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -606,7 +606,7 @@
   // Verify the status.
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
 
-  EXPECT_EQ(OatFileAssistant::kPatchOatNeeded,
+  EXPECT_EQ(-OatFileAssistant::kPatchoatForRelocation,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -659,7 +659,7 @@
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerifyAtRuntime));
-  EXPECT_EQ(OatFileAssistant::kPatchOatNeeded,
+  EXPECT_EQ(-OatFileAssistant::kPatchoatForRelocation,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,  // Can't run dex2oat because dex file is stripped.
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
@@ -747,9 +747,9 @@
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly));
-  EXPECT_EQ(OatFileAssistant::kSelfPatchOatNeeded,
+  EXPECT_EQ(OatFileAssistant::kPatchoatForRelocation,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -793,7 +793,7 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(),
       oat_location.c_str(), kRuntimeISA, true);
 
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatForRelocation,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   // Make the oat file up to date.
@@ -832,10 +832,10 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(),
       oat_location.c_str(), kRuntimeISA, true);
 
-  // kSelfPatchOatNeeded is expected rather than kPatchOatNeeded based on the
-  // assumption that the oat location is more up-to-date than the odex
+  // kPatchoatForRelocation is expected rather than -kPatchoatForRelocation
+  // based on the assumption that the oat location is more up-to-date than the odex
   // location, even if they both need relocation.
-  EXPECT_EQ(OatFileAssistant::kSelfPatchOatNeeded,
+  EXPECT_EQ(OatFileAssistant::kPatchoatForRelocation,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -868,7 +868,7 @@
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -892,7 +892,7 @@
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerifyAtRuntime));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -1070,7 +1070,7 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
@@ -1106,7 +1106,7 @@
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
@@ -1241,7 +1241,7 @@
       oat_file_assistant.MakeUpToDate(false, &error_msg)) << error_msg;
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly));
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+  EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
   Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
@@ -1278,6 +1278,15 @@
 // Verify the dexopt status values from dalvik.system.DexFile
 // match the OatFileAssistant::DexOptStatus values.
 TEST_F(OatFileAssistantTest, DexOptStatusValues) {
+  std::pair<OatFileAssistant::DexOptNeeded, const char*> mapping[] = {
+    {OatFileAssistant::kNoDexOptNeeded, "NO_DEXOPT_NEEDED"},
+    {OatFileAssistant::kDex2OatFromScratch, "DEX2OAT_FROM_SCRATCH"},
+    {OatFileAssistant::kDex2OatForBootImage, "DEX2OAT_FOR_BOOT_IMAGE"},
+    {OatFileAssistant::kDex2OatForFilter, "DEX2OAT_FOR_FILTER"},
+    {OatFileAssistant::kDex2OatForRelocation, "DEX2OAT_FOR_RELOCATION"},
+    {OatFileAssistant::kPatchoatForRelocation, "PATCHOAT_FOR_RELOCATION"}
+  };
+
   ScopedObjectAccess soa(Thread::Current());
   StackHandleScope<1> hs(soa.Self());
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
@@ -1286,29 +1295,13 @@
   ASSERT_FALSE(dexfile.Get() == nullptr);
   linker->EnsureInitialized(soa.Self(), dexfile, true, true);
 
-  ArtField* no_dexopt_needed = mirror::Class::FindStaticField(
-      soa.Self(), dexfile, "NO_DEXOPT_NEEDED", "I");
-  ASSERT_FALSE(no_dexopt_needed == nullptr);
-  EXPECT_EQ(no_dexopt_needed->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
-  EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, no_dexopt_needed->GetInt(dexfile.Get()));
-
-  ArtField* dex2oat_needed = mirror::Class::FindStaticField(
-      soa.Self(), dexfile, "DEX2OAT_NEEDED", "I");
-  ASSERT_FALSE(dex2oat_needed == nullptr);
-  EXPECT_EQ(dex2oat_needed->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
-  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded, dex2oat_needed->GetInt(dexfile.Get()));
-
-  ArtField* patchoat_needed = mirror::Class::FindStaticField(
-      soa.Self(), dexfile, "PATCHOAT_NEEDED", "I");
-  ASSERT_FALSE(patchoat_needed == nullptr);
-  EXPECT_EQ(patchoat_needed->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
-  EXPECT_EQ(OatFileAssistant::kPatchOatNeeded, patchoat_needed->GetInt(dexfile.Get()));
-
-  ArtField* self_patchoat_needed = mirror::Class::FindStaticField(
-      soa.Self(), dexfile, "SELF_PATCHOAT_NEEDED", "I");
-  ASSERT_FALSE(self_patchoat_needed == nullptr);
-  EXPECT_EQ(self_patchoat_needed->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
-  EXPECT_EQ(OatFileAssistant::kSelfPatchOatNeeded, self_patchoat_needed->GetInt(dexfile.Get()));
+  for (std::pair<OatFileAssistant::DexOptNeeded, const char*> field : mapping) {
+    ArtField* art_field = mirror::Class::FindStaticField(
+        soa.Self(), dexfile, field.second, "I");
+    ASSERT_FALSE(art_field == nullptr);
+    EXPECT_EQ(art_field->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
+    EXPECT_EQ(field.first, art_field->GetInt(dexfile.Get()));
+  }
 }
 
 // TODO: More Tests:
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 8a3bac7..ee4d669 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -2183,7 +2183,7 @@
 
 NO_RETURN
 void Runtime::Aborter(const char* abort_message) {
-#ifdef __ANDROID__
+#ifdef ART_TARGET_ANDROID
   android_set_abort_message(abort_message);
 #endif
   Runtime::Abort(abort_message);
diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java
index 02c609e..6b25747 100644
--- a/test/538-checker-embed-constants/src/Main.java
+++ b/test/538-checker-embed-constants/src/Main.java
@@ -30,7 +30,7 @@
 
   /// CHECK-START-ARM: int Main.and255(int) disassembly (after)
   /// CHECK-NOT:            movs {{r\d+}}, #255
-  /// CHECK:                and {{r\d+}}, {{r\d+}}, #255
+  /// CHECK:                and {{r\d+}}, {{r\d+}}, #0xff
 
   public static int and255(int arg) {
     return arg & 255;
@@ -46,7 +46,7 @@
 
   /// CHECK-START-ARM: int Main.andNot15(int) disassembly (after)
   /// CHECK-NOT:            mvn {{r\d+}}, #15
-  /// CHECK:                bic {{r\d+}}, {{r\d+}}, #15
+  /// CHECK:                bic {{r\d+}}, {{r\d+}}, #0xf
 
   public static int andNot15(int arg) {
     return arg & ~15;
@@ -54,7 +54,7 @@
 
   /// CHECK-START-ARM: int Main.or255(int) disassembly (after)
   /// CHECK-NOT:            movs {{r\d+}}, #255
-  /// CHECK:                orr {{r\d+}}, {{r\d+}}, #255
+  /// CHECK:                orr {{r\d+}}, {{r\d+}}, #0xff
 
   public static int or255(int arg) {
     return arg | 255;
@@ -70,7 +70,7 @@
 
   /// CHECK-START-ARM: int Main.orNot15(int) disassembly (after)
   /// CHECK-NOT:            mvn {{r\d+}}, #15
-  /// CHECK:                orn {{r\d+}}, {{r\d+}}, #15
+  /// CHECK:                orn {{r\d+}}, {{r\d+}}, #0xf
 
   public static int orNot15(int arg) {
     return arg | ~15;
@@ -78,7 +78,7 @@
 
   /// CHECK-START-ARM: int Main.xor255(int) disassembly (after)
   /// CHECK-NOT:            movs {{r\d+}}, #255
-  /// CHECK:                eor {{r\d+}}, {{r\d+}}, #255
+  /// CHECK:                eor {{r\d+}}, {{r\d+}}, #0xff
 
   public static int xor255(int arg) {
     return arg ^ 255;
@@ -104,7 +104,7 @@
   /// CHECK-NOT:            movs {{r\d+}}, #255
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
-  /// CHECK-DAG:            and {{r\d+}}, {{r\d+}}, #255
+  /// CHECK-DAG:            and {{r\d+}}, {{r\d+}}, #0xff
   /// CHECK-DAG:            movs {{r\d+}}, #0
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
@@ -131,7 +131,7 @@
   /// CHECK-NOT:            mvn {{r\d+}}, #15
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
-  /// CHECK:                bic {{r\d+}}, {{r\d+}}, #15
+  /// CHECK:                bic {{r\d+}}, {{r\d+}}, #0xf
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
 
@@ -144,8 +144,8 @@
   /// CHECK-NOT:            mvn {{r\d+}}, #15
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
-  /// CHECK-DAG:            and {{r\d+}}, {{r\d+}}, #15
-  /// CHECK-DAG:            bic {{r\d+}}, {{r\d+}}, #15
+  /// CHECK-DAG:            and {{r\d+}}, {{r\d+}}, #0xf
+  /// CHECK-DAG:            bic {{r\d+}}, {{r\d+}}, #0xf
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
 
@@ -157,7 +157,7 @@
   /// CHECK-NOT:            movs {{r\d+}}, #255
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
-  /// CHECK:                orr {{r\d+}}, {{r\d+}}, #255
+  /// CHECK:                orr {{r\d+}}, {{r\d+}}, #0xff
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
 
@@ -183,7 +183,7 @@
   /// CHECK-NOT:            mvn {{r\d+}}, #15
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
-  /// CHECK-DAG:            orn {{r\d+}}, {{r\d+}}, #15
+  /// CHECK-DAG:            orn {{r\d+}}, {{r\d+}}, #0xf
   /// CHECK-DAG:            mvn {{r\d+}}, #0
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
@@ -197,8 +197,8 @@
   /// CHECK-NOT:            mvn {{r\d+}}, #15
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
-  /// CHECK-DAG:            orr {{r\d+}}, {{r\d+}}, #15
-  /// CHECK-DAG:            orn {{r\d+}}, {{r\d+}}, #15
+  /// CHECK-DAG:            orr {{r\d+}}, {{r\d+}}, #0xf
+  /// CHECK-DAG:            orn {{r\d+}}, {{r\d+}}, #0xf
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
 
@@ -209,7 +209,7 @@
   /// CHECK-START-ARM: long Main.xor255(long) disassembly (after)
   /// CHECK-NOT:            movs {{r\d+}}, #255
   /// CHECK-NOT:            eor{{(\.w)?}}
-  /// CHECK:                eor {{r\d+}}, {{r\d+}}, #255
+  /// CHECK:                eor {{r\d+}}, {{r\d+}}, #0xff
   /// CHECK-NOT:            eor{{(\.w)?}}
 
   public static long xor255(long arg) {
@@ -257,8 +257,8 @@
   /// CHECK-NOT:            movs {{r\d+}}, #15
   /// CHECK-NOT:            mov.w {{r\d+}}, #-268435456
   /// CHECK-NOT:            eor{{(\.w)?}}
-  /// CHECK-DAG:            eor {{r\d+}}, {{r\d+}}, #15
-  /// CHECK-DAG:            eor {{r\d+}}, {{r\d+}}, #4026531840
+  /// CHECK-DAG:            eor {{r\d+}}, {{r\d+}}, #0xf
+  /// CHECK-DAG:            eor {{r\d+}}, {{r\d+}}, #0xf0000000
   /// CHECK-NOT:            eor{{(\.w)?}}
 
   public static long xor0xf00000000000000f(long arg) {
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 554f66d..96b984d 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -608,10 +608,7 @@
 TEST_ART_BROKEN_INTERPRETER_READ_BARRIER_RUN_TESTS :=
 
 # Tests that should fail in the read barrier configuration with the Optimizing compiler (AOT).
-# 484: Baker's fast path based read barrier compiler instrumentation generates code containing
-#      more parallel moves on x86, thus some Checker assertions may fail.
-TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \
-  484-checker-register-hints
+TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS :=
 
 # Tests that should fail in the read barrier configuration with JIT (Optimizing compiler).
 TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 27c2054..493eafb 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -48,7 +48,7 @@
 include $(CLEAR_VARS)
 LOCAL_SRC_FILES := $(call all-java-files-under, test)
 LOCAL_JAR_MANIFEST := test/manifest.txt
-LOCAL_STATIC_JAVA_LIBRARIES := ahat junit
+LOCAL_STATIC_JAVA_LIBRARIES := ahat junit-host
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MODULE_TAGS := tests
 LOCAL_MODULE := ahat-tests
diff --git a/tools/dexfuzz/README b/tools/dexfuzz/README
index a0658ec..c1cdf1e 100644
--- a/tools/dexfuzz/README
+++ b/tools/dexfuzz/README
@@ -4,7 +4,7 @@
 DexFuzz is primarily a tool for fuzzing DEX files. Fuzzing is the introduction of
 subtle changes ("mutations") to a file to produce a new test case. These test cases
 can be used to test the various modes of execution available to ART (Interpreter,
-Quick compiler, Optimizing compiler) to check for bugs in these modes of execution.
+Optimizing compiler) to check for bugs in these modes of execution.
 This is done by differential testing - each test file is executed with each mode of
 execution, and any differences between the resulting outputs may be an indication of
 a bug in one of the modes.
@@ -53,17 +53,16 @@
 
 And also at least two of the following backends:
   --interpreter
-  --quick
   --optimizing
 
 Note that if you wanted to test both ARM and ARM64 on an ARM64 device, you can use
 --allarm. Also in this case only one backend is needed, e.g. if you wanted to test
-ARM Quick Backend vs. ARM64 Quick Backend.
+ARM Optimizing Backend vs. ARM64 Optimizing Backend.
 
 Some legal examples:
-  --arm --quick --optimizing
-  --x86 --quick --optimizing --interpreter
-  --allarm --quick
+  --arm --optimizing --interpreter
+  --x86 --optimizing --interpreter
+  --allarm --optimizing
 
 Add in --device=<device name, e.g. device:generic> if you want to specify a device.
 Add in --execute-dir=<dir on device> if you want to specify an execution directory.
@@ -98,7 +97,6 @@
              those occurrences.
 Timed Out  - mutated files that timed out for one or more backends.
              Current timeouts are:
-               Quick - 5 seconds
                Optimizing - 5 seconds
                Interpreter - 30 seconds
               (use --short-timeouts to set all backends to 2 seconds.)
diff --git a/tools/dexfuzz/src/dexfuzz/Options.java b/tools/dexfuzz/src/dexfuzz/Options.java
index b442b22..7d5476d 100644
--- a/tools/dexfuzz/src/dexfuzz/Options.java
+++ b/tools/dexfuzz/src/dexfuzz/Options.java
@@ -61,7 +61,6 @@
   public static boolean executeOnHost;
   public static boolean noBootImage;
   public static boolean useInterpreter;
-  public static boolean useQuick;
   public static boolean useOptimizing;
   public static boolean useArchArm;
   public static boolean useArchArm64;
@@ -101,7 +100,6 @@
     Log.always("    --execute-class=<c>  : When executing, execute this class (default: Main)");
     Log.always("");
     Log.always("    --interpreter        : Include the Interpreter in comparisons");
-    Log.always("    --quick              : Include the Quick Compiler in comparisons");
     Log.always("    --optimizing         : Include the Optimizing Compiler in comparisons");
     Log.always("");
     Log.always("    --arm                : Include ARM backends in comparisons");
@@ -160,8 +158,6 @@
       skipHostVerify = true;
     } else if (flag.equals("interpreter")) {
       useInterpreter = true;
-    } else if (flag.equals("quick")) {
-      useQuick = true;
     } else if (flag.equals("optimizing")) {
       useOptimizing = true;
     } else if (flag.equals("arm")) {
@@ -423,18 +419,15 @@
       if (useInterpreter) {
         backends++;
       }
-      if (useQuick) {
-        backends++;
-      }
       if (useOptimizing) {
         backends++;
       }
       if (useArchArm && useArchArm64) {
-        // Could just be comparing quick-ARM versus quick-ARM64?
+        // Could just be comparing optimizing-ARM versus optimizing-ARM64?
         backends++;
       }
       if (backends < 2) {
-        Log.error("Not enough backends specified! Try --quick --interpreter!");
+        Log.error("Not enough backends specified! Try --optimizing --interpreter!");
         return false;
       }
     }
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java
index 72e36e8..84ed4c4 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java
@@ -29,6 +29,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that execution does not fall back to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     if (device.noBootImageAvailable()) {
       commandBuilder.append("-Ximage:/data/art-test/core.art -Xnorelocate ");
     }
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Arm64QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Arm64QuickBackendExecutor.java
deleted file mode 100644
index d9228ed..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/Arm64QuickBackendExecutor.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class Arm64QuickBackendExecutor extends Executor {
-
-  public Arm64QuickBackendExecutor(BaseListener listener, Device device) {
-    super("ARM64 Quick Backend", 5, listener, Architecture.ARM64, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Quick ");
-    if (device.noBootImageAvailable()) {
-      commandBuilder.append("-Ximage:/data/art-test/core.art -Xnorelocate ");
-    }
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java
index ded8cf9..26a5eea 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java
@@ -29,6 +29,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that execution does not fall back to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     if (device.noBootImageAvailable()) {
       commandBuilder.append("-Ximage:/data/art-test/core.art -Xnorelocate ");
     }
diff --git a/tools/dexfuzz/src/dexfuzz/executors/ArmQuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/ArmQuickBackendExecutor.java
deleted file mode 100644
index 0eb35f7..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/ArmQuickBackendExecutor.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class ArmQuickBackendExecutor extends Executor {
-
-  public ArmQuickBackendExecutor(BaseListener listener, Device device) {
-    super("ARM Quick Backend", 5, listener, Architecture.ARM, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Quick ");
-    if (device.noBootImageAvailable()) {
-      commandBuilder.append("-Ximage:/data/art-test/core.art -Xnorelocate ");
-    }
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
index 72d43e7..883ff2a 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
@@ -29,6 +29,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that execution does not fall back to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
     commandBuilder.append(executeClass);
     return commandBuilder.toString();
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Mips64QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Mips64QuickBackendExecutor.java
deleted file mode 100644
index e7e5ff6..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/Mips64QuickBackendExecutor.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class Mips64QuickBackendExecutor extends Executor {
-
-  public Mips64QuickBackendExecutor(BaseListener listener, Device device) {
-    super("MIPS64 Quick Backend", 5, listener, Architecture.MIPS64, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Quick ");
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
index 63f6858..b7babdc 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
@@ -29,6 +29,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that execution does not fall back to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
     commandBuilder.append(executeClass);
     return commandBuilder.toString();
diff --git a/tools/dexfuzz/src/dexfuzz/executors/MipsQuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/MipsQuickBackendExecutor.java
deleted file mode 100644
index b262090..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/MipsQuickBackendExecutor.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class MipsQuickBackendExecutor extends Executor {
-
-  public MipsQuickBackendExecutor(BaseListener listener, Device device) {
-    super("MIPS Quick Backend", 5, listener, Architecture.MIPS, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Quick ");
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java
index 5908a8b..1d62051 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java
@@ -30,6 +30,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that execution does not fall back to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     if (Options.executeOnHost) {
       commandBuilder.append(device.getHostExecutionFlags()).append(" ");
     }
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86QuickBackendExecutor.java
deleted file mode 100644
index 9e8039d..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/X86QuickBackendExecutor.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.Options;
-import dexfuzz.listeners.BaseListener;
-
-public class X86QuickBackendExecutor extends Executor {
-
-  public X86QuickBackendExecutor(BaseListener listener, Device device) {
-    super("x86 Quick Backend", 5, listener, Architecture.X86, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Quick ");
-    if (Options.executeOnHost) {
-      commandBuilder.append(device.getHostExecutionFlags()).append(" ");
-    }
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java
index 28ff1a5..ad44259 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java
@@ -29,6 +29,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that execution does not fall back to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
     commandBuilder.append(executeClass);
     return commandBuilder.toString();
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86_64QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86_64QuickBackendExecutor.java
deleted file mode 100644
index 22cafe2..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/X86_64QuickBackendExecutor.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class X86_64QuickBackendExecutor extends Executor {
-
-  public X86_64QuickBackendExecutor(BaseListener listener, Device device) {
-    super("x86_64 Quick Backend", 5, listener, Architecture.X86_64, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Quick ");
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java b/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
index bc39d79..1797d90 100644
--- a/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
+++ b/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
@@ -22,24 +22,18 @@
 import dexfuzz.executors.Architecture;
 import dexfuzz.executors.Arm64InterpreterExecutor;
 import dexfuzz.executors.Arm64OptimizingBackendExecutor;
-import dexfuzz.executors.Arm64QuickBackendExecutor;
 import dexfuzz.executors.ArmInterpreterExecutor;
 import dexfuzz.executors.ArmOptimizingBackendExecutor;
-import dexfuzz.executors.ArmQuickBackendExecutor;
 import dexfuzz.executors.Device;
 import dexfuzz.executors.Executor;
 import dexfuzz.executors.Mips64InterpreterExecutor;
 import dexfuzz.executors.Mips64OptimizingBackendExecutor;
-import dexfuzz.executors.Mips64QuickBackendExecutor;
 import dexfuzz.executors.MipsInterpreterExecutor;
 import dexfuzz.executors.MipsOptimizingBackendExecutor;
-import dexfuzz.executors.MipsQuickBackendExecutor;
 import dexfuzz.executors.X86InterpreterExecutor;
 import dexfuzz.executors.X86OptimizingBackendExecutor;
-import dexfuzz.executors.X86QuickBackendExecutor;
 import dexfuzz.executors.X86_64InterpreterExecutor;
 import dexfuzz.executors.X86_64OptimizingBackendExecutor;
-import dexfuzz.executors.X86_64QuickBackendExecutor;
 import dexfuzz.listeners.BaseListener;
 import dexfuzz.program.Mutation;
 import dexfuzz.program.Program;
@@ -121,18 +115,13 @@
     }
   }
 
-  private void addExecutorsForArchitecture(Device device, Class<? extends Executor> quick,
-      Class<? extends Executor> optimizing, Class<? extends Executor> interpreter) {
-    // NB: Currently QuickBackend MUST come immediately before same arch's Interpreter.
+  private void addExecutorsForArchitecture(Device device, Class<? extends Executor> optimizing,
+      Class<? extends Executor> interpreter) {
+    // NB: Currently OptimizingBackend MUST come immediately before same arch's Interpreter.
     // This is because interpreter execution relies on there being an OAT file already
     // created to produce correct debug information. Otherwise we will see
     // false-positive divergences.
     try {
-      if (Options.useQuick) {
-        Constructor<? extends Executor> constructor =
-            quick.getConstructor(BaseListener.class, Device.class);
-        executors.add(constructor.newInstance(listener, device));
-      }
       if (Options.useOptimizing) {
         Constructor<? extends Executor> constructor =
             optimizing.getConstructor(BaseListener.class, Device.class);
@@ -165,33 +154,33 @@
     }
 
     if (Options.useArchArm64) {
-      addExecutorsForArchitecture(device, Arm64QuickBackendExecutor.class,
-          Arm64OptimizingBackendExecutor.class, Arm64InterpreterExecutor.class);
+      addExecutorsForArchitecture(device, Arm64OptimizingBackendExecutor.class,
+          Arm64InterpreterExecutor.class);
     }
 
     if (Options.useArchArm) {
-      addExecutorsForArchitecture(device, ArmQuickBackendExecutor.class,
-          ArmOptimizingBackendExecutor.class, ArmInterpreterExecutor.class);
+      addExecutorsForArchitecture(device, ArmOptimizingBackendExecutor.class,
+          ArmInterpreterExecutor.class);
     }
 
     if (Options.useArchX86_64) {
-      addExecutorsForArchitecture(device, X86_64QuickBackendExecutor.class,
-          X86_64OptimizingBackendExecutor.class, X86_64InterpreterExecutor.class);
+      addExecutorsForArchitecture(device, X86_64OptimizingBackendExecutor.class,
+          X86_64InterpreterExecutor.class);
     }
 
     if (Options.useArchX86) {
-      addExecutorsForArchitecture(device, X86QuickBackendExecutor.class,
-          X86OptimizingBackendExecutor.class, X86InterpreterExecutor.class);
+      addExecutorsForArchitecture(device, X86OptimizingBackendExecutor.class,
+          X86InterpreterExecutor.class);
     }
 
     if (Options.useArchMips64) {
-      addExecutorsForArchitecture(device, Mips64QuickBackendExecutor.class,
-          Mips64OptimizingBackendExecutor.class, Mips64InterpreterExecutor.class);
+      addExecutorsForArchitecture(device, Mips64OptimizingBackendExecutor.class,
+          Mips64InterpreterExecutor.class);
     }
 
     if (Options.useArchMips) {
-      addExecutorsForArchitecture(device, MipsQuickBackendExecutor.class,
-          MipsOptimizingBackendExecutor.class, MipsInterpreterExecutor.class);
+      addExecutorsForArchitecture(device, MipsOptimizingBackendExecutor.class,
+          MipsInterpreterExecutor.class);
     }
 
     // Add the first backend as the golden executor for self-divergence tests.