Merge "Optimizing: Fix register allocator validation memory usage."
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index c306cf9..76e6bbd 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1475,6 +1475,311 @@
                     Thread::PeerOffset<kMipsPointerSize>().Int32Value());
 }
 
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
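+  // The object variants may have to call into the runtime on a slow path (e.g. for
+  // read barriers), hence kCallOnSlowPath below; the other variants never call out.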
+  bool can_call =
+       invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
+       invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile;
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           can_call ?
+                                                               LocationSummary::kCallOnSlowPath :
+                                                               LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+static void GenUnsafeGet(HInvoke* invoke,
+                         Primitive::Type type,
+                         bool is_volatile,
+                         bool is_R6,
+                         CodeGeneratorMIPS* codegen) {
+  LocationSummary* locations = invoke->GetLocations();
+  DCHECK((type == Primitive::kPrimInt) ||
+         (type == Primitive::kPrimLong) ||
+         (type == Primitive::kPrimNot)) << type;
+  MipsAssembler* assembler = codegen->GetAssembler();
+  // Object pointer.
+  Register base = locations->InAt(1).AsRegister<Register>();
+  // The "offset" argument is passed as a "long". Since this code is for
+  // a 32-bit processor, we can only use 32-bit addresses, so we only
+  // need the low 32-bits of offset.
+  Register offset_lo = locations->InAt(2).AsRegisterPairLow<Register>();
+
+  __ Addu(TMP, base, offset_lo);
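+  // SYNC with stype 0 acts as a full memory barrier.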
+  if (is_volatile) {
+    __ Sync(0);
+  }
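+  // Pre-R6 cores have no unaligned LW, so the LWR/LWL pairs below assemble the word
+  // from its two unaligned parts (little-endian: LWR at offset 0, LWL at offset 3);
+  // R6 dropped LWL/LWR and lets a plain LW handle unaligned addresses.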
+  if (type == Primitive::kPrimLong) {
+    Register trg_lo = locations->Out().AsRegisterPairLow<Register>();
+    Register trg_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+    if (is_R6) {
+      __ Lw(trg_lo, TMP, 0);
+      __ Lw(trg_hi, TMP, 4);
+    } else {
+      __ Lwr(trg_lo, TMP, 0);
+      __ Lwl(trg_lo, TMP, 3);
+      __ Lwr(trg_hi, TMP, 4);
+      __ Lwl(trg_hi, TMP, 7);
+    }
+  } else {
+    Register trg = locations->Out().AsRegister<Register>();
+
+    if (is_R6) {
+      __ Lw(trg, TMP, 0);
+    } else {
+      __ Lwr(trg, TMP, 0);
+      __ Lwl(trg, TMP, 3);
+    }
+  }
+}
+
+// int sun.misc.Unsafe.getInt(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ false, IsR6(), codegen_);
+}
+
+// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ true, IsR6(), codegen_);
+}
+
+// long sun.misc.Unsafe.getLong(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, IsR6(), codegen_);
+}
+
+// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ true, IsR6(), codegen_);
+}
+
+// Object sun.misc.Unsafe.getObject(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ false, IsR6(), codegen_);
+}
+
+// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ true, IsR6(), codegen_);
+}
+
+static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+}
+
+static void GenUnsafePut(LocationSummary* locations,
+                         Primitive::Type type,
+                         bool is_volatile,
+                         bool is_ordered,
+                         bool is_R6,
+                         CodeGeneratorMIPS* codegen) {
+  DCHECK((type == Primitive::kPrimInt) ||
+         (type == Primitive::kPrimLong) ||
+         (type == Primitive::kPrimNot)) << type;
+  MipsAssembler* assembler = codegen->GetAssembler();
+  // Object pointer.
+  Register base = locations->InAt(1).AsRegister<Register>();
+  // The "offset" argument is passed as a "long", i.e., it's 64-bits in
+  // size. Since this code is for a 32-bit processor, we can only use
+  // 32-bit addresses, so we only need the low 32-bits of offset.
+  Register offset_lo = locations->InAt(2).AsRegisterPairLow<Register>();
+
+  __ Addu(TMP, base, offset_lo);
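+  // SYNC 0 is a full barrier: volatile and ordered puts fence before the store, and
+  // volatile puts fence again after it (see the second Sync below).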
+  if (is_volatile || is_ordered) {
+    __ Sync(0);
+  }
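+  // As with the loads in GenUnsafeGet, pre-R6 cores use SWR/SWL pairs for unaligned
+  // stores while R6 can use a plain SW.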
+  if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) {
+    Register value = locations->InAt(3).AsRegister<Register>();
+
+    if (is_R6) {
+      __ Sw(value, TMP, 0);
+    } else {
+      __ Swr(value, TMP, 0);
+      __ Swl(value, TMP, 3);
+    }
+  } else {
+    Register value_lo = locations->InAt(3).AsRegisterPairLow<Register>();
+    Register value_hi = locations->InAt(3).AsRegisterPairHigh<Register>();
+
+    if (is_R6) {
+      __ Sw(value_lo, TMP, 0);
+      __ Sw(value_hi, TMP, 4);
+    } else {
+      __ Swr(value_lo, TMP, 0);
+      __ Swl(value_lo, TMP, 3);
+      __ Swr(value_hi, TMP, 4);
+      __ Swl(value_hi, TMP, 7);
+    }
+  }
+
+  if (is_volatile) {
+    __ Sync(0);
+  }
+
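+  // Reference stores must dirty the GC card of the holder object.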
+  if (type == Primitive::kPrimNot) {
+    codegen->MarkGCCard(base, locations->InAt(3).AsRegister<Register>());
+  }
+}
+
+// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) {
+  CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimInt,
+               /* is_volatile */ false,
+               /* is_ordered */ false,
+               IsR6(),
+               codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
+  CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimInt,
+               /* is_volatile */ false,
+               /* is_ordered */ true,
+               IsR6(),
+               codegen_);
+}
+
+// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
+  CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimInt,
+               /* is_volatile */ true,
+               /* is_ordered */ false,
+               IsR6(),
+               codegen_);
+}
+
+// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) {
+  CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimNot,
+               /* is_volatile */ false,
+               /* is_ordered */ false,
+               IsR6(),
+               codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+  CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimNot,
+               /* is_volatile */ false,
+               /* is_ordered */ true,
+               IsR6(),
+               codegen_);
+}
+
+// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+  CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimNot,
+               /* is_volatile */ true,
+               /* is_ordered */ false,
+               IsR6(),
+               codegen_);
+}
+
+// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) {
+  CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimLong,
+               /* is_volatile */ false,
+               /* is_ordered */ false,
+               IsR6(),
+               codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+  CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimLong,
+               /* is_volatile */ false,
+               /* is_ordered */ true,
+               IsR6(),
+               codegen_);
+}
+
+// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+  CreateIntIntIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimLong,
+               /* is_volatile */ true,
+               /* is_ordered */ false,
+               IsR6(),
+               codegen_);
+}
+
 // char java.lang.String.charAt(int index)
 void IntrinsicLocationsBuilderMIPS::VisitStringCharAt(HInvoke* invoke) {
   LocationSummary* locations = new (arena_) LocationSummary(invoke,
@@ -1482,7 +1787,7 @@
                                                             kIntrinsified);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
-  // The inputs will be considered live at the last instruction and restored. This will overwrite
+  // The inputs will be considered live at the last instruction and restored. This would overwrite
   // the output with kNoOutputOverlap.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
 }
@@ -2042,21 +2347,6 @@
 UNIMPLEMENTED_INTRINSIC(MIPS, MathRint)
 UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble)
 UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGet)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLong)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLongVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePut)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutOrdered)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObject)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObjectOrdered)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLong)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongOrdered)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongVolatile)
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASInt)
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASObject)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index e2ef7ac..2b43dfb 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -7557,34 +7557,6 @@
   return descriptor;
 }
 
-bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) {
-  Runtime* const runtime = Runtime::Current();
-  if (runtime->UseJit()) {
-    // JIT can have direct code pointers from any method to any other method.
-    return true;
-  }
-  // Non-image methods don't use direct code pointer.
-  if (!m->GetDeclaringClass()->IsBootStrapClassLoaded()) {
-    return false;
-  }
-  if (m->IsPrivate()) {
-    // The method can only be called inside its own oat file. Therefore it won't be called using
-    // its direct code if the oat file has been compiled in PIC mode.
-    const DexFile& dex_file = m->GetDeclaringClass()->GetDexFile();
-    const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
-    if (oat_dex_file == nullptr) {
-      // No oat file: the method has not been compiled.
-      return false;
-    }
-    const OatFile* oat_file = oat_dex_file->GetOatFile();
-    return oat_file != nullptr && !oat_file->IsPic();
-  } else {
-    // The method can be called outside its own oat file. Therefore it won't be called using its
-    // direct code pointer only if all loaded oat files have been compiled in PIC mode.
-    return runtime->GetOatFileManager().HaveNonPicOatFile();
-  }
-}
-
 jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files) {
   // SOAAlreadyRunnable is protected, and we need something to add a global reference.
   // We could move the jobject to the callers, but all call-sites do this...
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 36ed820..c368a3a 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -549,10 +549,6 @@
       REQUIRES(!Locks::classlinker_classes_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Returns true if the method can be called with its direct code pointer, false otherwise.
-  bool MayBeCalledWithDirectCodePointer(ArtMethod* m)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
-
   // Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files.
   // Note: the objects are not completely set up. Do not use this outside of tests and the compiler.
   jobject CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c375bba..109e03d 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -44,7 +44,6 @@
 #include "mirror/object_array-inl.h"
 #include "mirror/string-inl.h"
 #include "mirror/throwable.h"
-#include "quick/inline_method_analyser.h"
 #include "reflection.h"
 #include "safe_map.h"
 #include "scoped_thread_state_change.h"
@@ -53,7 +52,6 @@
 #include "handle_scope-inl.h"
 #include "thread_list.h"
 #include "utf.h"
-#include "verifier/method_verifier-inl.h"
 #include "well_known_classes.h"
 
 namespace art {
@@ -3239,27 +3237,6 @@
   CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
 }
 
-static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
-  const DexFile::CodeItem* code_item = m->GetCodeItem();
-  if (code_item == nullptr) {
-    // TODO We should not be asked to watch location in a native or abstract method so the code item
-    // should never be null. We could just check we never encounter this case.
-    return false;
-  }
-  // Note: method verifier may cause thread suspension.
-  self->AssertThreadSuspensionIsAllowable();
-  StackHandleScope<2> hs(self);
-  mirror::Class* declaring_class = m->GetDeclaringClass();
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
-  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
-  verifier::MethodVerifier verifier(self, dex_cache->GetDexFile(), dex_cache, class_loader,
-                                    &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
-                                    m->GetAccessFlags(), false, true, false, true);
-  // Note: we don't need to verify the method.
-  return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
-}
-
 static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
     SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
   for (Breakpoint& breakpoint : gBreakpoints) {
@@ -3322,33 +3299,22 @@
   }
 
   if (first_breakpoint == nullptr) {
-    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
-    // inlined or default, we deoptimize everything; otherwise we deoptimize only this method. We
+    // There is no breakpoint on this method yet: we need to deoptimize. If this method is default,
+    // we deoptimize everything; otherwise we deoptimize only this method. We
     // deoptimize with defaults because we do not know everywhere they are used. It is possible some
-    // of the copies could be inlined or otherwise missed.
+    // of the copies could be missed.
     // TODO Deoptimizing on default methods might not be necessary in all cases.
-    // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
-    // Therefore we must not hold any lock when we call it.
-    bool need_full_deoptimization = m->IsDefault() || IsMethodPossiblyInlined(self, m);
+    bool need_full_deoptimization = m->IsDefault();
     if (need_full_deoptimization) {
-      VLOG(jdwp) << "Need full deoptimization because of possible inlining or copying of method "
+      VLOG(jdwp) << "Need full deoptimization because of copying of method "
                  << PrettyMethod(m);
       return DeoptimizationRequest::kFullDeoptimization;
     } else {
       // We don't need to deoptimize if the method has not been compiled.
       const bool is_compiled = m->HasAnyCompiledCode();
       if (is_compiled) {
-        ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
-        // If the method may be called through its direct code pointer (without loading
-        // its updated entrypoint), we need full deoptimization to not miss the breakpoint.
-        if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
-          VLOG(jdwp) << "Need full deoptimization because of possible direct code call "
-                     << "into image for compiled method " << PrettyMethod(m);
-          return DeoptimizationRequest::kFullDeoptimization;
-        } else {
-          VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
-          return DeoptimizationRequest::kSelectiveDeoptimization;
-        }
+        VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
+        return DeoptimizationRequest::kSelectiveDeoptimization;
       } else {
         // Method is not compiled: we don't need to deoptimize.
         VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 7319045..e3714bb 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -288,6 +288,12 @@
     records->new_record_condition_.WaitHoldingLocks(self);
   }
 
+  if (!heap->IsAllocTrackingEnabled()) {
+    // Return if allocation tracking was disabled while we were waiting for system weak access
+    // above.
+    return;
+  }
+
   DCHECK_LE(records->Size(), records->alloc_record_max_);
 
   // Get stack trace.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f4fccee..4ff0c6b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3960,31 +3960,31 @@
 
 void Heap::AllowNewAllocationRecords() const {
   CHECK(!kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->AllowNewAllocationRecords();
-    }
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->AllowNewAllocationRecords();
   }
 }
 
 void Heap::DisallowNewAllocationRecords() const {
   CHECK(!kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->DisallowNewAllocationRecords();
-    }
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->DisallowNewAllocationRecords();
   }
 }
 
 void Heap::BroadcastForNewAllocationRecords() const {
   CHECK(kUseReadBarrier);
-  if (IsAllocTrackingEnabled()) {
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->BroadcastForNewAllocationRecords();
-    }
+  // Always broadcast without checking IsAllocTrackingEnabled(): alloc tracking may be disabled
+  // (making IsAllocTrackingEnabled() false) while threads are still waiting for system weak access
+  // in AllocRecordObjectMap::RecordAllocation(), and we could then fail to wake them up. b/27467554.
+  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
+  if (allocation_records != nullptr) {
+    allocation_records->BroadcastForNewAllocationRecords();
   }
 }
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index c681ed7..344fcb9 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -366,7 +366,8 @@
     if (osr) {
       number_of_osr_compilations_++;
       osr_code_map_.Put(method, code_ptr);
-    } else {
+    } else if (!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
+      // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
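+      // When exit stubs are installed the instrumentation manages the method entry
+      // points, so don't overwrite them with the newly compiled code.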
       Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
           method, method_header->GetEntryPoint());
     }
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2176444..2a7cd07 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -561,6 +561,7 @@
 
   // Read every page from the high address to the low.
   volatile uint8_t dont_optimize_this;
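+  // The variable is only written in the loop below, never read; UNUSED() keeps the
+  // resulting compiler warning quiet.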
+  UNUSED(dont_optimize_this);
   for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
     dont_optimize_this = *p;
   }
diff --git a/test/551-checker-shifter-operand/build b/test/551-checker-shifter-operand/build
index 18e8c59..a78021f 100644
--- a/test/551-checker-shifter-operand/build
+++ b/test/551-checker-shifter-operand/build
@@ -58,8 +58,8 @@
 
 # Setup experimental flag mappings in a bash associative array.
 declare -A JACK_EXPERIMENTAL_ARGS
-JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8"
-JACK_EXPERIMENTAL_ARGS["lambdas"]="-D jack.java.source.version=1.8"
+JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
+JACK_EXPERIMENTAL_ARGS["lambdas"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
 
 while true; do
   if [ "x$1" = "x--dx-option" ]; then
diff --git a/test/970-iface-super-resolution-generated/build b/test/970-iface-super-resolution-generated/build
index 2d9830b..fd1b271 100755
--- a/test/970-iface-super-resolution-generated/build
+++ b/test/970-iface-super-resolution-generated/build
@@ -31,7 +31,7 @@
 if [[ $@ == *"--jvm"* ]]; then
   USES_JAVA_SOURCE="true"
 elif [[ "$USE_JACK" == "true" ]]; then
-  if $JACK -D jack.java.source.version=1.8 2>/dev/null; then
+  if $JACK -D jack.java.source.version=1.8 -D jack.android.min-api-level=24 2>/dev/null; then
     USES_JAVA_SOURCE="true"
   else
     echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2
diff --git a/test/etc/default-build b/test/etc/default-build
index d048757..3d84821 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -66,8 +66,8 @@
 
 # Setup experimental flag mappings in a bash associative array.
 declare -A JACK_EXPERIMENTAL_ARGS
-JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8"
-JACK_EXPERIMENTAL_ARGS["lambdas"]="-D jack.java.source.version=1.8"
+JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
+JACK_EXPERIMENTAL_ARGS["lambdas"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
 
 while true; do
   if [ "x$1" = "x--dx-option" ]; then