Revert^4 "Initial support for adding virtuals with structural redefinition"

This reverts commit 2f8c1ac61b0c611d67badea70261c851ed19b82a.

If there were pending tasks to JIT-compile a method that was made
obsolete, the JIT would CHECK-fail, since the newly obsolete method is
marked DontCompile. This didn't happen with non-structural
redefinition, since in that case the 'valid' ArtMethod always remains
the same.

To fix this, we have the JitTask check whether the method it's
compiling is still compilable and fail the compilation if it's not.
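
A minimal sketch of the guard, with hypothetical names modeled on the
ART JIT (the real change adds the equivalent check to the task that
runs queued compilations):

    void JitCompileTask::Run(Thread* self) {
      // A structural redefinition may have made this method obsolete
      // (and thus DontCompile) after the task was queued; bail out
      // instead of CHECK-failing in the compiler.
      if (!method_->IsCompilable()) {
        return;
      }
      // ... proceed with JIT compilation as before ...
    }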

Reason for revert: Fixed JIT check failure.

Test: ./test.py --host
Bug: 134162467
Bug: 144168550
Bug: 144729319
Change-Id: Ib867b2de13bb4c2978b4538a5851c647caf0e1cc
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 85b79da..b462e29 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -17,6 +17,7 @@
 #include "heap.h"
 
 #include <limits>
+#include "android-base/thread_annotations.h"
 #if defined(__BIONIC__) || defined(__GLIBC__)
 #include <malloc.h>  // For mallinfo()
 #endif
@@ -1723,18 +1724,46 @@
                                              size_t* bytes_allocated,
                                              size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated,
-                                             ObjPtr<mirror::Class>* klass) {
+                                             ObjPtr<mirror::Class>* klass,
+                                             /*inout*/const char** old_no_thread_suspend_cause) {
   bool was_default_allocator = allocator == GetCurrentAllocator();
   // Make sure there is no pending exception since we may need to throw an OOME.
   self->AssertNoPendingException();
   DCHECK(klass != nullptr);
+
   StackHandleScope<1> hs(self);
-  HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
+  HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(klass));
+
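+  // End the no-thread-suspension region the caller entered after its PreObjectAllocated
+  // call, so that a suspending operation (e.g. a GC) can run.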
+  auto release_no_suspend = [&]() RELEASE(Roles::uninterruptible_) {
+    self->EndAssertNoThreadSuspension(*old_no_thread_suspend_cause);
+  };
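+  // Re-send the PreObjectAllocated event (the listener may update alloc_size) and enter a
+  // new no-thread-suspension region until the allocation is retried.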
+  auto send_object_pre_alloc = [&]() ACQUIRE(Roles::uninterruptible_)
+                                     REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (UNLIKELY(instrumented)) {
+      AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst);
+      if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
+        l->PreObjectAllocated(self, h_klass, &alloc_size);
+      }
+    }
+    *old_no_thread_suspend_cause =
+        self->StartAssertNoThreadSuspension("Called PreObjectAllocated, no suspend until alloc");
+  };
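+// Run 'op' (which may suspend) outside the no-suspension region, re-sending the pre-alloc
+// event afterwards since the listener may have acted while this thread was suspendable.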
+#define PERFORM_SUSPENDING_OPERATION(op)                                          \
+  [&]() REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) { \
+    release_no_suspend();                                                         \
+    auto res = (op);                                                              \
+    send_object_pre_alloc();                                                      \
+    return res;                                                                   \
+  }()
+
   // The allocation failed. If the GC is running, block until it completes, and then retry the
   // allocation.
-  collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
+  // WaitForGcToComplete may suspend, so run it through PERFORM_SUSPENDING_OPERATION, which
+  // re-sends the pre-alloc event after the wait completes.
+  collector::GcType last_gc =
+      PERFORM_SUSPENDING_OPERATION(WaitForGcToComplete(kGcCauseForAlloc, self));
   // If we were the default allocator but the allocator changed while we were suspended,
   // abort the allocation.
   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
       (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
@@ -1749,8 +1778,9 @@
   }
 
   collector::GcType tried_type = next_gc_type_;
-  const bool gc_ran =
-      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+  const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
+      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
+
   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
       (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
@@ -1769,8 +1799,8 @@
       continue;
     }
     // Attempt to run the collector, if we succeed, re-try the allocation.
-    const bool plan_gc_ran =
-        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+    const bool plan_gc_ran = PERFORM_SUSPENDING_OPERATION(
+        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
     if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
         (!instrumented && EntrypointsInstrumented())) {
       return nullptr;
@@ -1800,7 +1830,7 @@
   // TODO: Run finalization, but this may cause more allocations to occur.
   // We don't need a WaitForGcToComplete here either.
   DCHECK(!gc_plan_.empty());
-  CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
+  PERFORM_SUSPENDING_OPERATION(CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true));
   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
       (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
@@ -1817,7 +1847,8 @@
             current_time - last_time_homogeneous_space_compaction_by_oom_ >
             min_interval_homogeneous_space_compaction_by_oom_) {
           last_time_homogeneous_space_compaction_by_oom_ = current_time;
-          HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
+          HomogeneousSpaceCompactResult result =
+              PERFORM_SUSPENDING_OPERATION(PerformHomogeneousSpaceCompact());
           // Thread suspension could have occurred.
           if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
               (!instrumented && EntrypointsInstrumented())) {
@@ -1862,9 +1893,13 @@
       }
     }
   }
+#undef PERFORM_SUSPENDING_OPERATION
   // If the allocation hasn't succeeded by this point, throw an OOM error.
   if (ptr == nullptr) {
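+    // Throwing the OOME can allocate and suspend; drop the assertion around it and restore
+    // it so the caller's matching EndAssertNoThreadSuspension stays balanced.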
+    release_no_suspend();
     ThrowOutOfMemoryError(self, alloc_size, allocator);
+    *old_no_thread_suspend_cause =
+        self->StartAssertNoThreadSuspension("Failed allocation fallback");
   }
   return ptr;
 }