Revert^4 "Initial support for adding virtuals with structural redefinition"
This reverts commit 2f8c1ac61b0c611d67badea70261c851ed19b82a.
If there were pending tasks to JIT compile a method which was made
obsolete, the JIT would CHECK-fail, since the newly obsolete method is
marked DontCompile. This did not happen with non-structural
redefinition, since in that case the 'valid' ArtMethod always remains
the same.
To fix this we have the JIT compile task check whether the method it
is compiling is still compilable and bail out if it is not.
Reason for revert: Fixed JIT check failure.
Test: ./test.py --host
Bug: 134162467
Bug: 144168550
Bug: 144729319
Change-Id: Ib867b2de13bb4c2978b4538a5851c647caf0e1cc
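
In sketch form, the fix amounts to the following early-out at the start of the
JIT's compile path (simplified; the real check, including its DCHECK and
logging, is in the jit.cc hunk below):

    // Bail out of a queued compile task whose method was made obsolete (and
    // therefore marked DontCompile) after the task was enqueued.
    if (!method->IsCompilable()) {
      return false;  // Skip compilation instead of CHECK-failing later.
    }
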
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
index 4b85df0..c3518f3 100644
--- a/runtime/base/locks.h
+++ b/runtime/base/locks.h
@@ -129,6 +129,9 @@
kMutatorLock,
kInstrumentEntrypointsLock,
+ // This is a generic lock level for a top-level lock meant to be acquired while already holding
+ // the UserCodeSuspensionLock.
+ kPostUserCodeSuspensionTopLevelLock,
kUserCodeSuspensionLock,
kZygoteCreationLock,
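
A lock declared at the new level may be acquired while the thread already holds
the UserCodeSuspensionLock, but before lower-level locks such as the mutator
lock. A hypothetical declaration (the lock itself is illustrative; only the
level comes from this change, and the Mutex constructor from base/mutex.h is
assumed):

    // Sits just below kUserCodeSuspensionLock in the lock hierarchy.
    Mutex redefinition_top_lock("structural redefinition top-level lock",
                                kPostUserCodeSuspensionTopLevelLock);
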
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index a578252..376b524 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -23,11 +23,13 @@
#include "base/locks.h"
#include "base/macros.h"
#include "gc_root.h"
+#include "handle.h"
#include "obj_ptr.h"
namespace art {
namespace mirror {
+class Class;
class Object;
} // namespace mirror
@@ -39,6 +41,26 @@
public:
virtual ~AllocationListener() {}
+ // An event to allow a listener to intercept and modify an allocation before it takes place.
+ // The listener can change the byte_count and type as it sees fit. Extreme caution should be used
+ // when doing so. This can also be used to control an allocation occurring on another thread.
+ //
+ // Concurrency guarantees: this may be called multiple times for a single allocation. It is
+ // guaranteed that, between the final call to the callback and the object becoming visible to
+ // heap-walks, there are no suspensions. If a suspension happens between these events, the
+ // callback will be invoked again after passing the suspend point.
+ //
+ // In other words, if the allocation succeeds there are no suspend points between the last
+ // return of PreObjectAllocated and the newly allocated object becoming visible to heap-walks.
+ //
+ // This can also be used to make last-minute changes to the type or size of the allocation.
+ virtual void PreObjectAllocated(Thread* self ATTRIBUTE_UNUSED,
+ MutableHandle<mirror::Class> type ATTRIBUTE_UNUSED,
+ size_t* byte_count ATTRIBUTE_UNUSED)
+ REQUIRES(!Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) {}
+ // Fast check if we want to get the PreObjectAllocated callback, to avoid the expense of creating
+ // handles. Defaults to false.
+ virtual bool HasPreAlloc() const { return false; }
virtual void ObjectAllocated(Thread* self, ObjPtr<mirror::Object>* obj, size_t byte_count)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
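
For illustration, a listener opting into the new hook might look like the
following sketch (the class and its padding policy are invented; only the
overridden signatures come from the header above, and the art namespace is
assumed):

    class PaddingAllocationListener : public gc::AllocationListener {
     public:
      // Opt in so the allocation path pays the cost of creating handles.
      bool HasPreAlloc() const override { return true; }

      // May run more than once for a single allocation if a suspension
      // intervenes, so it must be idempotent.
      void PreObjectAllocated(Thread* self ATTRIBUTE_UNUSED,
                              MutableHandle<mirror::Class> type ATTRIBUTE_UNUSED,
                              size_t* byte_count) override
          REQUIRES(!Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) {
        // Invented policy: round every allocation up to a 16-byte multiple.
        *byte_count = (*byte_count + 15) & ~static_cast<size_t>(15);
      }

      void ObjectAllocated(Thread* self ATTRIBUTE_UNUSED,
                           ObjPtr<mirror::Object>* obj ATTRIBUTE_UNUSED,
                           size_t byte_count ATTRIBUTE_UNUSED) override
          REQUIRES_SHARED(Locks::mutator_lock_) {
        // Post-allocation bookkeeping would go here.
      }
    };
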
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index c1b3a63..04632ef 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -65,10 +65,30 @@
HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
self->PoisonObjectPointers();
}
+ auto send_pre_object_allocated = [&]() REQUIRES_SHARED(Locks::mutator_lock_)
+ ACQUIRE(Roles::uninterruptible_) {
+ if constexpr (kInstrumented) {
+ AllocationListener* l = nullptr;
+ l = alloc_listener_.load(std::memory_order_seq_cst);
+ if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(&klass));
+ l->PreObjectAllocated(self, h_klass, &byte_count);
+ }
+ }
+ return self->StartAssertNoThreadSuspension("Called PreObjectAllocated, no suspend until alloc");
+ };
+ // Do the initial pre-alloc
+ const char* old_cause = send_pre_object_allocated();
+ // We shouldn't have any NoThreadSuspension here!
+ DCHECK(old_cause == nullptr) << old_cause;
+
// Need to check that we aren't the large object allocator since the large object allocation code
// path includes this function. If we didn't check we would have an infinite loop.
ObjPtr<mirror::Object> obj;
if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
+ // AllocLargeObject can suspend and will call PreObjectAllocated again if needed.
+ self->EndAssertNoThreadSuspension(old_cause);
obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
pre_fence_visitor);
if (obj != nullptr) {
@@ -80,6 +100,8 @@
// If the large object allocation failed, try to use the normal spaces (main space,
// non moving space). This can happen if there is significant virtual address space
// fragmentation.
+ // We need to send PreObjectAllocated again; we might have suspended during the failure.
+ old_cause = send_pre_object_allocated();
}
// bytes allocated for the (individual) object.
size_t bytes_allocated;
@@ -100,6 +122,7 @@
usable_size = bytes_allocated;
no_suspend_pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
+ self->EndAssertNoThreadSuspension(old_cause);
} else if (
!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
(obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) != nullptr &&
@@ -112,6 +135,7 @@
usable_size = bytes_allocated;
no_suspend_pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
+ self->EndAssertNoThreadSuspension(old_cause);
} else {
// Bytes allocated that includes bulk thread-local buffer allocations in addition to direct
// non-TLAB object allocations.
@@ -121,14 +145,19 @@
if (UNLIKELY(obj == nullptr)) {
// AllocateInternalWithGc can cause thread suspension, if someone instruments the entrypoints
// or changes the allocator in a suspend point here, we need to retry the allocation.
+ // It will send the pre-alloc event again.
+ self->EndAssertNoThreadSuspension(old_cause);
obj = AllocateInternalWithGc(self,
allocator,
kInstrumented,
byte_count,
&bytes_allocated,
&usable_size,
- &bytes_tl_bulk_allocated, &klass);
+ &bytes_tl_bulk_allocated,
+ &klass,
+ &old_cause);
if (obj == nullptr) {
+ self->EndAssertNoThreadSuspension(old_cause);
// The only way that we can get a null return if there is no pending exception is if the
// allocator or instrumentation changed.
if (!self->IsExceptionPending()) {
@@ -156,6 +185,7 @@
}
no_suspend_pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
+ self->EndAssertNoThreadSuspension(old_cause);
if (bytes_tl_bulk_allocated > 0) {
size_t num_bytes_allocated_before =
num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
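
Taken together, the additions above follow one discipline around Thread's
no-suspend assertion; as a standalone sketch (DoWorkThatMaySuspend is a
placeholder, the other names are taken from the hunk above):

    // Invariant: no suspend point between the last PreObjectAllocated callback
    // and the new object becoming visible to heap walks.
    const char* old_cause = send_pre_object_allocated();  // event sent, assertion armed
    // Any step that may suspend drops the assertion first ...
    self->EndAssertNoThreadSuspension(old_cause);
    DoWorkThatMaySuspend();  // placeholder: large-object alloc, GC fallback, ...
    // ... and re-sends the event afterwards, re-arming the assertion.
    old_cause = send_pre_object_allocated();
    // Once the object has been allocated and published:
    self->EndAssertNoThreadSuspension(old_cause);
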
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 85b79da..b462e29 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -17,6 +17,7 @@
#include "heap.h"
#include <limits>
+#include "android-base/thread_annotations.h"
#if defined(__BIONIC__) || defined(__GLIBC__)
#include <malloc.h> // For mallinfo()
#endif
@@ -1723,18 +1724,46 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
- ObjPtr<mirror::Class>* klass) {
+ ObjPtr<mirror::Class>* klass,
+ /*out*/const char** old_no_thread_suspend_cause) {
bool was_default_allocator = allocator == GetCurrentAllocator();
// Make sure there is no pending exception since we may need to throw an OOME.
self->AssertNoPendingException();
DCHECK(klass != nullptr);
+
StackHandleScope<1> hs(self);
- HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
+ HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(klass));
+
+ auto release_no_suspend = [&]() RELEASE(Roles::uninterruptible_) {
+ self->EndAssertNoThreadSuspension(*old_no_thread_suspend_cause);
+ };
+ auto send_object_pre_alloc = [&]() ACQUIRE(Roles::uninterruptible_)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (UNLIKELY(instrumented)) {
+ AllocationListener* l = nullptr;
+ l = alloc_listener_.load(std::memory_order_seq_cst);
+ if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
+ l->PreObjectAllocated(self, h_klass, &alloc_size);
+ }
+ }
+ *old_no_thread_suspend_cause =
+ self->StartAssertNoThreadSuspension("Called PreObjectAllocated, no suspend until alloc");
+};
+#define PERFORM_SUSPENDING_OPERATION(op) \
+ [&]() REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) { \
+ release_no_suspend(); \
+ auto res = (op); \
+ send_object_pre_alloc(); \
+ return res; \
+ }()
+
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
// If we were the default allocator but the allocator changed while we were suspended,
// abort the allocation.
+ // We may have just waited for the GC to complete; send the pre-alloc event again.
+ send_object_pre_alloc();
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
@@ -1749,8 +1778,9 @@
}
collector::GcType tried_type = next_gc_type_;
- const bool gc_ran =
- CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+ const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
+ CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
+
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
@@ -1769,8 +1799,8 @@
continue;
}
// Attempt to run the collector, if we succeed, re-try the allocation.
- const bool plan_gc_ran =
- CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+ const bool plan_gc_ran = PERFORM_SUSPENDING_OPERATION(
+ CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
@@ -1800,7 +1830,7 @@
// TODO: Run finalization, but this may cause more allocations to occur.
// We don't need a WaitForGcToComplete here either.
DCHECK(!gc_plan_.empty());
- CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
+ PERFORM_SUSPENDING_OPERATION(CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true));
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
@@ -1817,7 +1847,8 @@
current_time - last_time_homogeneous_space_compaction_by_oom_ >
min_interval_homogeneous_space_compaction_by_oom_) {
last_time_homogeneous_space_compaction_by_oom_ = current_time;
- HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
+ HomogeneousSpaceCompactResult result =
+ PERFORM_SUSPENDING_OPERATION(PerformHomogeneousSpaceCompact());
// Thread suspension could have occurred.
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
@@ -1862,9 +1893,13 @@
}
}
}
+#undef PERFORM_SUSPENDING_OPERATION
// If the allocation hasn't succeeded by this point, throw an OOM error.
if (ptr == nullptr) {
+ release_no_suspend();
ThrowOutOfMemoryError(self, alloc_size, allocator);
+ *old_no_thread_suspend_cause =
+ self->StartAssertNoThreadSuspension("Failed allocation fallback");
}
return ptr;
}
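
Expanded by hand, the first use of PERFORM_SUSPENDING_OPERATION above is
equivalent to the following (a sketch of the macro expansion, nothing beyond
what the hunk defines):

    const bool gc_ran = [&]() REQUIRES(Roles::uninterruptible_)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      release_no_suspend();  // drop the no-suspend assertion before suspending work
      auto res = (CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) !=
                  collector::kGcTypeNone);  // may suspend
      send_object_pre_alloc();  // re-send the pre-alloc event and re-arm the assertion
      return res;
    }();
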
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9ef6af5..6f6cfd1 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -1011,8 +1011,10 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
- ObjPtr<mirror::Class>* klass)
+ ObjPtr<mirror::Class>* klass,
+ /*out*/const char** old_no_thread_suspend_cause)
REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
+ ACQUIRE(Roles::uninterruptible_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate into a specific space.
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index f69d786..8c7d657 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -291,6 +291,15 @@
return false;
}
+ if (!method->IsCompilable()) {
+ DCHECK(method->GetDeclaringClass()->IsObsoleteObject() ||
+ method->IsProxyMethod()) << method->PrettyMethod();
+ VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to method being made "
+ << "obsolete while waiting for JIT task to run. This probably happened due to "
+ << "concurrent structural class redefinition.";
+ return false;
+ }
+
// Don't compile the method if we are supposed to be deoptimized.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 6d1a8e0..2f36fe6 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -37,6 +37,9 @@
constexpr size_t SizeValue() const {
return val_;
}
+ constexpr bool operator==(Offset o) const {
+ return SizeValue() == o.SizeValue();
+ }
protected:
size_t val_;
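
The comparison is constexpr (SizeValue() already is), so offsets can now be
compared directly by value; a hypothetical use:

    // Offsets compare by value, e.g. when verifying that a field keeps its
    // location across a redefinition.
    constexpr Offset kExpected(8u);
    static_assert(kExpected == Offset(8u), "Offset compares by value");
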