Compacting collector.
The compacting collector is currently similar to semispace. It works by
copying objects back and forth between two bump pointer spaces. There
are types of objects which are "non-movable" due to current runtime
limitations. These are Classes, Methods, and Fields.
Bump pointer spaces are a new type of continuous alloc space which have
no lock in the allocation code path. When you allocate from these it uses
atomic operations to increase an index. Traversing the objects in the bump
pointer space relies on Object::SizeOf matching the allocated size exactly.
Runtime changes:
JNI::GetArrayElements returns copies of objects if you attempt to get the
backing data of a movable array. For GetArrayElementsCritical, we return
direct backing storage for any types of arrays, but temporarily disable
the GC until the critical region is completed.
Added a new runtime call called VisitObjects; it is used in place of
the old pattern, which was flushing the allocation stack and walking
the bitmaps.
Changed image writer to be compaction safe and use object monitor word
for forwarding addresses.
Added a number of SIRTs to ClassLinker, MethodLinker, etc..
TODO: Enable switching allocators, compacting on background, etc..
Bug: 8981901
Change-Id: I3c886fd322a6eef2b99388d19a765042ec26ab99
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 9751076..1f6dd69 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -152,7 +152,7 @@
MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
// Check that if we got here we cannot be shutting down (as shutdown should never have started
// while threads are being born).
- CHECK(!runtime->IsShuttingDown());
+ CHECK(!runtime->IsShuttingDownLocked());
self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
Runtime::Current()->EndThreadBirth();
}
@@ -241,7 +241,7 @@
bool thread_start_during_shutdown = false;
{
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
- if (runtime->IsShuttingDown()) {
+ if (runtime->IsShuttingDownLocked()) {
thread_start_during_shutdown = true;
} else {
runtime->StartThreadBirth();
@@ -328,7 +328,7 @@
}
{
MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
- if (runtime->IsShuttingDown()) {
+ if (runtime->IsShuttingDownLocked()) {
LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
return NULL;
} else {
@@ -1352,13 +1352,12 @@
*stack_depth = depth;
}
- MethodHelper mh;
for (int32_t i = 0; i < depth; ++i) {
mirror::ObjectArray<mirror::Object>* method_trace =
soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
// Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
- mh.ChangeMethod(method);
+ MethodHelper mh(method);
mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
uint32_t dex_pc = pc_trace->Get(i);
int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
@@ -1385,11 +1384,8 @@
SirtRef<mirror::String> source_name_object(soa.Self(),
mirror::String::AllocFromModifiedUtf8(soa.Self(),
source_file));
- mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(),
- class_name_object.get(),
- method_name_object.get(),
- source_name_object.get(),
- line_number);
+ mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
+ soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
if (obj == NULL) {
return NULL;
}
@@ -1437,8 +1433,10 @@
if (throw_location.GetMethod() != NULL) {
cl = throw_location.GetMethod()->GetDeclaringClass()->GetClassLoader();
}
+ SirtRef<mirror::ClassLoader> class_loader(this, cl);
SirtRef<mirror::Class>
- exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor, cl));
+ exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor,
+ class_loader));
if (UNLIKELY(exception_class.get() == NULL)) {
CHECK(IsExceptionPending());
LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
@@ -1453,6 +1451,12 @@
SirtRef<mirror::Throwable> exception(this,
down_cast<mirror::Throwable*>(exception_class->AllocObject(this)));
+ // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
+ if (exception.get() == nullptr) {
+ SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
+ return;
+ }
+
// Choose an appropriate constructor and set up the arguments.
const char* signature;
SirtRef<mirror::String> msg_string(this, NULL);
@@ -1741,18 +1745,21 @@
return true; // Continue stack walk.
}
- bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HandleDeoptimization(mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
MethodHelper mh(m);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
CHECK(code_item != NULL);
- uint16_t num_regs = code_item->registers_size_;
+ uint16_t num_regs = code_item->registers_size_;
uint32_t dex_pc = GetDexPc();
const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, m, new_dex_pc);
- verifier::MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
- &mh.GetClassDef(), code_item,
- m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true);
+ SirtRef<mirror::DexCache> dex_cache(self_, mh.GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self_, mh.GetClassLoader());
+ verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
+ &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
+ m->GetAccessFlags(), false, true);
verifier.Verify();
std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
for (uint16_t reg = 0; reg < num_regs; reg++) {
@@ -2088,6 +2095,13 @@
void* const arg_;
};
+void Thread::SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
+ if (kIsDebugBuild) {
+ Runtime::Current()->GetHeap()->VerifyObject(class_loader_override);
+ }
+ class_loader_override_ = class_loader_override;
+}
+
void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
if (opeer_ != nullptr) {
opeer_ = visitor(opeer_, arg);
@@ -2115,10 +2129,9 @@
for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
if (frame.this_object_ != nullptr) {
frame.this_object_ = visitor(frame.this_object_, arg);
- DCHECK(frame.this_object_ != nullptr);
}
- frame.method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(frame.method_, arg));
DCHECK(frame.method_ != nullptr);
+ frame.method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(frame.method_, arg));
}
}