Visit class native roots from VisitReferences
Visit a class's native roots from Class::VisitReferences instead of from
the class linker. This makes class unloading easier to implement, since
unmarked classes won't have their roots visited by the class linker.
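
For reference, a minimal standalone sketch of the pattern (hypothetical,
simplified types, not the real ART classes): the GC reaches a class's
native ArtField/ArtMethod roots while scanning the class object itself,
so a class that is never marked never has those roots visited.

    // Hypothetical, simplified sketch; the real types are mirror::Class,
    // ArtField and ArtMethod.
    #include <vector>

    struct Object {};

    struct GcRoot {            // stand-in for GcRoot<mirror::Object>
      Object* ref;
    };

    struct NativeField {       // plays the role of ArtField
      GcRoot declaring_class;  // native root into the managed heap
    };

    struct Class : Object {
      std::vector<NativeField> fields;

      // cf. Class::VisitNativeRoots: visit roots owned natively by the class.
      template <typename Visitor>
      void VisitNativeRoots(Visitor& visitor) {
        for (NativeField& f : fields) {
          visitor(&f.declaring_class);
        }
      }

      // cf. Class::VisitReferences: after this change, native roots are
      // visited as part of scanning the class object itself.
      template <typename Visitor>
      void VisitReferences(Visitor& visitor) {
        VisitNativeRoots(visitor);
      }
    };

    int main() {
      Class k;
      k.fields.push_back(NativeField{GcRoot{&k}});
      int visited = 0;
      auto visitor = [&](GcRoot* /*root*/) { ++visited; };
      k.VisitReferences(visitor);  // roots reached only via the class scan
      return visited == 1 ? 0 : 1;
    }
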
Bug: 22181835
Change-Id: I63f31e5ebef7b2a0b764b3ba3cb038b3f561b379
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 1a0ee0f..fa0694b 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -151,8 +151,9 @@
void SetObj(mirror::Object* object, mirror::Object* new_value)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
- void VisitRoots(RootVisitorType& visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS;
bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccVolatile) != 0;
diff --git a/runtime/art_method.h b/runtime/art_method.h
index a5bd2f0..90352b7 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -447,8 +447,9 @@
bool* has_no_move_exception)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
- void VisitRoots(RootVisitorType& visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS;
const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index fc59d50..6a76bf7 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1252,8 +1252,7 @@
// There are 3 GC cases to handle:
// Non moving concurrent:
// This case is easy to handle since the reference members of ArtMethod and ArtFields are held
- // live by the class and class roots. In this case we probably don't even need to call
- // VisitNativeRoots.
+ // live by the class and class roots.
//
// Moving non-concurrent:
// This case needs to call VisitNativeRoots in case the classes or dex cache arrays move.
@@ -1266,23 +1265,14 @@
// marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy.
for (GcRoot<mirror::Class>& root : class_table_) {
buffered_visitor.VisitRoot(root);
- if ((flags & kVisitRootFlagNonMoving) == 0) {
- // Don't bother visiting ArtField and ArtMethod if kVisitRootFlagNonMoving is set since
- // these roots are all reachable from the class or dex cache.
- root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_);
- }
}
// PreZygote classes can't move so we won't need to update fields' declaring classes.
for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
buffered_visitor.VisitRoot(root);
- if ((flags & kVisitRootFlagNonMoving) == 0) {
- root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_);
- }
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_class_roots_) {
mirror::Class* old_ref = root.Read<kWithoutReadBarrier>();
- old_ref->VisitNativeRoots(buffered_visitor, image_pointer_size_);
root.VisitRoot(visitor, RootInfo(kRootStickyClass));
mirror::Class* new_ref = root.Read<kWithoutReadBarrier>();
if (UNLIKELY(new_ref != old_ref)) {
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 68e7fa0..157f609 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -110,6 +110,18 @@
}
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
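+    // VisitRoot below only performs a debug check, so skip the call entirely in release builds.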
+ if (kIsDebugBuild && !root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(from_space_->HasAddress(root->AsMirrorPtr()));
+ }
+
private:
MarkObjectVisitor* const visitor_;
// Space which we are scanning
@@ -163,7 +175,7 @@
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::HeapReference<Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
mirror::Object* ref = ref_ptr->AsMirrorPtr();
@@ -174,6 +186,18 @@
}
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kIsDebugBuild && !root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(!mod_union_table_->ShouldAddReference(root->AsMirrorPtr()));
+ }
+
private:
ModUnionTableReferenceCache* const mod_union_table_;
std::vector<mirror::HeapReference<Object>*>* const references_;
@@ -208,7 +232,7 @@
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref != nullptr && mod_union_table_->ShouldAddReference(ref) &&
@@ -228,6 +252,18 @@
}
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kIsDebugBuild && !root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(!mod_union_table_->ShouldAddReference(root->AsMirrorPtr()));
+ }
+
private:
ModUnionTableReferenceCache* const mod_union_table_;
const std::set<const Object*>& references_;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 994a0ad..251b50d 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -67,7 +67,7 @@
: collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
@@ -79,14 +79,25 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (target_space_->HasAddress(ref->GetReferent())) {
*contains_reference_to_target_space_ = true;
collector_->DelayReferenceReferent(klass, ref);
}
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kIsDebugBuild && !root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(!target_space_->HasAddress(root->AsMirrorPtr()));
+ }
+
private:
collector::GarbageCollector* const collector_;
space::ContinuousSpace* const target_space_;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index baa33b3..ec689f8 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -638,7 +638,7 @@
explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
@@ -651,6 +651,19 @@
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
+ visitor(root->AsMirrorPtr());
+ }
+
private:
ConcurrentCopying* const collector_;
};
@@ -750,18 +763,31 @@
explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(ref);
}
- void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
+ void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
+ visitor(root->AsMirrorPtr());
+ }
+
private:
ConcurrentCopying* const collector_;
};
@@ -1500,6 +1526,18 @@
collector_->DelayReferenceReferent(klass, ref);
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ collector_->MarkRoot(root);
+ }
+
private:
ConcurrentCopying* const collector_;
};
@@ -1513,7 +1551,8 @@
// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
+ mirror::Object* ref = obj->GetFieldObject<
+ mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
if (ref == nullptr || region_space_->IsInToSpace(ref)) {
return;
}
@@ -1530,8 +1569,8 @@
// It was updated by the mutator.
break;
}
- } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
- offset, expected_ref, new_ref));
+ } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
+ false, false, kVerifyNone>(offset, expected_ref, new_ref));
}
// Process some roots.
@@ -1559,22 +1598,18 @@
}
}
-void ConcurrentCopying::VisitRoots(
- mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info ATTRIBUTE_UNUSED) {
- for (size_t i = 0; i < count; ++i) {
- mirror::CompressedReference<mirror::Object>* root = roots[i];
- mirror::Object* ref = root->AsMirrorPtr();
- if (ref == nullptr || region_space_->IsInToSpace(ref)) {
- continue;
- }
- mirror::Object* to_ref = Mark(ref);
- if (to_ref == ref) {
- continue;
- }
+void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
+ DCHECK(!root->IsNull());
+ mirror::Object* const ref = root->AsMirrorPtr();
+ if (region_space_->IsInToSpace(ref)) {
+ return;
+ }
+ mirror::Object* to_ref = Mark(ref);
+ if (to_ref != ref) {
auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
+  // If the CAS fails, then it was updated by the mutator.
do {
if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
// It was updated by the mutator.
@@ -1584,6 +1619,17 @@
}
}
+void ConcurrentCopying::VisitRoots(
+ mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::CompressedReference<mirror::Object>* const root = roots[i];
+ if (!root->IsNull()) {
+ MarkRoot(root);
+ }
+ }
+}
+
// Fill the given memory block with a dummy object. Used to fill in a
// copy of objects that was lost in race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index d324ce1..a4fd71c 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -122,6 +122,8 @@
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index c5ad613..4b2c588 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -438,6 +438,19 @@
ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
}
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ root->Assign(collector_->GetMarkedForwardAddress(root->AsMirrorPtr()));
+ }
+
private:
MarkCompact* const collector_;
};
@@ -575,6 +588,19 @@
collector_->DelayReferenceReferent(klass, ref);
}
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ collector_->MarkObject(root->AsMirrorPtr());
+ }
+
private:
MarkCompact* const collector_;
};
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 92dde51..7f2c204 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -365,7 +365,7 @@
: mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
}
- void operator()(const mirror::Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
// TODO: Differentiate between marking and testing somehow.
++mark_sweep_->large_object_test_;
@@ -597,8 +597,7 @@
: mark_sweep_(mark_sweep) {}
void operator()(mirror::Object* obj) const ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -649,13 +648,33 @@
protected:
class MarkObjectParallelVisitor {
public:
- explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
- MarkSweep* mark_sweep) ALWAYS_INLINE
- : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
+ ALWAYS_INLINE explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
+ MarkSweep* mark_sweep)
+ : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const
+ ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ Mark(obj->GetFieldObject<mirror::Object>(offset));
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kCheckLocks) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ Mark(root->AsMirrorPtr());
+ }
+
+ private:
+ void Mark(mirror::Object* ref) const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
if (kUseFinger) {
std::atomic_thread_fence(std::memory_order_seq_cst);
@@ -668,7 +687,6 @@
}
}
- private:
MarkStackTask<kUseFinger>* const chunk_task_;
MarkSweep* const mark_sweep_;
};
@@ -1268,6 +1286,22 @@
mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ if (kCheckLocks) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ mark_sweep_->MarkObject(root->AsMirrorPtr());
+ }
+
private:
MarkSweep* const mark_sweep_;
};
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 99e00f9..606be63 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -58,12 +58,12 @@
~MarkSweep() {}
- virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
void InitializePhase();
- void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_, !mark_stack_lock_);
+ void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void PausePhase() REQUIRES(Locks::mutator_lock_, !mark_stack_lock_);
void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
- void FinishPhase() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FinishPhase();
virtual void MarkReachableObjects()
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index e93ff05..acc1d9b 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -295,8 +295,26 @@
LOG(FATAL) << ref << " found in from space";
}
}
+
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ CHECK(!from_space_->HasAddress(root->AsMirrorPtr()));
+ }
+
private:
- space::ContinuousMemMapAllocSpace* from_space_;
+ space::ContinuousMemMapAllocSpace* const from_space_;
};
void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
@@ -313,6 +331,7 @@
DCHECK(obj != nullptr);
semi_space_->VerifyNoFromSpaceReferences(obj);
}
+
private:
SemiSpace* const semi_space_;
};
@@ -670,11 +689,27 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ collector_->MarkObject(root);
+ }
+
private:
SemiSpace* const collector_;
};
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7a9e418..07309d8 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1780,7 +1780,7 @@
}
// For Object::VisitReferences.
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
@@ -1788,6 +1788,10 @@
}
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
private:
const mirror::Object* const object_;
const uint32_t max_count_;
@@ -2612,15 +2616,14 @@
return fail_count_->LoadSequentiallyConsistent();
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(klass);
if (verify_referent_) {
VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
}
}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
}
@@ -2629,7 +2632,19 @@
return heap_->IsLiveObjectLocked(obj, true, false, true);
}
- void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
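+    // Reuse the non-const VisitRoot(Object*, RootInfo) overload below, hence the const_cast.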
+ const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
+ root->AsMirrorPtr(), RootInfo(kRootVMInternal));
+ }
+
+ virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_) {
if (root == nullptr) {
LOG(ERROR) << "Root is null with info " << root_info.GetType();
@@ -2747,7 +2762,7 @@
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
}
- void operator()(mirror::Object* obj) const
+ void operator()(mirror::Object* obj)
SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Note: we are verifying the references in obj but not obj itself, this is because obj must
// be live or else how did we find it in the live bitmap?
@@ -2762,8 +2777,7 @@
visitor->operator()(obj);
}
- void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!Locks::heap_bitmap_lock_) {
+ void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
Runtime::Current()->VisitRoots(&visitor);
@@ -2860,6 +2874,11 @@
: heap_(heap), failed_(failed) {
}
+  // There are no card marks for native roots on a class.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index fc27315..069e346 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -680,6 +680,8 @@
// linked yet.
VisitStaticFieldsReferences<kVisitClass>(this, visitor);
}
+ // Since this class is reachable, we must also visit the associated roots when we scan it.
+ VisitNativeRoots(visitor, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
}
template<ReadBarrierOption kReadBarrierOption>
@@ -816,20 +818,22 @@
if (sfields != nullptr) {
for (size_t i = 0, count = NumStaticFields(); i < count; ++i) {
auto* f = &sfields[i];
+ // Visit roots first in case the declaring class gets moved.
+ f->VisitRoots(visitor);
if (kIsDebugBuild && IsResolved()) {
CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus();
}
- f->VisitRoots(visitor);
}
}
ArtField* const ifields = GetIFieldsUnchecked();
if (ifields != nullptr) {
for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) {
auto* f = &ifields[i];
+ // Visit roots first in case the declaring class gets moved.
+ f->VisitRoots(visitor);
if (kIsDebugBuild && IsResolved()) {
CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus();
}
- f->VisitRoots(visitor);
}
}
// We may see GetDirectMethodsPtr() == null with NumDirectMethods() != 0 if the root marking
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 27dd743..80decaa 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -63,6 +63,11 @@
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
}
+ // Unused since we don't copy class native roots.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
private:
Object* const dest_obj_;
};