pooled_mark_stacks: Add extra logging

Maintain a map from each thread to its associated mark stack. Whenever
pooled_mark_stacks_ reaches kMarkStackPoolSize in size (i.e. all mark
stacks have been returned to the pool), this map should be empty. There
should also be no entry in the map for a thread that is being destroyed.

This change is to be reverted once the bug is fixed.

Bug: 140119552
Test: art/test/testrunner/testrunner.py
Change-Id: I415ccd210985878dd6689ee2ef1e35712d464150
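
For context, here is a minimal standalone sketch of the bookkeeping invariant
the new CHECKs assert. This is illustration only, not part of the patch: the
class MarkStackBookkeeping and the names Acquire/Revoke are hypothetical, and
the staging of revoked stacks through revoked_mark_stacks_ is elided. The real
logic lives in ConcurrentCopying and runs under mark_stack_lock_.

#include <cassert>
#include <cstddef>
#include <unordered_map>
#include <vector>

// Hypothetical stand-ins for ART's Thread and accounting::ObjectStack.
struct Thread {};
struct ObjectStack {};

constexpr size_t kMarkStackPoolSize = 256;

class MarkStackBookkeeping {
 public:
  MarkStackBookkeeping() {
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      pool_.push_back(new ObjectStack());
    }
  }

  // Hand a pooled stack to |thread| and record the mapping.
  ObjectStack* Acquire(Thread* thread) {
    assert(!pool_.empty());
    ObjectStack* stack = pool_.back();
    pool_.pop_back();
    bool inserted = map_.insert({thread, stack}).second;
    assert(inserted);  // At most one stack per thread.
    return stack;
  }

  // Return |stack| to the pool; the mapping must exist and match.
  void Revoke(Thread* thread, ObjectStack* stack) {
    auto it = map_.find(thread);
    assert(it != map_.end() && it->second == stack);
    map_.erase(it);
    pool_.push_back(stack);
  }

  // The invariant the patch's CHECKs express: once every stack is back
  // in the pool, no thread may still be mapped to one.
  void AssertQuiescent() const {
    assert(pool_.size() == kMarkStackPoolSize);
    assert(map_.empty());
  }

 private:
  std::vector<ObjectStack*> pool_;
  std::unordered_map<Thread*, ObjectStack*> map_;
};

In the patch itself, Acquire corresponds to AddThreadMarkStackMapping at the
site where a thread obtains a new thread-local mark stack, and Revoke to
RemoveThreadMarkStackMapping in the revoke paths; AssertQuiescent corresponds
to the paired CHECKs on thread_mark_stack_map_ and pooled_mark_stacks_.
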
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 274fd32..a1663c8 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -944,6 +944,30 @@
Thread* const self_;
};
+// Erase |thread|'s entry, which must exist and match |tl_mark_stack|; caller holds mark_stack_lock_.
+void ConcurrentCopying::RemoveThreadMarkStackMapping(Thread* thread,
+ accounting::ObjectStack* tl_mark_stack) {
+ CHECK(tl_mark_stack != nullptr);
+ auto it = thread_mark_stack_map_.find(thread);
+ CHECK(it != thread_mark_stack_map_.end());
+ CHECK(it->second == tl_mark_stack);
+ thread_mark_stack_map_.erase(it);
+}
+
+// Assert that |thread| has no mark-stack mapping; used during thread teardown.
+void ConcurrentCopying::AssertNoThreadMarkStackMapping(Thread* thread) {
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
+ CHECK(thread_mark_stack_map_.find(thread) == thread_mark_stack_map_.end());
+}
+
+// Record that |thread| now owns |tl_mark_stack|; at most one entry per thread.
+void ConcurrentCopying::AddThreadMarkStackMapping(Thread* thread,
+ accounting::ObjectStack* tl_mark_stack) {
+ CHECK(tl_mark_stack != nullptr);
+ CHECK(thread_mark_stack_map_.find(thread) == thread_mark_stack_map_.end());
+ thread_mark_stack_map_.insert({thread, tl_mark_stack});
+}
+
class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
public:
RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
@@ -963,6 +984,7 @@
MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
thread->SetThreadLocalMarkStack(nullptr);
+ concurrent_copying_->RemoveThreadMarkStackMapping(thread, tl_mark_stack);
}
// Disable weak ref access.
if (disable_weak_ref_access_) {
@@ -1779,7 +1801,9 @@
if (tl_mark_stack != nullptr) {
// Store the old full stack into a vector.
revoked_mark_stacks_.push_back(tl_mark_stack);
+ RemoveThreadMarkStackMapping(self, tl_mark_stack);
}
+ AddThreadMarkStackMapping(self, new_tl_mark_stack);
} else {
tl_mark_stack->PushBack(to_ref);
}
@@ -2006,6 +2030,7 @@
CHECK(is_marking_);
MutexLock mu(self, mark_stack_lock_);
revoked_mark_stacks_.push_back(tl_mark_stack);
+ RemoveThreadMarkStackMapping(thread, tl_mark_stack);
thread->SetThreadLocalMarkStack(nullptr);
}
}
@@ -2055,6 +2080,10 @@
{
MutexLock mu(thread_running_gc_, mark_stack_lock_);
CHECK(revoked_mark_stacks_.empty());
+ CHECK(thread_mark_stack_map_.empty()) << " size:"
+ << thread_mark_stack_map_.size()
+ << " pooled_mark_stacks size:"
+ << pooled_mark_stacks_.size();
CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
}
while (true) {
@@ -2082,6 +2111,10 @@
{
MutexLock mu(thread_running_gc_, mark_stack_lock_);
CHECK(revoked_mark_stacks_.empty());
+ CHECK(thread_mark_stack_map_.empty()) << " size:"
+ << thread_mark_stack_map_.size()
+ << " pooled_mark_stacks size:"
+ << pooled_mark_stacks_.size();
CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
}
// Process the GC mark stack in the exclusive mode. No need to take the lock.
@@ -2369,6 +2402,10 @@
MutexLock mu(thread_running_gc_, mark_stack_lock_);
CHECK(gc_mark_stack_->IsEmpty());
CHECK(revoked_mark_stacks_.empty());
+ CHECK(thread_mark_stack_map_.empty()) << " size:"
+ << thread_mark_stack_map_.size()
+ << " pooled_mark_stacks size:"
+ << pooled_mark_stacks_.size();
CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
}
}
@@ -3632,6 +3669,10 @@
{
MutexLock mu(self, mark_stack_lock_);
CHECK(revoked_mark_stacks_.empty());
+ CHECK(thread_mark_stack_map_.empty()) << " size:"
+ << thread_mark_stack_map_.size()
+ << " pooled_mark_stacks size:"
+ << pooled_mark_stacks_.size();
CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
}
// kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 0cb5a3e..bc6d7b5 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -24,6 +24,7 @@
#include <map>
#include <memory>
+#include <unordered_map>
#include <vector>
namespace art {
@@ -155,6 +156,8 @@
mirror::Object* IsMarked(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_);
+ void AssertNoThreadMarkStackMapping(Thread* thread) REQUIRES(!mark_stack_lock_);
+
private:
void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -322,6 +325,11 @@
void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
+ void RemoveThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
+ REQUIRES(mark_stack_lock_);
+ void AddThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
+ REQUIRES(mark_stack_lock_);
+
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
@@ -359,6 +367,9 @@
static constexpr size_t kMarkStackPoolSize = 256;
std::vector<accounting::ObjectStack*> pooled_mark_stacks_
GUARDED_BY(mark_stack_lock_);
+ // TODO(lokeshgidra b/140119552): remove this once the bug is fixed.
+ std::unordered_map<Thread*, accounting::ObjectStack*> thread_mark_stack_map_
+ GUARDED_BY(mark_stack_lock_);
Thread* thread_running_gc_;
bool is_marking_; // True while marking is ongoing.
// True while we might dispatch on the read barrier entrypoints.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 31fc730..10a4365 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2464,6 +2464,8 @@
if (kUseReadBarrier) {
CHECK(tlsPtr_.thread_local_mark_stack == nullptr);
+ Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()
+ ->AssertNoThreadMarkStackMapping(this);
}
// Make sure we processed all deoptimization requests.
CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";