Address some comments and clean up

- Rename InternTable::InternImageString to InternStrongImageString and
  document that it may be called while holding locks.
- Check the weak root state (or weak ref access when using the read
  barrier) in InternTable::Insert, and stop resetting the state in
  SweepInternTableWeaks since Runtime::AllowNewSystemWeaks already
  restores it.
- Rename kWaitingWeakRootRead to kWaitingWeakGcRootRead.
- Add ASSERT_CAPABILITY / ASSERT_SHARED_CAPABILITY annotations to the
  ReaderWriterMutex assertion functions.
- Fix comments and typos.

Change-Id: I538cf204f1c89d5fc81f8fc5e5800fcf1cf87359
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 17d75a3..953b1de 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -719,7 +719,7 @@
}
// InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
// we are guaranteed to not have GC during image writing.
- mirror::String* const interned = Runtime::Current()->GetInternTable()->InternImageString(
+ mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString(
obj->AsString());
if (obj != interned) {
if (!IsImageBinSlotAssigned(interned)) {
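
For context on why the image writer interns strings here at all: replacing each string object with its canonical interned instance means identical strings collapse to a single copy in the image. A minimal standalone sketch of that deduplication idea, using made-up toy types rather than ART's mirror::String or ImageWriter:

    #include <string>
    #include <unordered_set>
    #include <vector>

    // Toy illustration only: intern each string and keep the canonical pointer,
    // so duplicates collapse to one instance in the "image".
    struct ToyImageWriter {
      std::unordered_set<std::string> intern_table;   // canonical instances
      std::vector<const std::string*> image_objects;  // what would be written out

      void AddString(const std::string& s) {
        // insert() hands back the existing element if s was already interned.
        const std::string* interned = &*intern_table.insert(s).first;
        image_objects.push_back(interned);
      }
    };
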
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 02f9f58..94977fb 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -51,7 +51,7 @@
// to sleep, resulting in a deadlock.
// Increment the count by delta, wait on condition if count is non zero.
- void Increment(Thread* self, int delta) REQUIRES(!lock_);;
+ void Increment(Thread* self, int delta) REQUIRES(!lock_);
// Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
// true if time out occurred.
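
Since the comments above spell out the Increment() contract (add delta, then block until the count drains to zero), a self-contained toy analogue may help; this is not ART's Barrier, just the same counter shape:

    #include <condition_variable>
    #include <mutex>

    // Toy counter barrier: Increment(delta) adds delta and then blocks until the
    // count reaches zero; Pass() decrements without blocking.
    class ToyBarrier {
     public:
      explicit ToyBarrier(int count) : count_(count) {}

      // Worker side: report completion of one unit of work.
      void Pass() {
        std::lock_guard<std::mutex> lock(mu_);
        if (--count_ == 0) {
          cv_.notify_all();
        }
      }

      // Coordinator side: register `delta` expected completions, then wait.
      void Increment(int delta) {
        std::unique_lock<std::mutex> lock(mu_);
        count_ += delta;
        cv_.wait(lock, [this] { return count_ == 0; });
      }

     private:
      std::mutex mu_;
      std::condition_variable cv_;
      int count_;
    };

A coordinator constructed with a count of 0 can dispatch N tasks that each call Pass(), then call Increment(N) to block until every task has finished.
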
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d0504d9..2801fb7 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -332,36 +332,40 @@
bool IsExclusiveHeld(const Thread* self) const;
// Assert the current thread has exclusive access to the ReaderWriterMutex.
- void AssertExclusiveHeld(const Thread* self) {
+ void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(IsExclusiveHeld(self)) << *this;
}
}
- void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }
+ void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
// Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
- void AssertNotExclusiveHeld(const Thread* self) {
+ void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsExclusiveHeld(self)) << *this;
}
}
- void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }
+ void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
+ AssertNotExclusiveHeld(self);
+ }
// Is the current thread a shared holder of the ReaderWriterMutex.
bool IsSharedHeld(const Thread* self) const;
// Assert the current thread has shared access to the ReaderWriterMutex.
- void AssertSharedHeld(const Thread* self) {
+ void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
if (kDebugLocking && (gAborting == 0)) {
// TODO: we can only assert this well when self != null.
CHECK(IsSharedHeld(self) || self == nullptr) << *this;
}
}
- void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
+ void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
+ AssertSharedHeld(self);
+ }
// Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
// mode.
- void AssertNotHeld(const Thread* self) {
+ void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsSharedHeld(self)) << *this;
}
@@ -679,6 +683,7 @@
class Roles {
public:
+ // Uninterruptible means that the thread may not become suspended.
static Uninterruptible uninterruptible_;
};
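
The ASSERT_CAPABILITY and ASSERT_SHARED_CAPABILITY macros used above map, for Clang builds, to the assert_capability family of thread-safety attributes: they tell -Wthread-safety that the capability is known (dynamically) to be held from the call onwards, which is what these runtime CHECKs establish. A small standalone illustration with the raw attributes; ToyMutex and the globals are made up for this sketch, not ART's Mutex:

    #include <mutex>

    class __attribute__((capability("mutex"))) ToyMutex {
     public:
      void Lock() __attribute__((acquire_capability()));
      void Unlock() __attribute__((release_capability()));
      // After a call to this, the analysis treats the mutex as held.
      void AssertHeld() __attribute__((assert_capability(this)));

     private:
      std::mutex mu_;
    };

    // Bodies just forward to std::mutex; the analysis is disabled for them since
    // std::mutex is not an annotated capability.
    __attribute__((no_thread_safety_analysis)) void ToyMutex::Lock() { mu_.lock(); }
    __attribute__((no_thread_safety_analysis)) void ToyMutex::Unlock() { mu_.unlock(); }
    void ToyMutex::AssertHeld() {}

    ToyMutex gLock;
    int gCounter __attribute__((guarded_by(gLock)));

    // Without the AssertHeld() call, clang -Wthread-safety would warn that
    // gCounter is written without holding gLock.
    void BumpAssumingLockHeld() {
      gLock.AssertHeld();
      ++gCounter;
    }

    int main() {
      gLock.Lock();
      BumpAssumingLockHeld();
      gLock.Unlock();
      return 0;
    }
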
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 252a47d..4182954 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -107,13 +107,14 @@
return item.IsNull();
}
};
- // hash set which hashes class descriptor, and compares descriptors nad class loaders. Results
+ // hash set which hashes class descriptor, and compares descriptors and class loaders. Results
// should be compared for a matching Class descriptor and class loader.
typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>>
ClassSet;
// TODO: shard lock to have one per class loader.
+ // We have a vector of sets so that FreezeSnapshot can help prevent dirty pages after the zygote forks.
std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
};
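
To make the FreezeSnapshot note concrete, here is a toy version of the vector-of-sets idea (not ART's ClassTable): after a snapshot the older sets are never written again, so their copy-on-write pages can stay clean in processes forked from the zygote, while lookups still walk every set.

    #include <string>
    #include <unordered_set>
    #include <vector>

    class ToyClassTable {
     public:
      ToyClassTable() : sets_(1) {}

      bool Contains(const std::string& descriptor) const {
        for (const auto& set : sets_) {
          if (set.count(descriptor) != 0) {
            return true;
          }
        }
        return false;
      }

      void Insert(const std::string& descriptor) {
        sets_.back().insert(descriptor);  // only the newest set is ever mutated
      }

      // Called around zygote fork time: everything inserted so far is frozen.
      void FreezeSnapshot() {
        sets_.emplace_back();
      }

     private:
      std::vector<std::unordered_set<std::string>> sets_;
    };
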
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 1865516..e1e130b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2094,7 +2094,7 @@
case kWaitingInMainDebuggerLoop:
case kWaitingInMainSignalCatcherLoop:
case kWaitingPerformingGc:
- case kWaitingWeakRootRead:
+ case kWaitingWeakGcRootRead:
case kWaiting:
return JDWP::TS_WAIT;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
diff --git a/runtime/gc/weak_root_state.h b/runtime/gc/weak_root_state.h
index b66f19d..e3cefc4 100644
--- a/runtime/gc/weak_root_state.h
+++ b/runtime/gc/weak_root_state.h
@@ -28,6 +28,8 @@
// Need to wait until we can read weak roots.
kWeakRootStateNoReadsOrWrites,
// Need to mark new weak roots to make sure they don't get swept.
+ // kWeakRootStateMarkNewRoots is currently unused but I was planning on using it to allow adding
+ // new weak roots during the CMS reference processing phase.
kWeakRootStateMarkNewRoots,
};
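
A toy model of how these states gate readers may help; this WeakRootGate class is hypothetical, not ART's Heap or InternTable machinery. The GC switches to kWeakRootStateNoReadsOrWrites for the pause and the sweep, and readers block until the state goes back to normal.

    #include <condition_variable>
    #include <mutex>

    enum class WeakRootState { kNormal, kNoReadsOrWrites, kMarkNewRoots };

    class WeakRootGate {
     public:
      // GC pause: weak roots may no longer be read.
      void Disallow() {
        std::lock_guard<std::mutex> lock(mu_);
        state_ = WeakRootState::kNoReadsOrWrites;
      }

      // After the system weaks have been swept: readers may continue.
      void Allow() {
        {
          std::lock_guard<std::mutex> lock(mu_);
          state_ = WeakRootState::kNormal;
        }
        cv_.notify_all();
      }

      // Reader side: wait until weak roots are readable again.
      void WaitUntilReadable() {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return state_ != WeakRootState::kNoReadsOrWrites; });
      }

     private:
      std::mutex mu_;
      std::condition_variable cv_;
      WeakRootState state_ = WeakRootState::kNormal;
    };
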
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index ae521b1..2be570a 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -90,7 +90,6 @@
}
mirror::String* InternTable::LookupWeak(mirror::String* s) {
- // TODO: Return only if marked.
return weak_interns_.Find(s);
}
@@ -229,7 +228,7 @@
void InternTable::WaitUntilAccessible(Thread* self) {
Locks::intern_table_lock_->ExclusiveUnlock(self);
- self->TransitionFromRunnableToSuspended(kWaitingWeakRootRead);
+ self->TransitionFromRunnableToSuspended(kWaitingWeakGcRootRead);
Locks::intern_table_lock_->ExclusiveLock(self);
while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
weak_intern_condition_.Wait(self);
@@ -250,24 +249,35 @@
CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
}
while (true) {
+ if (holding_locks) {
+ if (!kUseReadBarrier) {
+ CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
+ } else {
+ CHECK(self->GetWeakRefAccessEnabled());
+ }
+ }
// Check the strong table for a match.
mirror::String* strong = LookupStrong(s);
if (strong != nullptr) {
return strong;
}
+ if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
+ (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
+ break;
+ }
// weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
// cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
// cleared.
- if (weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
- break;
- }
CHECK(!holding_locks);
StackHandleScope<1> hs(self);
auto h = hs.NewHandleWrapper(&s);
WaitUntilAccessible(self);
}
- CHECK_NE(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
- DCHECK_NE(weak_root_state_, gc::kWeakRootStateMarkNewRoots) << "Unsupported";
+ if (!kUseReadBarrier) {
+ CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
+ } else {
+ CHECK(self->GetWeakRefAccessEnabled());
+ }
// There is no match in the strong table, check the weak table.
mirror::String* weak = LookupWeak(s);
if (weak != nullptr) {
@@ -298,7 +308,7 @@
return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}
-mirror::String* InternTable::InternImageString(mirror::String* s) {
+mirror::String* InternTable::InternStrongImageString(mirror::String* s) {
// May be holding the heap bitmap lock.
return Insert(s, true, true);
}
@@ -319,8 +329,6 @@
void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
weak_interns_.SweepWeaks(visitor);
- // Done sweeping, back to a normal state.
- ChangeWeakRootStateLocked(gc::kWeakRootStateNormal);
}
void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
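
The shape of Insert() above is easier to follow in a stripped-down form. A self-contained sketch (a std::string toy with no read barrier, not ART's InternTable) of the same control flow: never wait when the caller already holds locks, check the strong table first, and only touch the weak table once weak access is allowed.

    #include <cassert>
    #include <condition_variable>
    #include <mutex>
    #include <string>
    #include <unordered_set>

    class ToyInternTable {
     public:
      const std::string* Insert(const std::string& s, bool is_strong, bool holding_locks) {
        std::unique_lock<std::mutex> lock(mu_);
        while (true) {
          if (holding_locks) {
            assert(weak_access_enabled_);  // may never block while holding locks
          }
          auto strong_it = strong_.find(s);
          if (strong_it != strong_.end()) {
            return &*strong_it;            // strong entries are always readable
          }
          if (weak_access_enabled_) {
            break;                         // safe to consult the weak table
          }
          assert(!holding_locks);
          cv_.wait(lock);                  // weak roots are being swept; retry
        }
        auto weak_it = weak_.find(s);
        if (weak_it != weak_.end()) {
          if (!is_strong) {
            return &*weak_it;
          }
          std::string promoted = *weak_it;  // promote weak -> strong
          weak_.erase(weak_it);
          return &*strong_.insert(std::move(promoted)).first;
        }
        return is_strong ? &*strong_.insert(s).first : &*weak_.insert(s).first;
      }

      // Mirrors the GC toggling the weak root state / weak ref access.
      void SetWeakAccessEnabled(bool enabled) {
        {
          std::lock_guard<std::mutex> lock(mu_);
          weak_access_enabled_ = enabled;
        }
        cv_.notify_all();
      }

     private:
      std::mutex mu_;
      std::condition_variable cv_;
      bool weak_access_enabled_ = true;
      std::unordered_set<std::string> strong_;
      std::unordered_set<std::string> weak_;
    };
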
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 0be6675..ae9f7a7 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -61,8 +61,10 @@
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// Only used by image writer. Special version that may not cause thread suspension since the GC
- // can not be running while we are doing image writing.
- mirror::String* InternImageString(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_);
+ // can not be running while we are doing image writing. May be called while holding a
+ // lock since there will not be thread suspension.
+ mirror::String* InternStrongImageString(mirror::String* s)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_)
@@ -184,7 +186,9 @@
UnorderedSet post_zygote_table_;
};
- // Insert if non null, otherwise return null.
+ // Insert if non null, otherwise return null. Must be called holding the mutator lock.
+ // If holding_locks is true, then we may also hold other locks, and we require that the GC is
+ // not running since it is not safe to wait while holding locks.
mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
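
For reference, one way these declarations can fan into the single Insert() helper. Only the image-string mapping is shown by the .cc hunk above (Insert(s, true, true)); the flag values for the other two wrappers are assumptions made for this sketch, and the types are stand-ins rather than ART's.

    class ToyInternApi {
     public:
      const char* InternStrongImageString(const char* s) {
        // Safe while the caller holds other locks: no thread suspension happens.
        return Insert(s, /*is_strong=*/true, /*holding_locks=*/true);
      }
      const char* InternStrong(const char* s) {
        return Insert(s, /*is_strong=*/true, /*holding_locks=*/false);
      }
      const char* InternWeak(const char* s) {
        return Insert(s, /*is_strong=*/false, /*holding_locks=*/false);
      }

     private:
      // Placeholder body so the sketch stands alone; the real helper does the
      // table lookups and waiting described in intern_table.cc.
      const char* Insert(const char* s, bool /*is_strong*/, bool /*holding_locks*/) {
        return s;
      }
    };
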
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 9de9e8a..f923b84 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -884,7 +884,7 @@
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
@@ -897,7 +897,7 @@
// Explicit DoLambdaCall template function declarations.
#define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, \
@@ -911,7 +911,7 @@
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst, \
const ShadowFrame& shadow_frame, \
Thread* self, JValue* result)
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index f5ac9d0..ae02fe6 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -128,6 +128,9 @@
* the debugger.
*
* Returns a newly-allocated JdwpState struct on success, or nullptr on failure.
+ *
+ * NO_THREAD_SAFETY_ANALYSIS since we can't annotate that we do not have
+ * state->thread_start_lock_ held.
*/
static JdwpState* Create(const JdwpOptions* options)
REQUIRES(!Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
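
To make the new comment concrete: NO_THREAD_SAFETY_ANALYSIS corresponds to Clang's no_thread_safety_analysis attribute and simply turns the analysis off for one function. That is the usual escape hatch when the locking cannot be expressed, here because the lock belongs to the object being created. A toy version with a hypothetical ToyJdwpState, not the real JdwpState:

    #include <mutex>

    #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

    struct ToyJdwpState {
      std::mutex thread_start_lock_;
      bool debug_thread_started_ = false;
    };

    // There is no way to write REQUIRES(!state->thread_start_lock_) here because
    // `state` does not exist until the function runs, so the analysis is disabled.
    ToyJdwpState* CreateToyJdwpState() NO_THREAD_SAFETY_ANALYSIS;

    ToyJdwpState* CreateToyJdwpState() {
      ToyJdwpState* state = new ToyJdwpState();
      std::lock_guard<std::mutex> guard(state->thread_start_lock_);
      state->debug_thread_started_ = false;
      return state;
    }
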
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index b40d94a..7118f36 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -90,7 +90,7 @@
case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
case kWaitingForMethodTracingStart: return kJavaWaiting;
case kWaitingForVisitObjects: return kJavaWaiting;
- case kWaitingWeakRootRead: return kJavaWaiting;
+ case kWaitingWeakGcRootRead: return kJavaWaiting;
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a27acb2..b08e521 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1516,7 +1516,7 @@
void Runtime::AllowNewSystemWeaks() {
monitor_list_->AllowNewMonitors();
- intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping?
+ intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping.
java_vm_->AllowNewWeakGlobals();
heap_->AllowNewAllocationRecords();
lambda_box_table_->AllowNewWeakBoxedLambdas();
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b3efad0..7433600 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2736,7 +2736,7 @@
size_t Thread::NumberOfHeldMutexes() const {
size_t count = 0;
for (BaseMutex* mu : tlsPtr_.held_mutexes) {
- count += static_cast<size_t>(mu != nullptr);
+ count += mu != nullptr ? 1 : 0;
}
return count;
}
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index c000e61..a11d213 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -43,7 +43,7 @@
kWaitingForMethodTracingStart, // WAITING TS_WAIT waiting for method tracing to start
kWaitingForVisitObjects, // WAITING TS_WAIT waiting for visiting objects
kWaitingForGetObjectsAllocated, // WAITING TS_WAIT waiting for getting the number of allocated objects
- kWaitingWeakRootRead, // WAITING TS_WAIT waiting to read a weak root
+ kWaitingWeakGcRootRead, // WAITING TS_WAIT waiting on the GC to read a weak root
kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger