Merge "Revert^2 "ARM: Reimplement the UnsafeCASObject intrinsic.""
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index cde5dc7..783f2fc 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -151,7 +151,9 @@
case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
return MarkUnevacFromSpaceRegion(self, from_ref, region_space_bitmap_);
default:
- // The reference is in an unused region.
+ // The reference is in an unused region. Remove memory protection from
+ // the region space and log debugging information.
+ region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref);
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
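
// A minimal standalone sketch (not ART code, POSIX only) of why
// region_space_->Unprotect() has to run before the dumps above: when cleared
// regions are protected, dereferencing a stale reference into one of them
// faults before any diagnostics get printed. Removing the protection first
// lets the dump actually read the memory.
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstdio>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  // Stand-in for a cleared region: mapped, but inaccessible.
  void* region = mmap(nullptr, page, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(region != MAP_FAILED);
  // Reading from `region` here would raise SIGSEGV mid-diagnostics.
  // "Unprotect" it first, the way RegionSpace::Unprotect() does via mprotect:
  int rc = mprotect(region, page, PROT_READ | PROT_WRITE);
  assert(rc == 0);
  (void)rc;
  std::printf("first byte readable after unprotect: %d\n",
              static_cast<unsigned char*>(region)[0]);
  munmap(region, page);
  return 0;
}
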
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 07abbfc..7688b54 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1873,17 +1873,20 @@
} else if (type == RegionType::kRegionTypeUnevacFromSpace) {
if (!IsMarkedInUnevacFromSpace(ref)) {
LOG(FATAL_WITHOUT_ABORT) << "Found unmarked reference in unevac from-space:";
+ // Remove memory protection from the region space and log debugging information.
+ region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
}
CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
} else {
// Not OK: either a from-space ref or a reference in an unused region.
- // Do extra logging.
if (type == RegionType::kRegionTypeFromSpace) {
LOG(FATAL_WITHOUT_ABORT) << "Found from-space reference:";
} else {
LOG(FATAL_WITHOUT_ABORT) << "Found reference in region with type " << type << ":";
}
+ // Remove memory protection from the region space and log debugging information.
+ region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
if (obj != nullptr) {
LogFromSpaceRefHolder(obj, offset);
@@ -1951,17 +1954,20 @@
} else if (type == RegionType::kRegionTypeUnevacFromSpace) {
if (!IsMarkedInUnevacFromSpace(ref)) {
LOG(FATAL_WITHOUT_ABORT) << "Found unmarked reference in unevac from-space:";
+ // Remove memory protection from the region space and log debugging information.
+ region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpGcRoot(ref);
}
CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
} else {
// Not OK: either a from-space ref or a reference in an unused region.
- // Do extra logging.
if (type == RegionType::kRegionTypeFromSpace) {
LOG(FATAL_WITHOUT_ABORT) << "Found from-space reference:";
} else {
LOG(FATAL_WITHOUT_ABORT) << "Found reference in region with type " << type << ":";
}
+ // Remove memory protection from the region space and log debugging information.
+ region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpGcRoot(ref);
if (gc_root_source == nullptr) {
// No info.
@@ -2361,6 +2367,8 @@
// from a previous GC that is either inside or outside the allocated region.
mirror::Class* klass = from_ref->GetClass<kVerifyNone, kWithoutReadBarrier>();
if (UNLIKELY(klass == nullptr)) {
+ // Remove memory protection from the region space and log debugging information.
+ region_space_->Unprotect();
heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
}
// There must not be a read barrier to avoid nested RB that might violate the to-space invariant.
@@ -2640,8 +2648,11 @@
}
}
if (is_los && !IsAligned<kPageSize>(ref)) {
- // Ref is a large object that is not aligned, it must be heap corruption. Dump data before
- // AtomicSetReadBarrierState since it will fault if the address is not valid.
+ // Ref is a large object that is not aligned; it must be heap
+ // corruption. Remove memory protection and dump data before
+ // AtomicSetReadBarrierState since it will fault if the address is not
+ // valid.
+ region_space_->Unprotect();
heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true);
}
// Not marked nor on the allocation stack. Try to mark it.
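
// Sketch of the alignment predicate behind the `is_los && !IsAligned<...>`
// check above. ART has its own IsAligned<> utility; IsAlignedSketch below is
// a hypothetical stand-in that only illustrates the idea: large objects start
// on page boundaries, so a misaligned ref cannot be a valid large object and
// is reported as heap corruption.
#include <cstddef>
#include <cstdint>

template <size_t kAlignment>
inline bool IsAlignedSketch(const void* ptr) {
  static_assert((kAlignment & (kAlignment - 1)) == 0,
                "alignment must be a power of two");
  return (reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)) == 0;
}
// e.g. IsAlignedSketch<4096>(ref) plays the role of IsAligned<kPageSize>(ref).
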
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 0569092..db8253c 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -552,6 +552,12 @@
evac_region_ = &full_region_;
}

+void RegionSpace::Unprotect() {
+ if (kProtectClearedRegions) {
+ CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_READ | PROT_WRITE);
+ }
+}
+
void RegionSpace::ClampGrowthLimit(size_t new_capacity) {
MutexLock mu(Thread::Current(), region_lock_);
CHECK_LE(new_capacity, NonGrowthLimitCapacity());
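
// Hedged sketch of a CheckedCall-style wrapper like the one Unprotect() uses
// above. The shape below is an assumption for illustration, not ART's actual
// helper: invoke a libc-style function and abort with the caller's name if it
// reports failure, so a failed mprotect cannot go unnoticed.
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>

template <typename Func, typename... Args>
void CheckedCallSketch(const Func& function, const char* what, Args... args) {
  if (function(args...) != 0) {
    std::fprintf(stderr, "%s: %s\n", what, std::strerror(errno));
    std::abort();
  }
}
// Usage mirroring the patch:
//   CheckedCallSketch(mprotect, __FUNCTION__, begin, size,
//                     PROT_READ | PROT_WRITE);
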
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 90f1f1d..eb570d2 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -108,6 +108,12 @@
void Clear() OVERRIDE REQUIRES(!region_lock_);

+ // Remove memory protection from the whole region space, i.e. make memory
+ // pages backing the region area readable and writable. This method is useful
+ // to avoid page protection faults when dumping information about an invalid
+ // reference.
+ void Unprotect();
+
// Change the non growth limit capacity to new capacity by shrinking or expanding the map.
// Currently, only shrinking is supported.
// Unlike implementations of this function in other spaces, we need to pass
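
// For context, a hedged sketch of the protection that Unprotect() removes:
// with kProtectClearedRegions enabled, cleared regions are made inaccessible
// so stale references into them fault immediately instead of silently reading
// reused memory. ProtectClearedRegion is a hypothetical name used only for
// illustration, not an ART function.
#include <sys/mman.h>
#include <cstdint>

static constexpr bool kProtectClearedRegions = true;

inline void ProtectClearedRegion(uint8_t* begin, size_t size) {
  if (kProtectClearedRegions) {
    // Unprotect() later restores PROT_READ | PROT_WRITE over the whole region
    // space before any debug dump touches this memory.
    mprotect(begin, size, PROT_NONE);
  }
}
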