Revert "Handle OOM situation in java-heap more aggressively"
This reverts commit a698102d622e1cff97a30428878c8075059f396c.
Reason for revert: Causing ART test failures. We need to take care of those tests before resubmitting. The revert will also help in tracking down b/154100060 (OOME in ThreadStress), which started occurring before this CL was merged.
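For context, the change being reverted gated the post-GC allocation retries on a minimum-free-heap check: after a GC triggered by allocation failure, the retry only happened if at least kMinFreeHeapAfterGcForAlloc (1%) of the growth limit was free; otherwise the path fell through to OOME. A minimal standalone sketch of that predicate, lifted from the have_reclaimed_enough() lambda in the diff below (the free function and its parameters are stand-ins for the Heap members GetBytesAllocated() and growth_limit_, not the real ART API):

    #include <cstddef>

    // After a GC (due to allocation failure) at least this fraction of the
    // growth limit should be free; otherwise the allocator gives up and throws OOME.
    constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;

    // Stand-in for the have_reclaimed_enough() lambda removed by this revert;
    // the parameters replace the Heap members GetBytesAllocated() and growth_limit_.
    bool HaveReclaimedEnough(size_t bytes_allocated, size_t growth_limit) {
      double curr_free_heap =
          static_cast<double>(growth_limit - bytes_allocated) / growth_limit;
      return curr_free_heap >= kMinFreeHeapAfterGcForAlloc;
    }

The effect was that a GC which ran but freed almost nothing no longer led to an allocation retry, which is what made the OOM handling more aggressive.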
Change-Id: Ieb37d38f3c756a5252cc7f30d60cf82a7f95c49c
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 401394a..494df83 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1750,9 +1750,6 @@
                                              size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated,
                                              ObjPtr<mirror::Class>* klass) {
-  // After a GC (due to allocation failure) we should retrieve at least this
-  // fraction of the current max heap size. Otherwise throw OOME.
-  constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;
   bool was_default_allocator = allocator == GetCurrentAllocator();
   // Make sure there is no pending exception since we may need to throw an OOME.
   self->AssertNoPendingException();
@@ -1797,36 +1794,50 @@
     }
   }
-  auto have_reclaimed_enough = [&]() {
-    size_t curr_bytes_allocated = GetBytesAllocated();
-    double curr_free_heap =
-        static_cast<double>(growth_limit_ - curr_bytes_allocated) / growth_limit_;
-    return curr_free_heap >= kMinFreeHeapAfterGcForAlloc;
-  };
-  // We perform one GC as per the next_gc_type_ (chosen in GrowForUtilization),
-  // if it's not already tried. If that doesn't succeed then go for the most
-  // exhaustive option. Perform a full-heap collection including clearing
-  // SoftReferences. In case of ConcurrentCopying, it will also ensure that
-  // all regions are evacuated. If allocation doesn't succeed even after that
-  // then there is no hope, so we throw OOME.
   collector::GcType tried_type = next_gc_type_;
-  if (last_gc < tried_type) {
-    const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
-        CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
+  const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
+      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
+  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+      (!instrumented && EntrypointsInstrumented())) {
+    return nullptr;
+  }
+  if (gc_ran) {
+    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+                                                     usable_size, bytes_tl_bulk_allocated);
+    if (ptr != nullptr) {
+      return ptr;
+    }
+  }
+
+  // Loop through our different Gc types and try to Gc until we get enough free memory.
+  for (collector::GcType gc_type : gc_plan_) {
+    if (gc_type == tried_type) {
+      continue;
+    }
+    // Attempt to run the collector, if we succeed, re-try the allocation.
+    const bool plan_gc_ran = PERFORM_SUSPENDING_OPERATION(
+        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
     if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
         (!instrumented && EntrypointsInstrumented())) {
       return nullptr;
     }
-    if (gc_ran && have_reclaimed_enough()) {
-      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator,
-                                                       alloc_size, bytes_allocated,
+    if (plan_gc_ran) {
+      // Did we free sufficient memory for the allocation to succeed?
+      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                        usable_size, bytes_tl_bulk_allocated);
       if (ptr != nullptr) {
         return ptr;
       }
     }
   }
+  // Allocations have failed after GCs; this is an exceptional state.
+  // Try harder, growing the heap if necessary.
+  mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
+                                                  usable_size, bytes_tl_bulk_allocated);
+  if (ptr != nullptr) {
+    return ptr;
+  }
   // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
   // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
   // VM spec requires that all SoftReferences have been collected and cleared before throwing
@@ -1841,12 +1852,8 @@
       (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
   }
-  mirror::Object* ptr = nullptr;
-  if (have_reclaimed_enough()) {
-    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
-                                    usable_size, bytes_tl_bulk_allocated);
-  }
-
+  ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
+                                  bytes_tl_bulk_allocated);
   if (ptr == nullptr) {
     const uint64_t current_time = NanoTime();
     switch (allocator) {
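For reference, the slow path restored by this revert escalates through the GC types before giving up. A condensed, compilable sketch of that ordering (Object, RunGc, TryAlloc, ThrowOome, and the function signature are placeholders for the machinery in runtime/gc/heap.cc, not the real ART API):

    #include <vector>

    struct Object {};
    enum GcType { kGcTypeSticky, kGcTypePartial, kGcTypeFull };

    // Placeholders for CollectGarbageInternal, TryToAllocate, and throwing OOME.
    bool RunGc(GcType type, bool clear_soft_references = false);
    Object* TryAlloc(bool grow);
    void ThrowOome();

    // gc_plan is assumed non-empty and ordered from cheapest to most exhaustive.
    Object* AllocateSlowPath(GcType next_gc_type, const std::vector<GcType>& gc_plan) {
      // 1. One GC of the type chosen earlier (next_gc_type_), then retry.
      if (RunGc(next_gc_type)) {
        if (Object* ptr = TryAlloc(/*grow=*/false)) {
          return ptr;
        }
      }
      // 2. Escalate through the remaining GC types in gc_plan_.
      for (GcType gc_type : gc_plan) {
        if (gc_type == next_gc_type) {
          continue;
        }
        if (RunGc(gc_type)) {
          if (Object* ptr = TryAlloc(/*grow=*/false)) {
            return ptr;
          }
        }
      }
      // 3. Try harder: let the allocation grow the heap.
      if (Object* ptr = TryAlloc(/*grow=*/true)) {
        return ptr;
      }
      // 4. Last resort: the most exhaustive GC, clearing SoftReferences, then one more try.
      RunGc(gc_plan.back(), /*clear_soft_references=*/true);
      if (Object* ptr = TryAlloc(/*grow=*/true)) {
        return ptr;
      }
      // 5. Nothing worked: throw OutOfMemoryError.
      ThrowOome();
      return nullptr;
    }

The reverted change, by contrast, ran step 1 only when that GC type had not already been tried, dropped steps 2 and 3 entirely, and gated the retries in steps 1 and 4 on the have_reclaimed_enough() check, throwing OOME sooner.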