/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>
#include <valgrind.h>

#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "heap-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

extern void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator);

namespace gc {

static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
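// Note: by their names, the two constants above bracket the headroom remaining when a concurrent
// GC is kicked off (at least 128 KB and at most 512 KB before the footprint limit); the clamping
// presumably happens in the footprint-growth logic later in this file.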

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity, const std::string& image_file_name,
           CollectorType post_zygote_collector_type, CollectorType background_collector_type,
           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
           size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
           bool verify_post_gc_heap)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      concurrent_gc_(false),
      collector_type_(kCollectorTypeNone),
      post_zygote_collector_type_(post_zygote_collector_type),
      background_collector_type_(background_collector_type),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      have_zygote_space_(false),
      soft_reference_queue_(this),
      weak_reference_queue_(this),
      finalizer_reference_queue_(this),
      phantom_reference_queue_(this),
      cleared_references_(this),
      is_gc_running_(false),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      native_need_to_run_finalization_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC-a-lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted),
      disable_moving_gc_count_(0),
      running_on_valgrind_(RUNNING_ON_VALGRIND),
      use_tlab_(use_tlab) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  // If we aren't the zygote, switch to the default non-zygote allocator. This may update the
  // entrypoints.
  if (!Runtime::Current()->IsZygote() || !kMovingCollector) {
    ChangeCollector(post_zygote_collector_type_);
  } else {
    // We are the zygote, use bump pointer allocation + semi space collector.
    ChangeCollector(kCollectorTypeSS);
  }

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
    AddSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory; ensure the alloc
    // space isn't placed in the middle of them.
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
    }
  }
  const char* name = Runtime::Current()->IsZygote() ? "zygote space" : "alloc space";
  space::MallocSpace* malloc_space;
  if (kUseRosAlloc) {
    malloc_space = space::RosAllocSpace::Create(name, initial_size, growth_limit, capacity,
                                                requested_alloc_space_begin, low_memory_mode_);
    CHECK(malloc_space != nullptr) << "Failed to create rosalloc space";
  } else {
    malloc_space = space::DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
                                                requested_alloc_space_begin);
    CHECK(malloc_space != nullptr) << "Failed to create dlmalloc space";
  }
  VLOG(heap) << "malloc_space : " << malloc_space;
  if (kMovingCollector) {
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
    // issues.
    const size_t bump_pointer_space_size = std::min(malloc_space->Capacity(), 128 * MB);
    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                          bump_pointer_space_size, nullptr);
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
                                                  nullptr);
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    VLOG(heap) << "bump_pointer_space : " << bump_pointer_space_;
    VLOG(heap) << "temp_space : " << temp_space_;
  }
  non_moving_space_ = malloc_space;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  AddSpace(malloc_space);
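  // Note: with a moving collector enabled, two equally sized bump pointer spaces exist; the
  // semi-space collector registered below copies live objects between them, so temp_space_
  // presumably serves as the to-space of each copying phase.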

  // Allocate the large object space.
  constexpr bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  AddSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());

  // Relies on the spaces being sorted.
  byte* heap_begin = continuous_spaces_.front()->Begin();
  byte* heap_end = continuous_spaces_.back()->Limit();
  if (Runtime::Current()->IsZygote()) {
    std::string error_str;
    post_zygote_non_moving_space_mem_map_.reset(
        MemMap::MapAnonymous("post zygote non-moving space", nullptr, 64 * MB,
                             PROT_READ | PROT_WRITE, &error_str));
    CHECK(post_zygote_non_moving_space_mem_map_.get() != nullptr) << error_str;
    heap_begin = std::min(post_zygote_non_moving_space_mem_map_->Begin(), heap_begin);
    heap_end = std::max(post_zygote_non_moving_space_mem_map_->End(), heap_end);
  }
  size_t heap_capacity = heap_end - heap_begin;

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  // Card cache for now since it makes it easier for us to update the references to the copying
  // spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
  if (kMovingCollector) {
    // TODO: Clean this up.
    bool generational = post_zygote_collector_type_ == kCollectorTypeGSS;
    semi_space_collector_ = new collector::SemiSpace(this, generational);
    garbage_collectors_.push_back(semi_space_collector_);
  }

  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  // These two allocators are only used internally and don't have any entrypoints.
  DCHECK_NE(allocator, kAllocatorTypeLOS);
  DCHECK_NE(allocator, kAllocatorTypeNonMoving);
  if (current_allocator_ != allocator) {
    current_allocator_ = allocator;
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}
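
// How the switch above takes effect (an interpretation; the plumbing lives outside this file):
// SetQuickAllocEntryPointsAllocator records which allocator generated code should use, and
// ResetQuickAllocEntryPoints then rewrites the threads' quick alloc entrypoints so compiled code
// allocates from the new space without a per-allocation check.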

bool Heap::IsCompilingBoot() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return false;
    } else if (space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  // TODO: Wait for compacting GC to complete if we ever have a concurrent compacting GC.
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GE(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}
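
// While disable_moving_gc_count_ is non-zero, moving collections are held off; callers that need
// object addresses to stay stable (for example JNI Get/ReleasePrimitiveArrayCritical, an
// assumption since the call sites are elsewhere) bracket their critical region with the
// increment/decrement pair above.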

void Heap::UpdateProcessState(ProcessState process_state) {
  if (process_state_ != process_state) {
    process_state_ = process_state;
    if (process_state_ == kProcessStateJankPerceptible) {
      TransitionCollector(post_zygote_collector_type_);
    } else {
      TransitionCollector(background_collector_type_);
    }
  } else {
    CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

void Heap::VisitObjects(ObjectVisitorCallback callback, void* arg) {
  Thread* self = Thread::Current();
  // GCs can move objects, so don't allow this.
  const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
  if (bump_pointer_space_ != nullptr) {
    // Visit objects in bump pointer space.
    bump_pointer_space_->Walk(callback, arg);
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
      it < end; ++it) {
    mirror::Object* obj = *it;
    callback(obj, arg);
  }
  GetLiveBitmap()->Walk(callback, arg);
  self->EndAssertNoThreadSuspension(old_cause);
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = rosalloc_space_ != nullptr ? rosalloc_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = dlmalloc_space_ != nullptr ? dlmalloc_space_ : non_moving_space_;
  // This is just logic to handle a case of either not having a rosalloc or dlmalloc space.
  // TODO: Generalize this to n bitmaps?
  if (space1 == nullptr) {
    DCHECK(space2 != nullptr);
    space1 = space2;
  }
  if (space2 == nullptr) {
    DCHECK(space1 != nullptr);
    space2 = space1;
  }
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 large_object_space_->GetLiveObjects(), stack);
}
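
// Interpretation: objects still sitting on the allocation stack were allocated since the last
// collection; marking them in the live bitmaps here keeps a collector that only consults the
// bitmaps from treating them as garbage before they have ever been traced.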

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

void Heap::AddSpace(space::Space* space, bool set_as_default) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    if (set_as_default) {
      if (continuous_space->IsDlMallocSpace()) {
        dlmalloc_space_ = continuous_space->AsDlMallocSpace();
      } else if (continuous_space->IsRosAllocSpace()) {
        rosalloc_space_ = continuous_space->AsRosAllocSpace();
      }
    }
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
                return a->Begin() < b->Begin();
              });
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
    if (continuous_space == dlmalloc_space_) {
      dlmalloc_space_ = nullptr;
    } else if (continuous_space == rosalloc_space_) {
      rosalloc_space_ = nullptr;
    }
    if (continuous_space == main_space_) {
      main_space_ = nullptr;
    }
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.FetchAndAdd(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.FetchAndSub(bytes);
  }
}
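
// gc_memory_overhead_ tracks bytes the GC allocates for its own bookkeeping so that
// DumpGcPerformanceInfo can report an approximate overhead figure; the unusual `this != nullptr`
// guards presumably keep these hooks safe for callers that may hold a null Heap* during early
// startup or shutdown.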

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (const auto& collector : garbage_collectors_) {
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      Histogram<uint64_t>::CumulativeData cumulative_data;
      collector->GetPauseHistogram().CreateHistogram(&cumulative_data);
      collector->GetPauseHistogram().PrintConfidenceIntervals(os, 0.99, cumulative_data);
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  VLOG(heap) << "Finished ~Heap()";
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

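// Bundles the two callbacks handed to PreserveSoftReferenceCallback below, since a C-style
// callback only receives a single void* argument.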
struct SoftReferenceArgs {
  RootVisitor* is_marked_callback_;
  RootVisitor* recursive_mark_callback_;
  void* arg_;
};

mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
  // TODO: Not preserve all soft references.
  return args->recursive_mark_callback_(obj, args->arg_);
}

// Process reference class instances and schedule finalizations.
void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
                             RootVisitor* is_marked_callback,
                             RootVisitor* recursive_mark_object_callback, void* arg) {
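  // Phase order below: (1) optionally preserve some softly reachable referents, (2) clear the
  // remaining white soft and weak referents, (3) enqueue finalizable objects (which marks them
  // reachable again), (4) re-clear soft and weak referents revived by finalizers, and (5) clear
  // phantom referents last.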
  // Unless we are in the zygote or required to clear soft references with white references,
  // preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    SoftReferenceArgs soft_reference_args;
    soft_reference_args.is_marked_callback_ = is_marked_callback;
    soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
    soft_reference_args.arg_ = arg;
    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
                                                     &soft_reference_args);
  }
  timings.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  timings.EndSplit();
  // Preserve all white objects with finalize methods and schedule them for finalization.
  timings.StartSplit("EnqueueFinalizerReferences");
  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
                                                        recursive_mark_object_callback, arg);
  timings.EndSplit();
  timings.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  timings.EndSplit();
}

bool Heap::IsEnqueued(mirror::Object* ref) const {
  // Since the references are stored as cyclic lists it means that once enqueued, the pending next
  // will always be non-null.
  return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
}

bool Heap::IsEnqueuable(const mirror::Object* ref) const {
  DCHECK(ref != nullptr);
  const mirror::Object* queue =
      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueOffset(), false);
  const mirror::Object* queue_next =
      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueNextOffset(), false);
  return queue != nullptr && queue_next == nullptr;
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
                                  RootVisitor mark_visitor, void* arg) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != nullptr);
  mirror::Object* referent = GetReferenceReferent(obj);
  if (referent != nullptr) {
    mirror::Object* forward_address = mark_visitor(referent, arg);
    // Null means that the object is not currently marked.
    if (forward_address == nullptr) {
      Thread* self = Thread::Current();
      // TODO: Remove these locks, and use atomic stacks for storing references?
      // We need to check that the references haven't already been enqueued since we can end up
      // scanning the same reference multiple times due to dirty cards.
      if (klass->IsSoftReferenceClass()) {
        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsWeakReferenceClass()) {
        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsFinalizerReferenceClass()) {
        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsPhantomReferenceClass()) {
        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else {
        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                   << klass->GetAccessFlags();
      }
    } else if (referent != forward_address) {
      // Referent is already marked and we need to update it.
      SetReferenceReferent(obj, forward_address);
    }
  }
}

space::ImageSpace* Heap::GetImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return space->AsImageSpace();
    }
  }
  return NULL;
}

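// Callback for MallocSpace::Walk below: arg points at a size_t accumulating the largest free
// contiguous chunk seen so far, which ThrowOutOfMemoryError uses to report fragmentation.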
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
  std::ostringstream oss;
  int64_t total_bytes_free = GetFreeMemory();
  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
      << " free bytes";
  // If the allocation failed due to fragmentation, print out the largest continuous allocation.
  if (!large_object_allocation && total_bytes_free >= byte_count) {
    size_t max_contiguous_allocation = 0;
    for (const auto& space : continuous_spaces_) {
      if (space->IsMallocSpace()) {
        // To allow the Walk/InspectAll() to exclusively-lock the mutator
        // lock, temporarily release the shared access to the mutator
        // lock here by transitioning to the suspended state.
        Locks::mutator_lock_->AssertSharedHeld(self);
        self->TransitionFromRunnableToSuspended(kSuspended);
        space->AsMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        self->TransitionFromSuspendedToRunnable();
        Locks::mutator_lock_->AssertSharedHeld(self);
      }
    }
    oss << "; failed due to fragmentation (largest possible contiguous allocation "
        << max_contiguous_allocation << " bytes)";
  }
  self->ThrowOutOfMemoryError(oss.str().c_str());
}

void Heap::Trim() {
  uint64_t start_ns = NanoTime();
  // Trim the managed spaces.
  uint64_t total_alloc_space_allocated = 0;
  uint64_t total_alloc_space_size = 0;
  uint64_t managed_reclaimed = 0;
  for (const auto& space : continuous_spaces_) {
    if (space->IsMallocSpace()) {
      gc::space::MallocSpace* alloc_space = space->AsMallocSpace();
      total_alloc_space_size += alloc_space->Size();
      managed_reclaimed += alloc_space->Trim();
    }
  }
  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() -
      bump_pointer_space_->Size();
  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
      static_cast<float>(total_alloc_space_size);
  uint64_t gc_heap_end_ns = NanoTime();
  // Trim the native heap.
  dlmalloc_trim(0);
  size_t native_reclaimed = 0;
  dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
  uint64_t end_ns = NanoTime();
  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
      << "%.";
}

bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
  // taking the lock.
  if (obj == nullptr) {
    return true;
  }
  return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
}

bool Heap::IsHeapAddress(const mirror::Object* obj) const {
  if (kMovingCollector && bump_pointer_space_ && bump_pointer_space_->HasAddress(obj)) {
    return true;
  }
  // TODO: This probably doesn't work for large objects.
  return FindSpaceFromObject(obj, true) != nullptr;
}

bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack,
                              bool search_live_stack, bool sorted) {
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
    mirror::Class* klass = obj->GetClass();
    if (obj == klass) {
      // This case happens for java.lang.Class.
      return true;
    }
    return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
  } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
    return false;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = NULL;
  if (c_space != nullptr) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != nullptr) {
      if (d_space->GetLiveObjects()->Test(obj)) {
        return true;
      }
    }
  }
  // This is covering the allocation/live stack swapping that is done without mutators suspended.
  for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
    if (i > 0) {
      NanoSleep(MsToNs(10));
    }
    if (search_allocation_stack) {
      if (sorted) {
        if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }

    if (search_live_stack) {
      if (sorted) {
        if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != nullptr) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != nullptr && d_space->GetLiveObjects()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::VerifyObjectImpl(const mirror::Object* obj) {
  if (Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}

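// Sanity-checks a Class pointer by following the klass field twice: c->klass should be
// java.lang.Class, and java.lang.Class is its own class, so the two hops must reach the same
// object. Raw reads are used because the checked accessors would recurse into verification.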
bool Heap::VerifyClassClass(const mirror::Class* c) const {
  // Note: we don't use the accessors here as they have internal sanity checks that we don't want
  // to run
  const byte* raw_addr =
      reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  return c_c == c_c_c;
}

void Heap::DumpSpaces(std::ostream& stream) {
  for (const auto& space : continuous_spaces_) {
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    stream << space << " " << *space << "\n";
    if (live_bitmap != nullptr) {
      stream << live_bitmap << " " << *live_bitmap << "\n";
    }
    if (mark_bitmap != nullptr) {
      stream << mark_bitmap << " " << *mark_bitmap << "\n";
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    stream << space << " " << *space << "\n";
  }
}

void Heap::VerifyObjectBody(const mirror::Object* obj) {
  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
  // Ignore early dawn of the universe verifications.
  if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) {
    return;
  }
  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
      mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  if (UNLIKELY(c == NULL)) {
    LOG(FATAL) << "Null class in object: " << obj;
  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
  }
  CHECK(VerifyClassClass(c));

  if (verify_object_mode_ > kVerifyAllFast) {
    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
    // heap_bitmap_lock_.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
    if (!IsLiveObjectLocked(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

void Heap::RecordFree(int64_t freed_objects, int64_t freed_bytes) {
  DCHECK_LE(freed_bytes, num_bytes_allocated_.Load());
  num_bytes_allocated_.FetchAndSub(freed_bytes);
  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;
    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
                                             size_t alloc_size, size_t* bytes_allocated,
                                             mirror::Class** klass) {
  mirror::Object* ptr = nullptr;
  bool was_default_allocator = allocator == GetCurrentAllocator();
  DCHECK(klass != nullptr);
  SirtRef<mirror::Class> sirt_klass(self, *klass);
Mathieu Chartier866fb2a2012-09-10 10:47:49 -0700957 // The allocation failed. If the GC is running, block until it completes, and then retry the
958 // allocation.
Mathieu Chartier590fee92013-09-13 13:46:47 -0700959 collector::GcType last_gc = WaitForGcToComplete(self);
Ian Rogers1d54e732013-05-02 21:10:01 -0700960 if (last_gc != collector::kGcTypeNone) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800961 // If we were the default allocator but the allocator changed while we were suspended,
962 // abort the allocation.
963 if (was_default_allocator && allocator != GetCurrentAllocator()) {
964 *klass = sirt_klass.get();
965 return nullptr;
966 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -0700967 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800968 ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700969 }
970
Mathieu Chartier866fb2a2012-09-10 10:47:49 -7001971 // Loop through our different GC types and try to GC until we get enough free memory.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800972 for (collector::GcType gc_type : gc_plan_) {
973 if (ptr != nullptr) {
974 break;
Mathieu Chartier866fb2a2012-09-10 10:47:49 -0700975 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800976 // Attempt to run the collector, if we succeed, re-try the allocation.
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800977 bool gc_ran =
978 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
979 if (was_default_allocator && allocator != GetCurrentAllocator()) {
980 *klass = sirt_klass.get();
981 return nullptr;
982 }
983 if (gc_ran) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -0700984 // Did we free sufficient memory for the allocation to succeed?
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800985 ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated);
Mathieu Chartier866fb2a2012-09-10 10:47:49 -0700986 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700987 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700988 // Allocations have failed after GCs; this is an exceptional state.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800989 if (ptr == nullptr) {
990 // Try harder, growing the heap if necessary.
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800991 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700992 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800993 if (ptr == nullptr) {
994 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
995 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
996 // VM spec requires that all SoftReferences have been collected and cleared before throwing
997 // OOME.
998 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
999 << " allocation";
1000 // TODO: Run finalization, but this may cause more allocations to occur.
1001 // We don't need a WaitForGcToComplete here either.
1002 DCHECK(!gc_plan_.empty());
1003 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001004 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1005 *klass = sirt_klass.get();
1006 return nullptr;
1007 }
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001008 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated);
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001009 if (ptr == nullptr) {
1010 ThrowOutOfMemoryError(self, alloc_size, false);
1011 }
1012 }
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001013 *klass = sirt_klass.get();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001014 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001015}
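// The allocation slow path above escalates in stages: wait out any in-progress GC and
// retry; run each collector in gc_plan_ (cheapest first) and retry after each one;
// retry with heap growth allowed; and finally run the most thorough GC in the plan
// with soft references cleared before giving up and throwing OutOfMemoryError.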
1016
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001017void Heap::SetTargetHeapUtilization(float target) {
1018 DCHECK_GT(target, 0.0f); // asserted in Java code
1019 DCHECK_LT(target, 1.0f);
1020 target_utilization_ = target;
1021}
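// Illustrative arithmetic, assuming the footprint is later derived in
// GrowForUtilization as roughly live_bytes / target_utilization_: with 2 MB live
// after a full GC and a target of 0.5, the heap would aim for a ~4 MB footprint,
// leaving about half the space free before the next collection.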
1022
Ian Rogers1d54e732013-05-02 21:10:01 -07001023size_t Heap::GetObjectsAllocated() const {
1024 size_t total = 0;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001025 for (space::AllocSpace* space : alloc_spaces_) {
1026 total += space->GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001027 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001028 return total;
1029}
1030
Ian Rogers1d54e732013-05-02 21:10:01 -07001031size_t Heap::GetObjectsAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001032 return GetObjectsFreedEver() + GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001033}
1034
1035size_t Heap::GetBytesAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001036 return GetBytesFreedEver() + GetBytesAllocated();
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001037}
1038
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001039class InstanceCounter {
1040 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001041 InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001042 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001043 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001044 }
1045
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001046 void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001047 for (size_t i = 0; i < classes_.size(); ++i) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001048 const mirror::Class* instance_class = o->GetClass();
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001049 if (use_is_assignable_from_) {
1050 if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
1051 ++counts_[i];
1052 }
1053 } else {
1054 if (instance_class == classes_[i]) {
1055 ++counts_[i];
1056 }
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001057 }
1058 }
1059 }
1060
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001061 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001062 const std::vector<mirror::Class*>& classes_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001063 bool use_is_assignable_from_;
1064 uint64_t* const counts_;
1065
1066 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001067};
1068
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001069void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001070 uint64_t* counts) {
1071 // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1072 // is empty, so the live bitmap is the only place we need to look.
1073 Thread* self = Thread::Current();
1074 self->TransitionFromRunnableToSuspended(kNative);
1075 CollectGarbage(false);
1076 self->TransitionFromSuspendedToRunnable();
1077
1078 InstanceCounter counter(classes, use_is_assignable_from, counts);
1079 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001080 GetLiveBitmap()->Visit(counter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001081}
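// Hypothetical caller sketch (string_class is illustrative, not defined here):
//   std::vector<mirror::Class*> classes;
//   classes.push_back(string_class);
//   uint64_t counts[1] = {0};
//   Runtime::Current()->GetHeap()->CountInstances(classes, false, counts);
//   // With use_is_assignable_from == false, counts[0] is now the number of
//   // reachable objects whose class is exactly string_class.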
1082
Elliott Hughes3b78c942013-01-15 17:35:41 -08001083class InstanceCollector {
1084 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001085 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
Elliott Hughes3b78c942013-01-15 17:35:41 -08001086 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1087 : class_(c), max_count_(max_count), instances_(instances) {
1088 }
1089
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001090 void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1091 const mirror::Class* instance_class = o->GetClass();
Elliott Hughes3b78c942013-01-15 17:35:41 -08001092 if (instance_class == class_) {
1093 if (max_count_ == 0 || instances_.size() < max_count_) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001094 instances_.push_back(const_cast<mirror::Object*>(o));
Elliott Hughes3b78c942013-01-15 17:35:41 -08001095 }
1096 }
1097 }
1098
1099 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001100 mirror::Class* class_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001101 uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001102 std::vector<mirror::Object*>& instances_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001103
1104 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1105};
1106
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001107void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1108 std::vector<mirror::Object*>& instances) {
Elliott Hughes3b78c942013-01-15 17:35:41 -08001109 // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1110 // is empty, so the live bitmap is the only place we need to look.
1111 Thread* self = Thread::Current();
1112 self->TransitionFromRunnableToSuspended(kNative);
1113 CollectGarbage(false);
1114 self->TransitionFromSuspendedToRunnable();
1115
1116 InstanceCollector collector(c, max_count, instances);
1117 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1118 GetLiveBitmap()->Visit(collector);
1119}
1120
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001121class ReferringObjectsFinder {
1122 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001123 ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1124 std::vector<mirror::Object*>& referring_objects)
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001125 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1126 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1127 }
1128
1129 // For bitmap Visit.
1130 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1131 // annotalysis on visitors.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001132 void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1133 collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001134 }
1135
1136 // For MarkSweep::VisitObjectReferences.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001137 void operator()(mirror::Object* referrer, mirror::Object* object,
Brian Carlstromdf629502013-07-17 22:39:56 -07001138 const MemberOffset&, bool) const {
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001139 if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001140 referring_objects_.push_back(referrer);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001141 }
1142 }
1143
1144 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001145 mirror::Object* object_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001146 uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001147 std::vector<mirror::Object*>& referring_objects_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001148
1149 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1150};
1151
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001152void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1153 std::vector<mirror::Object*>& referring_objects) {
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001154 // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1155 // is empty, so the live bitmap is the only place we need to look.
1156 Thread* self = Thread::Current();
1157 self->TransitionFromRunnableToSuspended(kNative);
1158 CollectGarbage(false);
1159 self->TransitionFromSuspendedToRunnable();
1160
1161 ReferringObjectsFinder finder(o, max_count, referring_objects);
1162 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1163 GetLiveBitmap()->Visit(finder);
1164}
1165
Ian Rogers30fab402012-01-23 15:43:46 -08001166void Heap::CollectGarbage(bool clear_soft_references) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001167 // Even if we waited for a GC, we still need to do another GC since weaks allocated during the
1168 // last GC will not necessarily have been cleared.
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001169 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001170}
1171
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001172void Heap::TransitionCollector(CollectorType collector_type) {
1173 if (collector_type == collector_type_) {
1174 return;
1175 }
1176 uint64_t start_time = NanoTime();
1177 int32_t before_size = GetTotalMemory();
1178 int32_t before_allocated = num_bytes_allocated_.Load();
1179 ThreadList* tl = Runtime::Current()->GetThreadList();
1180 Thread* self = Thread::Current();
1181 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1182 Locks::mutator_lock_->AssertNotHeld(self);
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001183 // Busy wait until we can GC (StartGC can fail if we have a non-zero
1184 // compacting_gc_disable_count_, which should rarely occur).
1185 bool copying_transition =
1186 IsCompactingGC(background_collector_type_) || IsCompactingGC(post_zygote_collector_type_);
1187 while (!StartGC(self, copying_transition)) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001188 usleep(100);
1189 }
1190 tl->SuspendAll();
1191 switch (collector_type) {
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001192 case kCollectorTypeSS:
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001193 // Fall-through.
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001194 case kCollectorTypeGSS: {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001195 mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001196 CHECK(main_space_ != nullptr);
1197 Compact(temp_space_, main_space_);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001198 DCHECK(allocator_mem_map_.get() == nullptr);
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001199 allocator_mem_map_.reset(main_space_->ReleaseMemMap());
1200 madvise(main_space_->Begin(), main_space_->Size(), MADV_DONTNEED);
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001201 // RemoveSpace does not delete the removed space.
1202 space::Space* old_space = main_space_;
1203 RemoveSpace(old_space);
1204 delete old_space;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001205 break;
1206 }
1207 case kCollectorTypeMS:
1208 // Fall through.
1209 case kCollectorTypeCMS: {
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001210 if (IsCompactingGC(collector_type_)) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001211 // TODO: Use mem-map from temp space?
1212 MemMap* mem_map = allocator_mem_map_.release();
1213 CHECK(mem_map != nullptr);
1214 size_t initial_size = kDefaultInitialSize;
1215 mprotect(mem_map->Begin(), initial_size, PROT_READ | PROT_WRITE);
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001216 CHECK(main_space_ == nullptr);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001217 if (kUseRosAlloc) {
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001218 main_space_ =
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001219 space::RosAllocSpace::CreateFromMemMap(mem_map, "alloc space", kPageSize,
1220 initial_size, mem_map->Size(),
1221 mem_map->Size(), low_memory_mode_);
1222 } else {
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001223 main_space_ =
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001224 space::DlMallocSpace::CreateFromMemMap(mem_map, "alloc space", kPageSize,
1225 initial_size, mem_map->Size(),
1226 mem_map->Size());
1227 }
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001228 main_space_->SetFootprintLimit(main_space_->Capacity());
1229 AddSpace(main_space_);
1230 Compact(main_space_, bump_pointer_space_);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001231 }
1232 break;
1233 }
1234 default: {
1235 LOG(FATAL) << "Attempted to transition to invalid collector type";
1236 break;
1237 }
1238 }
1239 ChangeCollector(collector_type);
1240 tl->ResumeAll();
1241 // Can't call into java code with all threads suspended.
1242 EnqueueClearedReferences();
1243 uint64_t duration = NanoTime() - start_time;
1244 GrowForUtilization(collector::kGcTypeFull, duration);
1245 FinishGC(self, collector::kGcTypeFull);
1246 int32_t after_size = GetTotalMemory();
1247 int32_t delta_size = before_size - after_size;
1248 int32_t after_allocated = num_bytes_allocated_.Load();
1249 int32_t delta_allocated = before_allocated - after_allocated;
1250 const std::string saved_bytes_str =
1251 delta_size < 0 ? "-" + PrettySize(-delta_size) : PrettySize(delta_size);
1252 LOG(INFO) << "Heap transition to " << process_state_ << " took "
1253 << PrettyDuration(duration) << " " << PrettySize(before_size) << "->"
1254 << PrettySize(after_size) << " from " << PrettySize(delta_allocated) << " to "
1255 << PrettySize(delta_size) << " saved";
1256}
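// The transition above is symmetric: moving to a semi-space collector compacts the
// malloc-backed main space into the bump pointer space and releases the main space's
// memory map; moving back to mark-sweep recreates the main space from that saved map
// and compacts the bump pointer space back into it.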
1257
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001258void Heap::ChangeCollector(CollectorType collector_type) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001259 // TODO: Only do this with all mutators suspended to avoid races.
1260 if (collector_type != collector_type_) {
1261 collector_type_ = collector_type;
1262 gc_plan_.clear();
1263 switch (collector_type_) {
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001264 case kCollectorTypeSS:
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001265 // Fall-through.
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001266 case kCollectorTypeGSS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001267 concurrent_gc_ = false;
1268 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001269 if (use_tlab_) {
1270 ChangeAllocator(kAllocatorTypeTLAB);
1271 } else {
1272 ChangeAllocator(kAllocatorTypeBumpPointer);
1273 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001274 break;
1275 }
1276 case kCollectorTypeMS: {
1277 concurrent_gc_ = false;
1278 gc_plan_.push_back(collector::kGcTypeSticky);
1279 gc_plan_.push_back(collector::kGcTypePartial);
1280 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001281 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001282 break;
1283 }
1284 case kCollectorTypeCMS: {
1285 concurrent_gc_ = true;
1286 gc_plan_.push_back(collector::kGcTypeSticky);
1287 gc_plan_.push_back(collector::kGcTypePartial);
1288 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001289 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001290 break;
1291 }
1292 default: {
1293 LOG(FATAL) << "Unimplemented";
1294 }
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001295 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001296 if (concurrent_gc_) {
1297 concurrent_start_bytes_ =
1298 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
1299 } else {
1300 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001301 }
1302 }
1303}
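// The gc_plan_ built above is ordered from cheapest to most thorough; callers such as
// AllocateInternalWithGc walk it front to back, and use gc_plan_.back() when they must
// reclaim as much as possible. For the concurrent collector, concurrent_start_bytes_
// is set kMinConcurrentRemainingBytes below the footprint limit so a background GC can
// be kicked off while that much allocation headroom still remains.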
1304
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001305// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
1306class ZygoteCompactingCollector : public collector::SemiSpace {
1307 public:
1308 explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, "zygote collector") {
1309 }
1310
1311 void BuildBins(space::ContinuousSpace* space) {
1312 bin_live_bitmap_ = space->GetLiveBitmap();
1313 bin_mark_bitmap_ = space->GetMarkBitmap();
1314 BinContext context;
1315 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
1316 context.collector_ = this;
1317 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1318 // Note: This requires traversing the space in increasing order of object addresses.
1319 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
1320 // Add the final bin, which spans from the end of the last object to the end of the space.
1321 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
1322 }
1323
1324 private:
1325 struct BinContext {
1326 uintptr_t prev_; // The end of the previous object.
1327 ZygoteCompactingCollector* collector_;
1328 };
1329 // Maps from bin sizes to locations.
1330 std::multimap<size_t, uintptr_t> bins_;
1331 // Live bitmap of the space which contains the bins.
1332 accounting::SpaceBitmap* bin_live_bitmap_;
1333 // Mark bitmap of the space which contains the bins.
1334 accounting::SpaceBitmap* bin_mark_bitmap_;
1335
1336 static void Callback(mirror::Object* obj, void* arg)
1337 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1338 DCHECK(arg != nullptr);
1339 BinContext* context = reinterpret_cast<BinContext*>(arg);
1340 ZygoteCompactingCollector* collector = context->collector_;
1341 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
1342 size_t bin_size = object_addr - context->prev_;
1343 // Add the bin spanning from the end of the previous object to the start of the current object.
1344 collector->AddBin(bin_size, context->prev_);
1345 context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
1346 }
1347
1348 void AddBin(size_t size, uintptr_t position) {
1349 if (size != 0) {
1350 bins_.insert(std::make_pair(size, position));
1351 }
1352 }
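  // Worked example (hypothetical addresses): with bins_ = {16 -> 0x100, 48 -> 0x200},
  // placing a 24-byte object does lower_bound(24), which selects the 48-byte bin at
  // 0x200; the object is copied there and the 24-byte remainder is re-inserted as a
  // new bin at 0x218. Objects larger than every available bin fall through to to_space_.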
1353
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001354 virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001355 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
1356 // allocator.
1357 return false;
1358 }
1359
1360 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
1361 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
1362 size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08001363 mirror::Object* forward_address;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001364 // Find the smallest bin into which we can move obj.
1365 auto it = bins_.lower_bound(object_size);
1366 if (it == bins_.end()) {
1367 // No available space in the bins, place it in the target space instead (grows the zygote
1368 // space).
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08001369 size_t bytes_allocated;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001370 forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
1371 if (to_space_live_bitmap_ != nullptr) {
1372 to_space_live_bitmap_->Set(forward_address);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001373 } else {
1374 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
1375 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001376 }
1377 } else {
1378 size_t size = it->first;
1379 uintptr_t pos = it->second;
1380 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
1381 forward_address = reinterpret_cast<mirror::Object*>(pos);
1382 // Set the live and mark bits so that sweeping system weaks works properly.
1383 bin_live_bitmap_->Set(forward_address);
1384 bin_mark_bitmap_->Set(forward_address);
1385 DCHECK_GE(size, object_size);
1386 AddBin(size - object_size, pos + object_size); // Add a new bin with the remaining space.
1387 }
1388 // Copy the object over to its new location.
1389 memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
1390 return forward_address;
1391 }
1392};
1393
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001394void Heap::UnBindBitmaps() {
1395 for (const auto& space : GetContinuousSpaces()) {
1396 if (space->IsContinuousMemMapAllocSpace()) {
1397 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1398 if (alloc_space->HasBoundBitmaps()) {
1399 alloc_space->UnBindBitmaps();
1400 }
1401 }
1402 }
1403}
1404
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001405void Heap::PreZygoteFork() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001406 static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
Ian Rogers81d425b2012-09-27 16:03:43 -07001407 Thread* self = Thread::Current();
1408 MutexLock mu(self, zygote_creation_lock_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001409 // Try to see if we have any Zygote spaces.
1410 if (have_zygote_space_) {
1411 return;
1412 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001413 VLOG(heap) << "Starting PreZygoteFork";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001414 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
1415 // Trim the pages at the end of the non moving space.
1416 non_moving_space_->Trim();
1417 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001418 // Change the collector to the post zygote one.
1419 ChangeCollector(post_zygote_collector_type_);
Mathieu Chartier50482232013-11-21 11:48:14 -08001420 // TODO: Delete bump_pointer_space_ and temp_pointer_space_?
Mathieu Chartier590fee92013-09-13 13:46:47 -07001421 if (semi_space_collector_ != nullptr) {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001422 ZygoteCompactingCollector zygote_collector(this);
1423 zygote_collector.BuildBins(non_moving_space_);
Mathieu Chartier50482232013-11-21 11:48:14 -08001424 // Create a new bump pointer space which we will compact into.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001425 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1426 non_moving_space_->Limit());
1427 // Compact the bump pointer space to a new zygote bump pointer space.
1428 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001429 zygote_collector.SetFromSpace(bump_pointer_space_);
1430 zygote_collector.SetToSpace(&target_space);
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001431 zygote_collector.Run(kGcCauseCollectorTransition, false);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001432 CHECK(temp_space_->IsEmpty());
Mathieu Chartier590fee92013-09-13 13:46:47 -07001433 total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
1434 total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
1435 // Update the end and write out image.
1436 non_moving_space_->SetEnd(target_space.End());
1437 non_moving_space_->SetLimit(target_space.Limit());
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001438 VLOG(heap) << "Zygote size " << non_moving_space_->Size() << " bytes";
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001439 }
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001440 // Save the old space so that we can remove it after we complete creating the zygote space.
1441 space::MallocSpace* old_alloc_space = non_moving_space_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001442 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001443 // the remaining available space.
1444 // Remove the old space before creating the zygote space since creating the zygote space sets
1445 // the old alloc space's bitmaps to nullptr.
1446 RemoveSpace(old_alloc_space);
1447 space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
1448 low_memory_mode_,
1449 &main_space_);
1450 delete old_alloc_space;
1451 CHECK(zygote_space != nullptr) << "Failed creating zygote space";
1452 AddSpace(zygote_space, false);
1453 CHECK(main_space_ != nullptr);
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001454 if (main_space_->IsRosAllocSpace()) {
1455 rosalloc_space_ = main_space_->AsRosAllocSpace();
1456 } else if (main_space_->IsDlMallocSpace()) {
1457 dlmalloc_space_ = main_space_->AsDlMallocSpace();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001458 }
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001459 main_space_->SetFootprintLimit(main_space_->Capacity());
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001460 AddSpace(main_space_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001461 have_zygote_space_ = true;
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001462 // Create the zygote space mod union table.
1463 accounting::ModUnionTable* mod_union_table =
1464 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
1465 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
1466 AddModUnionTable(mod_union_table);
Ian Rogers5f5a2c02012-09-17 10:52:08 -07001467 // Reset the cumulative loggers since we now have a few additional timing phases.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001468 for (const auto& collector : garbage_collectors_) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001469 collector->ResetCumulativeStatistics();
Mathieu Chartier0325e622012-09-05 14:22:51 -07001470 }
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001471 // Can't use RosAlloc for non moving space due to thread local buffers.
1472 // TODO: Non limited space for non-movable objects?
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001473 MemMap* mem_map = post_zygote_non_moving_space_mem_map_.release();
1474 space::MallocSpace* new_non_moving_space =
1475 space::DlMallocSpace::CreateFromMemMap(mem_map, "Non moving dlmalloc space", kPageSize,
1476 2 * MB, mem_map->Size(), mem_map->Size());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001477 AddSpace(new_non_moving_space, false);
1478 CHECK(new_non_moving_space != nullptr) << "Failed to create new non-moving space";
1479 new_non_moving_space->SetFootprintLimit(new_non_moving_space->Capacity());
1480 non_moving_space_ = new_non_moving_space;
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001481}
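// After PreZygoteFork the layout is: a zygote space holding everything allocated so
// far (compacted with the bin-packing collector above when a semi-space collector is
// available), a fresh main space for post-fork allocations, and a new dlmalloc-backed
// non-moving space. The zygote space gets a card-cache mod-union table so later GCs
// can track references out of it without rescanning the whole space each time.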
1482
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001483void Heap::FlushAllocStack() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001484 MarkAllocStackAsLive(allocation_stack_.get());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001485 allocation_stack_->Reset();
1486}
1487
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001488void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap1,
1489 accounting::SpaceBitmap* bitmap2,
Mathieu Chartierdb7f37d2014-01-10 11:09:06 -08001490 accounting::ObjectSet* large_objects,
Ian Rogers1d54e732013-05-02 21:10:01 -07001491 accounting::ObjectStack* stack) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001492 DCHECK(bitmap1 != nullptr);
1493 DCHECK(bitmap2 != nullptr);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001494 mirror::Object** limit = stack->End();
1495 for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
1496 const mirror::Object* obj = *it;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001497 DCHECK(obj != nullptr);
1498 if (bitmap1->HasAddress(obj)) {
1499 bitmap1->Set(obj);
1500 } else if (bitmap2->HasAddress(obj)) {
1501 bitmap2->Set(obj);
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001502 } else {
1503 large_objects->Set(obj);
1504 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001505 }
1506}
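// Dispatch above is by address range: objects covered by either continuous-space
// bitmap are marked there, and anything else is assumed to live in the discontinuous
// large object space and is recorded in the large object set.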
1507
Mathieu Chartier590fee92013-09-13 13:46:47 -07001508void Heap::SwapSemiSpaces() {
1509 // Swap the spaces so we allocate into the space which we just evacuated.
1510 std::swap(bump_pointer_space_, temp_space_);
1511}
1512
1513void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
1514 space::ContinuousMemMapAllocSpace* source_space) {
1515 CHECK(kMovingCollector);
Mathieu Chartier50482232013-11-21 11:48:14 -08001516 CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001517 if (target_space != source_space) {
1518 semi_space_collector_->SetFromSpace(source_space);
1519 semi_space_collector_->SetToSpace(target_space);
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001520 semi_space_collector_->Run(kGcCauseCollectorTransition, false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001521 }
1522}
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001523
Ian Rogers1d54e732013-05-02 21:10:01 -07001524collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
1525 bool clear_soft_references) {
Ian Rogers81d425b2012-09-27 16:03:43 -07001526 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001527 Runtime* runtime = Runtime::Current();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001528 // If the heap can't run the GC, silently fail and return that no GC was run.
1529 switch (gc_type) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001530 case collector::kGcTypePartial: {
1531 if (!have_zygote_space_) {
1532 return collector::kGcTypeNone;
1533 }
1534 break;
1535 }
1536 default: {
1537 // Other GC types have no special cases that would make them unrunnable. The main case
1538 // here is full GC.
1539 }
1540 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08001541 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Ian Rogers81d425b2012-09-27 16:03:43 -07001542 Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers120f1c72012-09-28 17:17:10 -07001543 if (self->IsHandlingStackOverflow()) {
1544 LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
1545 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001546 gc_complete_lock_->AssertNotHeld(self);
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001547 const bool compacting_gc = IsCompactingGC(collector_type_);
1548 if (!StartGC(self, compacting_gc)) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001549 return collector::kGcTypeNone;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001550 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001551 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
1552 ++runtime->GetStats()->gc_for_alloc_count;
1553 ++self->GetStats()->gc_for_alloc_count;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001554 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001555 uint64_t gc_start_time_ns = NanoTime();
Mathieu Chartier65db8802012-11-20 12:36:46 -08001556 uint64_t gc_start_size = GetBytesAllocated();
1557 // Approximate allocation rate in bytes / second.
Ian Rogers1d54e732013-05-02 21:10:01 -07001558 uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001559 // Back to back GCs can cause 0 ms of wait time in between GC invocations.
1560 if (LIKELY(ms_delta != 0)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001561 allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
Mathieu Chartier65db8802012-11-20 12:36:46 -08001562 VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
1563 }
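  // Example with illustrative numbers: if 8 MB were allocated since the last GC
  // finished 500 ms ago, allocation_rate_ becomes (8 MB * 1000) / 500 = 16 MB/s,
  // which later feeds the heuristics for deciding when to start a concurrent GC.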
1564
Ian Rogers1d54e732013-05-02 21:10:01 -07001565 DCHECK_LT(gc_type, collector::kGcTypeMax);
1566 DCHECK_NE(gc_type, collector::kGcTypeNone);
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001567
Mathieu Chartier590fee92013-09-13 13:46:47 -07001568 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier50482232013-11-21 11:48:14 -08001569 // TODO: Clean this up.
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001570 if (compacting_gc) {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001571 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
1572 current_allocator_ == kAllocatorTypeTLAB);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001573 gc_type = semi_space_collector_->GetGcType();
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001574 CHECK(temp_space_->IsEmpty());
Mathieu Chartier590fee92013-09-13 13:46:47 -07001575 semi_space_collector_->SetFromSpace(bump_pointer_space_);
1576 semi_space_collector_->SetToSpace(temp_space_);
1577 mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
Mathieu Chartier50482232013-11-21 11:48:14 -08001578 collector = semi_space_collector_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001579 gc_type = collector::kGcTypeFull;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001580 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
1581 current_allocator_ == kAllocatorTypeDlMalloc) {
Mathieu Chartier50482232013-11-21 11:48:14 -08001582 for (const auto& cur_collector : garbage_collectors_) {
1583 if (cur_collector->IsConcurrent() == concurrent_gc_ &&
1584 cur_collector->GetGcType() == gc_type) {
1585 collector = cur_collector;
1586 break;
1587 }
1588 }
1589 } else {
1590 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001591 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001592 CHECK(collector != nullptr)
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001593 << "Could not find garbage collector with concurrent=" << concurrent_gc_
1594 << " and type=" << gc_type;
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001595
Mathieu Chartier590fee92013-09-13 13:46:47 -07001596 ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
1597
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001598 collector->Run(gc_cause, clear_soft_references);
Ian Rogers1d54e732013-05-02 21:10:01 -07001599 total_objects_freed_ever_ += collector->GetFreedObjects();
1600 total_bytes_freed_ever_ += collector->GetFreedBytes();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001601
Mathieu Chartier39e32612013-11-12 16:28:05 -08001602 // Enqueue cleared references.
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001603 Locks::mutator_lock_->AssertNotHeld(self);
Mathieu Chartier39e32612013-11-12 16:28:05 -08001604 EnqueueClearedReferences();
1605
Mathieu Chartier590fee92013-09-13 13:46:47 -07001606 // Grow the heap so that we know when to perform the next GC.
1607 GrowForUtilization(gc_type, collector->GetDurationNs());
1608
Mathieu Chartierca2a24d2013-11-25 15:12:12 -08001609 if (CareAboutPauseTimes()) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001610 const size_t duration = collector->GetDurationNs();
1611 std::vector<uint64_t> pauses = collector->GetPauseTimes();
1612 // GC for alloc pauses the allocating thread, so consider it as a pause.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001613 bool was_slow = duration > long_gc_log_threshold_ ||
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001614 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001615 if (!was_slow) {
1616 for (uint64_t pause : pauses) {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001617 was_slow = was_slow || pause > long_pause_log_threshold_;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001618 }
1619 }
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001620 if (was_slow) {
1621 const size_t percent_free = GetPercentFree();
1622 const size_t current_heap_size = GetBytesAllocated();
1623 const size_t total_memory = GetTotalMemory();
1624 std::ostringstream pause_string;
1625 for (size_t i = 0; i < pauses.size(); ++i) {
1626 pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
1627 << ((i != pauses.size() - 1) ? ", " : "");
1628 }
1629 LOG(INFO) << gc_cause << " " << collector->GetName()
1630 << " GC freed " << collector->GetFreedObjects() << "("
1631 << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
1632 << collector->GetFreedLargeObjects() << "("
1633 << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
1634 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
1635 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
1636 << " total " << PrettyDuration((duration / 1000) * 1000);
1637 if (VLOG_IS_ON(heap)) {
Ian Rogers5fe9af72013-11-14 00:17:20 -08001638 LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001639 }
1640 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001641 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001642 FinishGC(self, gc_type);
Mathieu Chartier752a0e62013-06-27 11:03:27 -07001643 ATRACE_END();
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001644
1645 // Inform DDMS that a GC completed.
Ian Rogers15bf2d32012-08-28 17:33:04 -07001646 Dbg::GcDidFinish();
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001647 return gc_type;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001648}
Mathieu Chartiera6399032012-06-11 18:49:50 -07001649
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001650bool Heap::StartGC(Thread* self, bool is_compacting) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001651 MutexLock mu(self, *gc_complete_lock_);
1652 // Ensure there is only one GC at a time.
1653 WaitForGcToCompleteLocked(self);
1654 // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
1655 // Not doing at the moment to ensure soft references are cleared.
1656 // GC can be disabled if someone has an active GetPrimitiveArrayCritical.
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001657 if (is_compacting && disable_moving_gc_count_ != 0) {
1658 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001659 return false;
1660 }
1661 is_gc_running_ = true;
1662 return true;
1663}
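// StartGC returns false only when a compacting collection is requested while moving
// GC is disabled (e.g. while a thread holds an array pinned via
// GetPrimitiveArrayCritical); TransitionCollector handles this by polling until the
// disable count drops back to zero.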
1664
1665void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
1666 MutexLock mu(self, *gc_complete_lock_);
1667 is_gc_running_ = false;
1668 last_gc_type_ = gc_type;
1669 // Wake anyone who may have been waiting for the GC to complete.
1670 gc_complete_cond_->Broadcast(self);
1671}
1672
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001673static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001674 mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001675 if (root == obj) {
1676 LOG(INFO) << "Object " << obj << " is a root";
1677 }
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001678 return root;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001679}
1680
1681class ScanVisitor {
1682 public:
Brian Carlstromdf629502013-07-17 22:39:56 -07001683 void operator()(const mirror::Object* obj) const {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001684 LOG(ERROR) << "Would have rescanned object " << obj;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001685 }
1686};
1687
Ian Rogers1d54e732013-05-02 21:10:01 -07001688// Verify a reference from an object.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001689class VerifyReferenceVisitor {
1690 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001691 explicit VerifyReferenceVisitor(Heap* heap)
Ian Rogers1d54e732013-05-02 21:10:01 -07001692 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001693 : heap_(heap), failed_(false) {}
Ian Rogers1d54e732013-05-02 21:10:01 -07001694
1695 bool Failed() const {
1696 return failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001697 }
1698
1699 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
Ian Rogers1d54e732013-05-02 21:10:01 -07001700 // analysis on visitors.
Brian Carlstromdf629502013-07-17 22:39:56 -07001701 void operator()(const mirror::Object* obj, const mirror::Object* ref,
1702 const MemberOffset& offset, bool /* is_static */) const
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001703 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001704 if (ref == nullptr || IsLive(ref)) {
1705 // Verify that the reference is live.
1706 return;
1707 }
1708 if (!failed_) {
1709 // Print the message only on the first failure to prevent spam.
1710 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
1711 failed_ = true;
1712 }
1713 if (obj != nullptr) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001714 accounting::CardTable* card_table = heap_->GetCardTable();
1715 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
1716 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001717 byte* card_addr = card_table->CardFromAddr(obj);
1718 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
1719 << offset << "\n card value = " << static_cast<int>(*card_addr);
1720 if (heap_->IsValidObjectAddress(obj->GetClass())) {
1721 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
1722 } else {
1723 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001724 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001725
1726 // Attempt to find the class among the recently freed objects.
1727 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
1728 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
1729 space::MallocSpace* space = ref_space->AsMallocSpace();
1730 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
1731 if (ref_class != nullptr) {
1732 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
1733 << PrettyClass(ref_class);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001734 } else {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001735 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001736 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001737 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001738
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001739 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
1740 ref->GetClass()->IsClass()) {
1741 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
1742 } else {
1743 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
1744 << ") is not a valid heap address";
1745 }
1746
1747 card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
1748 void* cover_begin = card_table->AddrFromCard(card_addr);
1749 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
1750 accounting::CardTable::kCardSize);
1751 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
1752 << "-" << cover_end;
1753 accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
1754
1755 if (bitmap == nullptr) {
1756 LOG(ERROR) << "Object " << obj << " has no bitmap";
1757 if (!heap_->VerifyClassClass(obj->GetClass())) {
1758 LOG(ERROR) << "Object " << obj << " failed class verification!";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001759 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001760 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07001761 // Print out how the object is live.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001762 if (bitmap->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001763 LOG(ERROR) << "Object " << obj << " found in live bitmap";
1764 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001765 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001766 LOG(ERROR) << "Object " << obj << " found in allocation stack";
1767 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001768 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001769 LOG(ERROR) << "Object " << obj << " found in live stack";
1770 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001771 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
1772 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
1773 }
1774 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
1775 LOG(ERROR) << "Ref " << ref << " found in live stack";
1776 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001777 // Attempt to see if the card table missed the reference.
1778 ScanVisitor scan_visitor;
1779 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
1780 card_table->Scan(bitmap, byte_cover_begin,
Mathieu Chartier184e3222013-08-03 14:02:57 -07001781 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001782 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001783
1784 // Search to see if any of the roots reference our object.
1785 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
1786 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1787
1788 // Search to see if any of the roots reference our reference.
1789 arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
1790 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1791 } else {
1792 LOG(ERROR) << "Root " << ref << " is dead with type " << PrettyTypeOf(ref);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001793 }
1794 }
1795
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001796 bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001797 return heap_->IsLiveObjectLocked(obj, true, false, true);
Ian Rogers1d54e732013-05-02 21:10:01 -07001798 }
1799
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001800 static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001801 VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001802 (*visitor)(nullptr, root, MemberOffset(0), true);
1803 return root;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001804 }
1805
1806 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001807 Heap* const heap_;
1808 mutable bool failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001809};
1810
Ian Rogers1d54e732013-05-02 21:10:01 -07001811// Verify all references within an object, for use with HeapBitmap::Visit.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001812class VerifyObjectVisitor {
1813 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001814 explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001815
Mathieu Chartier590fee92013-09-13 13:46:47 -07001816 void operator()(mirror::Object* obj) const
Ian Rogersb726dcb2012-09-05 08:57:23 -07001817 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001818 // Note: we are verifying the references in obj but not obj itself; obj must be live, or else
1819 // how did we find it in the live bitmap?
1820 VerifyReferenceVisitor visitor(heap_);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001821 // The class doesn't count as a reference, but we should verify it anyway.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001822 collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
1823 if (obj->GetClass()->IsReferenceClass()) {
1824 visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
1825 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001826 failed_ = failed_ || visitor.Failed();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001827 }
1828
Mathieu Chartier590fee92013-09-13 13:46:47 -07001829 static void VisitCallback(mirror::Object* obj, void* arg)
1830 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1831 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
1832 visitor->operator()(obj);
1833 }
1834
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001835 bool Failed() const {
1836 return failed_;
1837 }
1838
1839 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001840 Heap* const heap_;
1841 mutable bool failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001842};
1843
1844// Must do this with mutators suspended since we are directly accessing the allocation stacks.
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001845bool Heap::VerifyHeapReferences() {
Ian Rogers81d425b2012-09-27 16:03:43 -07001846 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001847 // Let's sort our allocation stacks so that we can efficiently binary search them.
Ian Rogers1d54e732013-05-02 21:10:01 -07001848 allocation_stack_->Sort();
1849 live_stack_->Sort();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001850 VerifyObjectVisitor visitor(this);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001851 // Verify objects in the allocation stack since these will be objects which were:
1852 // 1. Allocated prior to the GC (pre GC verification).
1853 // 2. Allocated during the GC (pre sweep GC verification).
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001854 // We don't want to verify the objects in the live stack since they themselves may be
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001855 // pointing to dead objects if they are not reachable.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001856 VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
1857 // Verify the roots:
1858 Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001859 if (visitor.Failed()) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001860 // Dump mod-union tables.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001861 for (const auto& table_pair : mod_union_tables_) {
1862 accounting::ModUnionTable* mod_union_table = table_pair.second;
1863 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
1864 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001865 DumpSpaces();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001866 return false;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001867 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001868 return true;
1869}
1870
1871class VerifyReferenceCardVisitor {
1872 public:
1873 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
1874 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
1875 Locks::heap_bitmap_lock_)
Ian Rogers1d54e732013-05-02 21:10:01 -07001876 : heap_(heap), failed_(failed) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001877 }
1878
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001879 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1880 // annotalysis on visitors.
Brian Carlstromdf629502013-07-17 22:39:56 -07001881 void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
1882 bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001883 // Filter out class references since changing an object's class does not mark the card as dirty.
1884 // Also handles large objects, since the only reference they hold is a class reference.
1885 if (ref != NULL && !ref->IsClass()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001886 accounting::CardTable* card_table = heap_->GetCardTable();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001887 // If the object is not dirty and it is referencing something in the live stack other than
1888 // class, then it must be on a dirty card.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001889 if (!card_table->AddrIsInCardTable(obj)) {
1890 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
1891 *failed_ = true;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001892 } else if (!card_table->IsDirty(obj)) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001893 // TODO: Check mod-union tables.
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001894 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
1895 // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers1d54e732013-05-02 21:10:01 -07001896 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001897 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
1898 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001899 LOG(ERROR) << "Object " << obj << " found in live stack";
1900 }
1901 if (heap_->GetLiveBitmap()->Test(obj)) {
1902 LOG(ERROR) << "Object " << obj << " found in live bitmap";
1903 }
1904 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
1905 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
1906
1907 // Print which field of the object is dead.
1908 if (!obj->IsObjectArray()) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001909 const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001910 CHECK(klass != NULL);
Brian Carlstromea46f952013-07-30 01:26:50 -07001911 const mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
1912 : klass->GetIFields();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001913 CHECK(fields != NULL);
1914 for (int32_t i = 0; i < fields->GetLength(); ++i) {
Brian Carlstromea46f952013-07-30 01:26:50 -07001915 const mirror::ArtField* cur = fields->Get(i);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001916 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1917 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
1918 << PrettyField(cur);
1919 break;
1920 }
1921 }
1922 } else {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001923 const mirror::ObjectArray<mirror::Object>* object_array =
1924 obj->AsObjectArray<mirror::Object>();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001925 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
1926 if (object_array->Get(i) == ref) {
1927 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
1928 }
1929 }
1930 }
1931
1932 *failed_ = true;
1933 }
1934 }
1935 }
1936 }
1937
1938 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001939 Heap* const heap_;
1940 bool* const failed_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001941};

class VerifyLiveStackReferences {
 public:
  explicit VerifyLiveStackReferences(Heap* heap)
      : heap_(heap),
        failed_(false) {}

  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
  }

  bool Failed() const {
    return failed_;
  }

 private:
  Heap* const heap_;
  bool failed_;
};

bool Heap::VerifyMissingCardMarks() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());

  // We need to sort the live stack since we binary search it.
  live_stack_->Sort();
  VerifyLiveStackReferences visitor(this);
  GetLiveBitmap()->Visit(visitor);

  // We can verify objects in the live stack since none of these should reference dead objects.
  for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
    visitor(*it);
  }

  if (visitor.Failed()) {
    DumpSpaces();
    return false;
  }
  return true;
}
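
// --- Illustrative sketch (standalone; not part of heap.cc) ---
// VerifyMissingCardMarks() sorts the live stack once and then relies on
// ContainsSorted() for O(log n) membership tests. A minimal model of that
// sort-then-binary-search pattern, assuming a plain vector of pointers (the
// real accounting::AtomicStack is a fixed-capacity lock-free structure):
#include <algorithm>
#include <vector>

class SortedStackModel {
 public:
  void Push(void* obj) {
    entries_.push_back(obj);
    sorted_ = false;
  }
  void Sort() {
    std::sort(entries_.begin(), entries_.end());
    sorted_ = true;
  }
  // Only valid after Sort(), mirroring how the heap sorts the allocation and
  // live stacks before running the verification visitors.
  bool ContainsSorted(void* obj) const {
    return sorted_ && std::binary_search(entries_.begin(), entries_.end(), obj);
  }

 private:
  std::vector<void*> entries_;
  bool sorted_ = false;
};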

void Heap::SwapStacks() {
  allocation_stack_.swap(live_stack_);
}

accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
  auto it = mod_union_tables_.find(space);
  if (it == mod_union_tables_.end()) {
    return nullptr;
  }
  return it->second;
}

void Heap::ProcessCards(TimingLogger& timings) {
  // Clear cards and keep track of cards cleared in the mod-union table.
  for (const auto& space : continuous_spaces_) {
    accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
    if (table != nullptr) {
      const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
          "ImageModUnionClearCards";
      TimingLogger::ScopedSplit split(name, &timings);
      table->ClearCards();
    } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
      TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
      // No mod-union table for the AllocSpace. Age the cards so that the GC knows that these
      // cards were dirty before the GC started.
      // TODO: Don't need to use atomic.
      // The races mean we end up with either an aged card or an unaged card. Since we have the
      // checkpoint roots and then scan / update the mod-union tables afterwards, we will always
      // scan either card. If we end up with the non-aged card, we scan it in the pause.
      card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
    }
  }
}
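
// --- Illustrative sketch (standalone; not part of heap.cc) ---
// A minimal model of the card-aging step performed by ModifyCardsAtomic with
// AgeCardVisitor above. The constants are assumed to mirror ART's
// card_table.h (kCardClean == 0, kCardDirty == 0x70); the "aged" state is
// kCardDirty - 1, which is exactly what VerifyReferenceCardVisitor accepts as
// still-dirty during verification.
#include <cstdint>

namespace card_model {
constexpr uint8_t kCardClean = 0;
constexpr uint8_t kCardDirty = 0x70;

// A dirty card becomes "aged" (kCardDirty - 1); everything else is cleared.
// A mutator that re-dirties the card afterwards writes kCardDirty again,
// which is why the GC must treat either value as scannable.
inline uint8_t AgeCard(uint8_t card) {
  return card == kCardDirty ? kCardDirty - 1 : kCardClean;
}
}  // namespace card_model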

static mirror::Object* IdentityCallback(mirror::Object* obj, void*) {
  return obj;
}

void Heap::PreGcVerification(collector::GarbageCollector* gc) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();

  if (verify_pre_gc_heap_) {
    thread_list->SuspendAll();
    {
      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
      if (!VerifyHeapReferences()) {
        LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
      }
    }
    thread_list->ResumeAll();
  }

  // Check that all objects which reference things in the live stack are on dirty cards.
  if (verify_missing_card_marks_) {
    thread_list->SuspendAll();
    {
      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
      SwapStacks();
      // Sort the live stack so that we can quickly binary search it later.
      if (!VerifyMissingCardMarks()) {
        LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
      }
      SwapStacks();
    }
    thread_list->ResumeAll();
  }

  if (verify_mod_union_table_) {
    thread_list->SuspendAll();
    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
    for (const auto& table_pair : mod_union_tables_) {
      accounting::ModUnionTable* mod_union_table = table_pair.second;
      mod_union_table->UpdateAndMarkReferences(IdentityCallback, nullptr);
      mod_union_table->Verify();
    }
    thread_list->ResumeAll();
  }
}
2063
Ian Rogers1d54e732013-05-02 21:10:01 -07002064void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002065 // Called before sweeping occurs since we want to make sure we are not going so reclaim any
2066 // reachable objects.
2067 if (verify_post_gc_heap_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002068 Thread* self = Thread::Current();
2069 CHECK_NE(self->GetState(), kRunnable);
Ian Rogers1d54e732013-05-02 21:10:01 -07002070 {
2071 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2072 // Swapping bound bitmaps does nothing.
2073 gc->SwapBitmaps();
2074 if (!VerifyHeapReferences()) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002075 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
Ian Rogers1d54e732013-05-02 21:10:01 -07002076 }
2077 gc->SwapBitmaps();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002078 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002079 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002080}
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002081
Ian Rogers1d54e732013-05-02 21:10:01 -07002082void Heap::PostGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002083 if (verify_system_weaks_) {
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002084 Thread* self = Thread::Current();
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002085 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Ian Rogers1d54e732013-05-02 21:10:01 -07002086 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002087 mark_sweep->VerifySystemWeaks();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002088 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07002089}

collector::GcType Heap::WaitForGcToComplete(Thread* self) {
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  return WaitForGcToCompleteLocked(self);
}

collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
  collector::GcType last_gc_type = collector::kGcTypeNone;
  uint64_t wait_start = NanoTime();
  while (is_gc_running_) {
    ATRACE_BEGIN("GC: Wait For Completion");
    // We must wait, so we change thread state and then sleep on gc_complete_cond_.
    gc_complete_cond_->Wait(self);
    last_gc_type = last_gc_type_;
    ATRACE_END();
  }
  uint64_t wait_time = NanoTime() - wait_start;
  total_wait_time_ += wait_time;
  if (wait_time > long_pause_log_threshold_) {
    LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
  }
  return last_gc_type;
}
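
// --- Illustrative sketch (standalone; not part of heap.cc) ---
// The loop above is the classic "wait on a condition variable, re-checking
// the predicate under the lock" pattern. The same shape with the C++
// standard library instead of ART's Mutex/ConditionVariable wrappers (all
// names here are invented for illustration):
#include <condition_variable>
#include <mutex>

class GcCompletionModel {
 public:
  // Blocks until no GC is running. The predicate is re-checked on every
  // wakeup, so spurious wakeups are harmless.
  void WaitForGcToComplete() {
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [this] { return !is_gc_running_; });
  }

  void SetGcRunning(bool running) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      is_gc_running_ = running;
    }
    if (!running) {
      cond_.notify_all();  // Wake every waiter when the GC finishes.
    }
  }

 private:
  std::mutex mutex_;
  std::condition_variable cond_;
  bool is_gc_running_ = false;
};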

void Heap::DumpForSigQuit(std::ostream& os) {
  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
     << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
  DumpGcPerformanceInfo(os);
}

size_t Heap::GetPercentFree() {
  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
}

void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  if (max_allowed_footprint > GetMaxMemory()) {
    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
             << PrettySize(GetMaxMemory());
    max_allowed_footprint = GetMaxMemory();
  }
  max_allowed_footprint_ = max_allowed_footprint;
}

bool Heap::IsMovableObject(const mirror::Object* obj) const {
  if (kMovingCollector) {
    DCHECK(!IsInTempSpace(obj));
    if (bump_pointer_space_->HasAddress(obj)) {
      return true;
    }
    // TODO: Refactor this logic into the space itself?
    // Objects in the main space are only copied during background -> foreground transitions or
    // vice versa.
    if (main_space_ != nullptr && main_space_->HasAddress(obj) &&
        (IsCompactingGC(background_collector_type_) ||
         IsCompactingGC(post_zygote_collector_type_))) {
      return true;
    }
  }
  return false;
}

bool Heap::IsInTempSpace(const mirror::Object* obj) const {
  if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) {
    return true;
  }
  return false;
}

void Heap::UpdateMaxNativeFootprint() {
  size_t native_size = native_bytes_allocated_;
  // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
  size_t target_size = native_size / GetTargetHeapUtilization();
  if (target_size > native_size + max_free_) {
    target_size = native_size + max_free_;
  } else if (target_size < native_size + min_free_) {
    target_size = native_size + min_free_;
  }
  native_footprint_gc_watermark_ = target_size;
  native_footprint_limit_ = 2 * target_size - native_size;
}
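
// --- Illustrative sketch (standalone; not part of heap.cc) ---
// A worked example of the watermark arithmetic above, with made-up numbers.
// Assume 64 MB of registered native allocations, a target utilization of
// 0.5, min_free_ = 512 KB, and max_free_ = 2 MB:
//   target_size = 64 MB / 0.5 = 128 MB, clamped down to 64 MB + 2 MB = 66 MB
//   native_footprint_gc_watermark_ = 66 MB  (crossing it triggers a GC)
//   native_footprint_limit_ = 2 * 66 MB - 64 MB = 68 MB  (crossing it blocks)
// The same computation as a standalone function; all names are illustrative:
#include <algorithm>
#include <cstddef>

inline void ComputeNativeWatermarks(size_t native_size, double utilization,
                                    size_t min_free, size_t max_free,
                                    size_t* gc_watermark, size_t* blocking_limit) {
  size_t target = static_cast<size_t>(native_size / utilization);
  target = std::min(target, native_size + max_free);
  target = std::max(target, native_size + min_free);
  *gc_watermark = target;                      // Request a GC past this point.
  *blocking_limit = 2 * target - native_size;  // Block and GC past this point.
}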

void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
  const size_t bytes_allocated = GetBytesAllocated();
  last_gc_size_ = bytes_allocated;
  last_gc_time_ns_ = NanoTime();
  size_t target_size;
  if (gc_type != collector::kGcTypeSticky) {
    // Grow the heap for non-sticky GC.
    target_size = bytes_allocated / GetTargetHeapUtilization();
    if (target_size > bytes_allocated + max_free_) {
      target_size = bytes_allocated + max_free_;
    } else if (target_size < bytes_allocated + min_free_) {
      target_size = bytes_allocated + min_free_;
    }
    native_need_to_run_finalization_ = true;
    next_gc_type_ = collector::kGcTypeSticky;
  } else {
    // Based on how close the current heap size is to the target size, decide
    // whether or not to do a partial or sticky GC next.
    if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
      next_gc_type_ = collector::kGcTypeSticky;
    } else {
      next_gc_type_ = have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
    }
    // If we have freed enough memory, shrink the heap back down.
    if (bytes_allocated + max_free_ < max_allowed_footprint_) {
      target_size = bytes_allocated + max_free_;
    } else {
      target_size = std::max(bytes_allocated, max_allowed_footprint_);
    }
  }
  if (!ignore_max_footprint_) {
    SetIdealFootprint(target_size);
    if (concurrent_gc_) {
      // Calculate when to perform the next ConcurrentGC.
      // Calculate the estimated GC duration.
      const double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
      // Estimate how many remaining bytes we will have when we need to start the next GC.
      size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
      remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
      remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
      if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
        // A never-going-to-happen situation where the estimated allocation rate says we would
        // exceed the application's entire footprint during the GC. Schedule another GC nearly
        // straight away.
        remaining_bytes = kMinConcurrentRemainingBytes;
      }
      DCHECK_LE(remaining_bytes, max_allowed_footprint_);
      DCHECK_LE(max_allowed_footprint_, growth_limit_);
      // Start a concurrent GC when we get close to the estimated remaining bytes. When the
      // allocation rate is very high, remaining_bytes could tell us that we should start a GC
      // right away.
      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated);
    }
  }
}
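
// --- Illustrative sketch (standalone; not part of heap.cc) ---
// A worked example of the concurrent-GC trigger computed above, with made-up
// numbers. Suppose the app allocates 20 MB/s, the last GC took 100 ms, the
// footprint target (max_allowed_footprint_) is 40 MB, and 24 MB are allocated:
//   remaining_bytes = 20 MB/s * 0.1 s = 2 MB  (then clamped into
//       [kMinConcurrentRemainingBytes, kMaxConcurrentRemainingBytes])
//   concurrent_start_bytes_ = max(40 MB - 2 MB, 24 MB) = 38 MB
// so the concurrent GC starts with ~2 MB of allocation headroom left, giving
// it time to finish before mutators would otherwise block. As a standalone
// function (names are illustrative):
#include <algorithm>
#include <cstddef>
#include <cstdint>

inline size_t ComputeConcurrentStartBytes(size_t allocation_rate_bps,
                                          uint64_t gc_duration_ns,
                                          size_t max_allowed_footprint,
                                          size_t bytes_allocated,
                                          size_t min_remaining,
                                          size_t max_remaining) {
  const double gc_duration_s = gc_duration_ns / 1e9;
  size_t remaining = static_cast<size_t>(allocation_rate_bps * gc_duration_s);
  remaining = std::min(remaining, max_remaining);
  remaining = std::max(remaining, min_remaining);
  if (remaining > max_allowed_footprint) {
    remaining = min_remaining;  // Degenerate case: start the next GC almost immediately.
  }
  return std::max(max_allowed_footprint - remaining, bytes_allocated);
}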

void Heap::ClearGrowthLimit() {
  growth_limit_ = capacity_;
  non_moving_space_->ClearGrowthLimit();
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, referent, true);
}

mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
}

void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
  ScopedObjectAccess soa(self);
  JValue result;
  ArgArray arg_array(NULL, 0);
  arg_array.Append(reinterpret_cast<uint32_t>(object));
  soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
      arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}

void Heap::EnqueueClearedReferences() {
  if (!cleared_references_.IsEmpty()) {
    // When a runtime isn't started there are no reference queues to care about so ignore.
    if (LIKELY(Runtime::Current()->IsStarted())) {
      ScopedObjectAccess soa(Thread::Current());
      JValue result;
      ArgArray arg_array(NULL, 0);
      arg_array.Append(reinterpret_cast<uint32_t>(cleared_references_.GetList()));
      soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
          arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
    }
    cleared_references_.Clear();
  }
}

void Heap::RequestConcurrentGC(Thread* self) {
  // Make sure that we can do a concurrent GC.
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
      self->IsHandlingStackOverflow()) {
    return;
  }
  // We already have a request pending, no reason to start more until we update
  // concurrent_start_bytes_.
  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  JNIEnv* env = self->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestGC);
  CHECK(!env->ExceptionCheck());
}

void Heap::ConcurrentGC(Thread* self) {
  if (Runtime::Current()->IsShuttingDown(self)) {
    return;
  }
  // Wait for any GCs currently running to finish.
  if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
    // If we can't run the GC type we wanted to run, find the next appropriate one and try that
    // instead. E.g. can't do partial, so do full instead.
    if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
        collector::kGcTypeNone) {
      for (collector::GcType gc_type : gc_plan_) {
        // Attempt to run the collector, if we succeed, we are done.
        if (gc_type > next_gc_type_ &&
            CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
          break;
        }
      }
    }
  }
}

void Heap::RequestHeapTrim() {
  // GC completed and now we must decide whether to request a heap trim (advising pages back to the
  // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
  // a space it will hold its lock and can become a cause of jank.
  // Note, the large object space self-trims, and the Zygote space was trimmed at creation and has
  // been unchanging since forking.

  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
  // to utilization (which is probably inversely proportional to how much benefit we can expect).
  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
  // not how much use we're making of those pages.
  uint64_t ms_time = MilliTime();
  // Don't bother trimming the alloc space if a heap trim occurred in the last two seconds.
  if (ms_time - last_trim_time_ms_ < 2 * 1000) {
    return;
  }

  Thread* self = Thread::Current();
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
    // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
    // as we don't hold the lock while requesting the trim).
    return;
  }

  last_trim_time_ms_ = ms_time;

  // Trim only if we do not currently care about pause times.
  if (!CareAboutPauseTimes()) {
    JNIEnv* env = self->GetJniEnv();
    DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
    DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
    env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                              WellKnownClasses::java_lang_Daemons_requestHeapTrim);
    CHECK(!env->ExceptionCheck());
  }
}

void Heap::RevokeThreadLocalBuffers(Thread* thread) {
  if (rosalloc_space_ != nullptr) {
    rosalloc_space_->RevokeThreadLocalBuffers(thread);
  }
  if (bump_pointer_space_ != nullptr) {
    bump_pointer_space_->RevokeThreadLocalBuffers(thread);
  }
}

void Heap::RevokeAllThreadLocalBuffers() {
  if (rosalloc_space_ != nullptr) {
    rosalloc_space_->RevokeAllThreadLocalBuffers();
  }
  if (bump_pointer_space_ != nullptr) {
    bump_pointer_space_->RevokeAllThreadLocalBuffers();
  }
}

bool Heap::IsGCRequestPending() const {
  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
}

void Heap::RunFinalization(JNIEnv* env) {
  // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
  if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
    CHECK(WellKnownClasses::java_lang_System != nullptr);
    WellKnownClasses::java_lang_System_runFinalization =
        CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
    CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
  }
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
                            WellKnownClasses::java_lang_System_runFinalization);
}

void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
  Thread* self = ThreadForEnv(env);
  if (native_need_to_run_finalization_) {
    RunFinalization(env);
    UpdateMaxNativeFootprint();
    native_need_to_run_finalization_ = false;
  }
  // Total number of native bytes allocated.
  native_bytes_allocated_.FetchAndAdd(bytes);
  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
    collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
        collector::kGcTypeFull;

    // The second watermark is higher than the gc watermark. If you hit this it means you are
    // allocating native objects faster than the GC can keep up with.
    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
      if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
        // Just finished a GC, attempt to run finalizers.
        RunFinalization(env);
        CHECK(!env->ExceptionCheck());
      }
      // If we still are over the watermark, attempt a GC for alloc and run finalizers.
      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
        RunFinalization(env);
        native_need_to_run_finalization_ = false;
        CHECK(!env->ExceptionCheck());
      }
      // We have just run finalizers, update the native watermark since it is very likely that
      // finalizers released native managed allocations.
      UpdateMaxNativeFootprint();
    } else if (!IsGCRequestPending()) {
      if (concurrent_gc_) {
        RequestConcurrentGC(self);
      } else {
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
      }
    }
  }
}
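
// --- Illustrative sketch (standalone; not part of heap.cc) ---
// RegisterNativeAllocation() implements a two-watermark policy for native
// memory registered through JNI. A compact model of the decision ladder
// (enum and function names are invented for illustration; the real code also
// skips the background request when one is already pending):
#include <cstddef>

enum class NativeAllocAction {
  kNone,                  // Below both watermarks: nothing to do.
  kRequestGc,             // Above the GC watermark: nudge the collector.
  kBlockingGcAndFinalize  // Above the hard limit: GC and finalize right here.
};

inline NativeAllocAction ClassifyNativeAllocation(size_t native_bytes_allocated,
                                                  size_t gc_watermark,
                                                  size_t hard_limit) {
  if (native_bytes_allocated > hard_limit) {
    return NativeAllocAction::kBlockingGcAndFinalize;
  }
  if (native_bytes_allocated > gc_watermark) {
    return NativeAllocAction::kRequestGc;
  }
  return NativeAllocAction::kNone;
}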

void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
  int expected_size, new_size;
  do {
    expected_size = native_bytes_allocated_.Load();
    new_size = expected_size - bytes;
    if (UNLIKELY(new_size < 0)) {
      ScopedObjectAccess soa(env);
      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
                    StringPrintf("Attempted to free %d native bytes with only %d native bytes "
                                 "registered as allocated", bytes, expected_size).c_str());
      break;
    }
  } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
}
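
// --- Illustrative sketch (standalone; not part of heap.cc) ---
// RegisterNativeFree() uses a compare-and-swap retry loop so that racing
// frees can never drive the counter below zero. The same pattern with
// std::atomic instead of ART's atomic integer wrapper:
#include <atomic>

// Returns false (leaving the counter untouched) if the free would underflow,
// mirroring the exception path above.
inline bool TryRegisterNativeFree(std::atomic<int>* native_bytes_allocated, int bytes) {
  int expected = native_bytes_allocated->load();
  do {
    if (expected - bytes < 0) {
      return false;  // More bytes freed than were ever registered.
    }
    // On failure compare_exchange_weak reloads `expected`, so the check above
    // is re-run against the freshest value before every retry.
  } while (!native_bytes_allocated->compare_exchange_weak(expected, expected - bytes));
  return true;
}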

int64_t Heap::GetTotalMemory() const {
  int64_t ret = 0;
  for (const auto& space : continuous_spaces_) {
    // Currently don't include the image space.
    if (!space->IsImageSpace()) {
      ret += space->Size();
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    if (space->IsLargeObjectSpace()) {
      ret += space->AsLargeObjectSpace()->GetBytesAllocated();
    }
  }
  return ret;
}

void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
  DCHECK(mod_union_table != nullptr);
  mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
}

}  // namespace gc
}  // namespace art