/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>
#include <valgrind.h>

#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "heap-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

extern void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator);

namespace gc {

static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;

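// Creates the heap. Sets up, in order: the image space (when an image file is provided), the
// main malloc space (rosalloc or dlmalloc), the bump pointer spaces used by the moving
// collector, the large object space, the card table and image mod-union table, the mark,
// allocation and live stacks, the GC completion lock, and the garbage collector instances.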
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity, const std::string& image_file_name,
           CollectorType post_zygote_collector_type, CollectorType background_collector_type,
           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
           size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint, bool use_tlab)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      concurrent_gc_(false),
      collector_type_(kCollectorTypeNone),
      post_zygote_collector_type_(post_zygote_collector_type),
      background_collector_type_(background_collector_type),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      have_zygote_space_(false),
      soft_reference_queue_(this),
      weak_reference_queue_(this),
      finalizer_reference_queue_(this),
      phantom_reference_queue_(this),
      cleared_references_(this),
      is_gc_running_(false),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      native_need_to_run_finalization_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(false),
      verify_post_gc_heap_(false),
      verify_mod_union_table_(false),
      min_alloc_space_size_for_sticky_gc_(1112 * MB),
      min_remaining_space_for_sticky_gc_(1 * MB),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC-a-lot mode, we limit the allocation stacks to kGcAlotInterval allocations. This
       * causes a lot of GCs, since we do a GC-for-alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted),
      gc_disable_count_(0),
      running_on_valgrind_(RUNNING_ON_VALGRIND),
      use_tlab_(use_tlab) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  // If we aren't the zygote, switch to the default non-zygote allocator. This may update the
  // allocation entrypoints.
  if (!Runtime::Current()->IsZygote() || !kMovingCollector) {
    ChangeCollector(post_zygote_collector_type_);
  } else {
    // We are the zygote, use bump pointer allocation + semi space collector.
    ChangeCollector(kCollectorTypeSS);
  }

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
    AddSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory; ensure that the
    // alloc space isn't placed in the middle of that range.
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
    }
  }
  const char* name = Runtime::Current()->IsZygote() ? "zygote space" : "alloc space";
  space::MallocSpace* malloc_space;
  if (kUseRosAlloc) {
    malloc_space = space::RosAllocSpace::Create(name, initial_size, growth_limit, capacity,
                                                requested_alloc_space_begin, low_memory_mode_);
    CHECK(malloc_space != nullptr) << "Failed to create rosalloc space";
  } else {
    malloc_space = space::DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
                                                requested_alloc_space_begin);
    CHECK(malloc_space != nullptr) << "Failed to create dlmalloc space";
  }
  VLOG(heap) << "malloc_space : " << malloc_space;
  if (kMovingCollector) {
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
    // issues.
    const size_t bump_pointer_space_size = std::min(malloc_space->Capacity(), 128 * MB);
    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                          bump_pointer_space_size, nullptr);
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
                                                  nullptr);
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    VLOG(heap) << "bump_pointer_space : " << bump_pointer_space_;
    VLOG(heap) << "temp_space : " << temp_space_;
  }
  non_moving_space_ = malloc_space;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  AddSpace(malloc_space);

  // Allocate the large object space.
  constexpr bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  AddSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  byte* heap_begin = continuous_spaces_.front()->Begin();
  byte* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  // Use a card cache mod-union table for now, since it makes it easier to update references
  // into the copying spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
  if (kMovingCollector) {
    // TODO: Clean this up.
    bool generational = post_zygote_collector_type_ == kCollectorTypeGSS;
    semi_space_collector_ = new collector::SemiSpace(this, generational);
    garbage_collectors_.push_back(semi_space_collector_);
  }

  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

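// Switches the allocator used for new allocations. Compiled code allocates through the quick
// alloc entrypoints, so changing the allocator also swaps those entrypoints and asks the
// instrumentation to propagate the change to every thread.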
void Heap::ChangeAllocator(AllocatorType allocator) {
  // These two allocators are only used internally and don't have any entrypoints.
  DCHECK_NE(allocator, kAllocatorTypeLOS);
  DCHECK_NE(allocator, kAllocatorTypeNonMoving);
  if (current_allocator_ != allocator) {
    current_allocator_ = allocator;
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

bool Heap::IsCompilingBoot() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return false;
    } else if (space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

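// IncrementDisableGC/DecrementDisableGC bracket regions that must not race with a collection.
// A sketch of the intended pairing (illustrative only, assuming the disable count is consulted
// elsewhere before a collection is started):
//
//   heap->IncrementDisableGC(self);  // Waits for any in-progress GC first.
//   ...                              // Work that must not overlap with a GC.
//   heap->DecrementDisableGC(self);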
void Heap::IncrementDisableGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  WaitForGcToCompleteLocked(self);
  ++gc_disable_count_;
}

void Heap::DecrementDisableGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GE(gc_disable_count_, 0U);
  --gc_disable_count_;
}

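// Called when the process moves between foreground and background. Jank-perceptible processes
// get the post-zygote (foreground) collector, other processes the background collector. When
// the state is unchanged, a full background GC is requested instead.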
void Heap::UpdateProcessState(ProcessState process_state) {
  if (process_state_ != process_state) {
    process_state_ = process_state;
    if (process_state_ == kProcessStateJankPerceptible) {
      TransitionCollector(post_zygote_collector_type_);
    } else {
      TransitionCollector(background_collector_type_);
    }
  } else {
    CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

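// Visits all objects in the heap: objects in the bump pointer space, objects on the allocation
// stack (newly allocated, so not yet in the live bitmap), and objects in the live bitmap.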
void Heap::VisitObjects(ObjectVisitorCallback callback, void* arg) {
  Thread* self = Thread::Current();
  // GCs can move objects, so don't allow thread suspension (and hence a GC) while visiting.
  const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
  if (bump_pointer_space_ != nullptr) {
    // Visit objects in bump pointer space.
    bump_pointer_space_->Walk(callback, arg);
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
       it < end; ++it) {
    mirror::Object* obj = *it;
    callback(obj, arg);
  }
  GetLiveBitmap()->Walk(callback, arg);
  self->EndAssertNoThreadSuspension(old_cause);
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = rosalloc_space_ != nullptr ? rosalloc_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = dlmalloc_space_ != nullptr ? dlmalloc_space_ : non_moving_space_;
  // This logic handles the case where one of the rosalloc or dlmalloc spaces is missing.
  // TODO: Generalize this to n bitmaps?
  if (space1 == nullptr) {
    DCHECK(space2 != nullptr);
    space1 = space2;
  }
  if (space2 == nullptr) {
    DCHECK(space1 != nullptr);
    space2 = space1;
  }
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 large_object_space_->GetLiveObjects(), stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

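// Registers a space with the heap. Continuous spaces get their live/mark bitmaps (when present)
// added to the heap bitmaps and the space list is kept sorted by begin address; discontinuous
// spaces register their live/mark object sets instead. Alloc spaces are also tracked in
// alloc_spaces_.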
void Heap::AddSpace(space::Space* space, bool set_as_default) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    if (set_as_default) {
      if (continuous_space->IsDlMallocSpace()) {
        dlmalloc_space_ = continuous_space->AsDlMallocSpace();
      } else if (continuous_space->IsRosAllocSpace()) {
        rosalloc_space_ = continuous_space->AsRosAllocSpace();
      }
    }
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
                return a->Begin() < b->Begin();
              });
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

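// Undoes AddSpace: unregisters the bitmaps or object sets, drops the space from the space lists
// (clearing the cached dlmalloc/rosalloc/main space pointers when they match), and finally
// deletes the space.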
void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
    if (continuous_space == dlmalloc_space_) {
      dlmalloc_space_ = nullptr;
    } else if (continuous_space == rosalloc_space_) {
      rosalloc_space_ = nullptr;
    }
    if (continuous_space == main_space_) {
      main_space_ = nullptr;
    }
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
  delete space;
}

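// Accounting for memory used by the GC's own data structures. These methods are apparently
// written to tolerate being called through a null Heap pointer (hence the this != nullptr
// checks), which is technically undefined behavior in standard C++ but relied on to work with
// the toolchains in use here.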
void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.FetchAndAdd(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.FetchAndSub(bytes);
  }
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (const auto& collector : garbage_collectors_) {
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      Histogram<uint64_t>::CumulativeData cumulative_data;
      collector->GetPauseHistogram().CreateHistogram(&cumulative_data);
      collector->GetPauseHistogram().PrintConfidenceIntervals(os, 0.99, cumulative_data);
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  VLOG(heap) << "Finished ~Heap()";
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

struct SoftReferenceArgs {
  RootVisitor* is_marked_callback_;
  RootVisitor* recursive_mark_callback_;
  void* arg_;
};

mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
  // TODO: Not preserve all soft references.
  return args->recursive_mark_callback_(obj, args->arg_);
}

// Process reference class instances and schedule finalizations.
void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
                             RootVisitor* is_marked_callback,
                             RootVisitor* recursive_mark_object_callback, void* arg) {
  // Unless we are in the zygote or required to clear soft references with white references,
  // preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    SoftReferenceArgs soft_reference_args;
    soft_reference_args.is_marked_callback_ = is_marked_callback;
    soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
    soft_reference_args.arg_ = arg;
    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
                                                     &soft_reference_args);
  }
  timings.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  timings.EndSplit();
  // Preserve all white objects with finalize methods and schedule them for finalization.
  timings.StartSplit("EnqueueFinalizerReferences");
  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
                                                        recursive_mark_object_callback, arg);
  timings.EndSplit();
  timings.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  timings.EndSplit();
}

bool Heap::IsEnqueued(mirror::Object* ref) const {
  // Since references are stored as cyclic lists, once a reference has been enqueued its
  // pending-next field is always non-null.
  return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
}

bool Heap::IsEnqueuable(const mirror::Object* ref) const {
  DCHECK(ref != nullptr);
  const mirror::Object* queue =
      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueOffset(), false);
  const mirror::Object* queue_next =
      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueNextOffset(), false);
  return queue != nullptr && queue_next == nullptr;
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
                                  RootVisitor mark_visitor, void* arg) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != nullptr);
  mirror::Object* referent = GetReferenceReferent(obj);
  if (referent != nullptr) {
    mirror::Object* forward_address = mark_visitor(referent, arg);
    // Null means that the object is not currently marked.
    if (forward_address == nullptr) {
      Thread* self = Thread::Current();
      // TODO: Remove these locks, and use atomic stacks for storing references?
      // We need to check that the references haven't already been enqueued since we can end up
      // scanning the same reference multiple times due to dirty cards.
      if (klass->IsSoftReferenceClass()) {
        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsWeakReferenceClass()) {
        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsFinalizerReferenceClass()) {
        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsPhantomReferenceClass()) {
        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else {
        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                   << klass->GetAccessFlags();
      }
    } else if (referent != forward_address) {
      // Referent is already marked and we need to update it.
      SetReferenceReferent(obj, forward_address);
    }
  }
}

space::ImageSpace* Heap::GetImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return space->AsImageSpace();
    }
  }
  return NULL;
}

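// Callback invoked for each chunk during a malloc-space walk; records the largest contiguous
// run of free bytes seen, which ThrowOutOfMemoryError below uses to report fragmentation.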
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
  std::ostringstream oss;
  int64_t total_bytes_free = GetFreeMemory();
  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
      << " free bytes";
  // If the allocation failed due to fragmentation, print out the largest continuous allocation.
  if (!large_object_allocation && total_bytes_free >= byte_count) {
    size_t max_contiguous_allocation = 0;
    for (const auto& space : continuous_spaces_) {
      if (space->IsMallocSpace()) {
        // To allow Walk/InspectAll() to exclusively lock the mutator lock, temporarily
        // release our shared hold on it here by transitioning to the suspended state.
        Locks::mutator_lock_->AssertSharedHeld(self);
        self->TransitionFromRunnableToSuspended(kSuspended);
        space->AsMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        self->TransitionFromSuspendedToRunnable();
        Locks::mutator_lock_->AssertSharedHeld(self);
      }
    }
    oss << "; failed due to fragmentation (largest possible contiguous allocation "
        << max_contiguous_allocation << " bytes)";
  }
  self->ThrowOutOfMemoryError(oss.str().c_str());
}

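// Trims both heaps: asks each managed malloc space to release unused memory, then trims the
// native dlmalloc heap, logging the durations, the bytes advised to the kernel for each, and
// the managed heap utilization.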
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 735 | void Heap::Trim() { |
| 736 | uint64_t start_ns = NanoTime(); |
| 737 | // Trim the managed spaces. |
| 738 | uint64_t total_alloc_space_allocated = 0; |
| 739 | uint64_t total_alloc_space_size = 0; |
| 740 | uint64_t managed_reclaimed = 0; |
| 741 | for (const auto& space : continuous_spaces_) { |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 742 | if (space->IsMallocSpace() && !space->IsZygoteSpace()) { |
| 743 | gc::space::MallocSpace* alloc_space = space->AsMallocSpace(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 744 | total_alloc_space_size += alloc_space->Size(); |
| 745 | managed_reclaimed += alloc_space->Trim(); |
| 746 | } |
| 747 | } |
| 748 | total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() - |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 749 | bump_pointer_space_->Size(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 750 | const float managed_utilization = static_cast<float>(total_alloc_space_allocated) / |
| 751 | static_cast<float>(total_alloc_space_size); |
| 752 | uint64_t gc_heap_end_ns = NanoTime(); |
| 753 | // Trim the native heap. |
| 754 | dlmalloc_trim(0); |
| 755 | size_t native_reclaimed = 0; |
| 756 | dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed); |
| 757 | uint64_t end_ns = NanoTime(); |
| 758 | VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns) |
| 759 | << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration=" |
| 760 | << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed) |
| 761 | << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization) |
| 762 | << "%."; |
| 763 | } |
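| | // Worked example of the utilization math above (illustrative values): 24MB allocated
| | // across alloc spaces totalling 32MB gives managed_utilization = 24/32 = 0.75, logged
| | // as "Managed heap utilization of 75%."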
| 764 | |
| 765 | bool Heap::IsValidObjectAddress(const mirror::Object* obj) const { |
| 766 | // Note: we deliberately don't take the lock here, and mustn't test anything that would require |
| 767 | // taking the lock. |
| 768 | if (obj == nullptr) { |
Elliott Hughes | 88c5c35 | 2012-03-15 18:49:48 -0700 | [diff] [blame] | 769 | return true; |
| 770 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 771 | return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj); |
| 772 | } |
| 773 | |
| 774 | bool Heap::IsHeapAddress(const mirror::Object* obj) const { |
| 775 | if (kMovingCollector && bump_pointer_space_->HasAddress(obj)) { |
| 776 | return true; |
Elliott Hughes | a250199 | 2011-08-26 19:39:54 -0700 | [diff] [blame] | 777 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 778 | // TODO: This probably doesn't work for large objects. |
| 779 | return FindSpaceFromObject(obj, true) != nullptr; |
Elliott Hughes | a250199 | 2011-08-26 19:39:54 -0700 | [diff] [blame] | 780 | } |
| 781 | |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 782 | bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack, |
| 783 | bool search_live_stack, bool sorted) { |
Brian Carlstrom | 7934ac2 | 2013-07-26 10:54:15 -0700 | [diff] [blame] | 784 | // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current()); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 785 | if (obj == nullptr || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 786 | return false; |
| 787 | } |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 788 | space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true); |
| 789 | space::DiscontinuousSpace* d_space = NULL; |
| 790 | if (c_space != NULL) { |
| 791 | if (c_space->GetLiveBitmap()->Test(obj)) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 792 | return true; |
| 793 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 794 | } else if (bump_pointer_space_->Contains(obj) || temp_space_->Contains(obj)) { |
| 795 | return true; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 796 | } else { |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 797 | d_space = FindDiscontinuousSpaceFromObject(obj, true); |
| 798 | if (d_space != NULL) { |
| 799 | if (d_space->GetLiveObjects()->Test(obj)) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 800 | return true; |
| 801 | } |
| 802 | } |
| 803 | } |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 804 |   // This covers the allocation/live stack swapping that is done without suspending mutators.
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 805 | for (size_t i = 0; i < (sorted ? 1 : 5); ++i) { |
| 806 | if (i > 0) { |
| 807 | NanoSleep(MsToNs(10)); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 808 | } |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 809 | if (search_allocation_stack) { |
| 810 | if (sorted) { |
| 811 | if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) { |
| 812 | return true; |
| 813 | } |
| 814 | } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) { |
| 815 | return true; |
| 816 | } |
| 817 | } |
| 818 | |
| 819 | if (search_live_stack) { |
| 820 | if (sorted) { |
| 821 | if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) { |
| 822 | return true; |
| 823 | } |
| 824 | } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) { |
| 825 | return true; |
| 826 | } |
| 827 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 828 | } |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 829 | // We need to check the bitmaps again since there is a race where we mark something as live and |
| 830 | // then clear the stack containing it. |
| 831 | if (c_space != NULL) { |
| 832 | if (c_space->GetLiveBitmap()->Test(obj)) { |
| 833 | return true; |
| 834 | } |
| 835 | } else { |
| 836 | d_space = FindDiscontinuousSpaceFromObject(obj, true); |
| 837 | if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) { |
| 838 | return true; |
| 839 | } |
| 840 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 841 | return false; |
Elliott Hughes | 6a5bd49 | 2011-10-28 14:33:57 -0700 | [diff] [blame] | 842 | } |
| 843 | |
Ian Rogers | 04d7aa9 | 2013-03-16 14:29:17 -0700 | [diff] [blame] | 844 | void Heap::VerifyObjectImpl(const mirror::Object* obj) { |
| 845 | if (Thread::Current() == NULL || |
jeffhao | 2504552 | 2012-03-13 19:34:37 -0700 | [diff] [blame] | 846 | Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) { |
Elliott Hughes | 85d1545 | 2011-09-16 17:33:01 -0700 | [diff] [blame] | 847 | return; |
| 848 | } |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 849 | VerifyObjectBody(obj); |
Elliott Hughes | 92b3b56 | 2011-09-08 16:32:26 -0700 | [diff] [blame] | 850 | } |
Elliott Hughes | 92b3b56 | 2011-09-08 16:32:26 -0700 | [diff] [blame] | 851 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 852 | void Heap::DumpSpaces(std::ostream& stream) { |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 853 | for (const auto& space : continuous_spaces_) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 854 | accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); |
| 855 | accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 856 | stream << space << " " << *space << "\n"; |
| 857 | if (live_bitmap != nullptr) { |
| 858 | stream << live_bitmap << " " << *live_bitmap << "\n"; |
| 859 | } |
| 860 | if (mark_bitmap != nullptr) { |
| 861 | stream << mark_bitmap << " " << *mark_bitmap << "\n"; |
| 862 | } |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 863 | } |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 864 | for (const auto& space : discontinuous_spaces_) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 865 | stream << space << " " << *space << "\n"; |
Mathieu Chartier | 128c52c | 2012-10-16 14:12:41 -0700 | [diff] [blame] | 866 | } |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 867 | } |
| 868 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 869 | void Heap::VerifyObjectBody(const mirror::Object* obj) { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 870 | CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj; |
| 871 | // Ignore early dawn of the universe verifications. |
Ian Rogers | b122a4b | 2013-11-19 18:00:50 -0800 | [diff] [blame] | 872 | if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) { |
Ian Rogers | 62d6c77 | 2013-02-27 08:32:07 -0800 | [diff] [blame] | 873 | return; |
| 874 | } |
| 875 | const byte* raw_addr = reinterpret_cast<const byte*>(obj) + |
| 876 | mirror::Object::ClassOffset().Int32Value(); |
| 877 | const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr); |
| 878 | if (UNLIKELY(c == NULL)) { |
| 879 | LOG(FATAL) << "Null class in object: " << obj; |
| 880 | } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) { |
| 881 | LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj; |
| 882 | } |
| 883 | // Check obj.getClass().getClass() == obj.getClass().getClass().getClass() |
| 884 |   // Note: we don't use the accessors here, as they have internal sanity checks
| 885 |   // that we don't want to run.
| 886 | raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value(); |
| 887 | const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr); |
| 888 | raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value(); |
| 889 | const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr); |
| 890 | CHECK_EQ(c_c, c_c_c); |
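| | // For any well-formed object the class-of-class chain reaches a fixed point at
| | // java.lang.Class, e.g. for a String instance:
| | //   c     == String.class
| | //   c_c   == Class.class   (String.class is an instance of Class)
| | //   c_c_c == Class.class   (Class.class is its own class), hence c_c == c_c_c.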
Mathieu Chartier | 0325e62 | 2012-09-05 14:22:51 -0700 | [diff] [blame] | 891 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 892 | if (verify_object_mode_ > kVerifyAllFast) { |
Ian Rogers | 62d6c77 | 2013-02-27 08:32:07 -0800 | [diff] [blame] | 893 | // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the |
| 894 | // heap_bitmap_lock_. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 895 | if (!IsLiveObjectLocked(obj)) { |
| 896 | DumpSpaces(); |
| 897 | LOG(FATAL) << "Object is dead: " << obj; |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 898 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 899 | if (!IsLiveObjectLocked(c)) { |
Mathieu Chartier | dcf8d72 | 2012-08-02 14:55:54 -0700 | [diff] [blame] | 900 | LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj; |
| 901 | } |
Mathieu Chartier | dcf8d72 | 2012-08-02 14:55:54 -0700 | [diff] [blame] | 902 | } |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 903 | } |
| 904 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 905 | void Heap::VerificationCallback(mirror::Object* obj, void* arg) { |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 906 | DCHECK(obj != NULL); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 907 | reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj); |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 908 | } |
| 909 | |
| 910 | void Heap::VerifyHeap() { |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 911 | ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 912 | GetLiveBitmap()->Walk(Heap::VerificationCallback, this); |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 913 | } |
| 914 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 915 | void Heap::RecordFree(int64_t freed_objects, int64_t freed_bytes) { |
| 916 | DCHECK_LE(freed_bytes, num_bytes_allocated_.Load()); |
Ian Rogers | b122a4b | 2013-11-19 18:00:50 -0800 | [diff] [blame] | 917 | num_bytes_allocated_.FetchAndSub(freed_bytes); |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 918 | if (Runtime::Current()->HasStatsEnabled()) { |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 919 | RuntimeStats* thread_stats = Thread::Current()->GetStats(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 920 | thread_stats->freed_objects += freed_objects; |
Elliott Hughes | 307f75d | 2011-10-12 18:04:40 -0700 | [diff] [blame] | 921 | thread_stats->freed_bytes += freed_bytes; |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 922 | // TODO: Do this concurrently. |
| 923 | RuntimeStats* global_stats = Runtime::Current()->GetStats(); |
| 924 | global_stats->freed_objects += freed_objects; |
| 925 | global_stats->freed_bytes += freed_bytes; |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 926 | } |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 927 | } |
| 928 | |
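| | // Allocation slow path, reached only after TryToAllocate has failed. The retry
| | // ladder below is, in order:
| | //   1. Wait for any in-progress GC, then retry the allocation.
| | //   2. Run each GC type in gc_plan_ (e.g. sticky -> partial -> full), retrying
| | //      after any collector that actually ran.
| | //   3. Retry with heap growth allowed: TryToAllocate<true, true>.
| | //   4. Run the most thorough GC with clear_soft_references == true, retry once more
| | //      with growth, and throw OutOfMemoryError if even that fails.
| | // At each step the allocation is abandoned if the allocator changed while we were
| | // suspended.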
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 929 | mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator, |
Mathieu Chartier | c528dba | 2013-11-26 12:00:11 -0800 | [diff] [blame] | 930 | size_t alloc_size, size_t* bytes_allocated, |
| 931 | mirror::Class** klass) { |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 932 | mirror::Object* ptr = nullptr; |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 933 | bool was_default_allocator = allocator == GetCurrentAllocator(); |
Mathieu Chartier | c528dba | 2013-11-26 12:00:11 -0800 | [diff] [blame] | 934 | DCHECK(klass != nullptr); |
| 935 | SirtRef<mirror::Class> sirt_klass(self, *klass); |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 936 | // The allocation failed. If the GC is running, block until it completes, and then retry the |
| 937 | // allocation. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 938 | collector::GcType last_gc = WaitForGcToComplete(self); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 939 | if (last_gc != collector::kGcTypeNone) { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 940 | // If we were the default allocator but the allocator changed while we were suspended, |
| 941 | // abort the allocation. |
| 942 | if (was_default_allocator && allocator != GetCurrentAllocator()) { |
| 943 | *klass = sirt_klass.get(); |
| 944 | return nullptr; |
| 945 | } |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 946 | // A GC was in progress and we blocked, retry allocation now that memory has been freed. |
Mathieu Chartier | c528dba | 2013-11-26 12:00:11 -0800 | [diff] [blame] | 947 | ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 948 | } |
| 949 | |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 950 |   // Loop through our different GC types and run them until one frees enough memory for the allocation.
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 951 | for (collector::GcType gc_type : gc_plan_) { |
| 952 | if (ptr != nullptr) { |
| 953 | break; |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 954 | } |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 955 | // Attempt to run the collector, if we succeed, re-try the allocation. |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 956 | bool gc_ran = |
| 957 | CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone; |
| 958 | if (was_default_allocator && allocator != GetCurrentAllocator()) { |
| 959 | *klass = sirt_klass.get(); |
| 960 | return nullptr; |
| 961 | } |
| 962 | if (gc_ran) { |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 963 | // Did we free sufficient memory for the allocation to succeed? |
Mathieu Chartier | c528dba | 2013-11-26 12:00:11 -0800 | [diff] [blame] | 964 | ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated); |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 965 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 966 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 967 | // Allocations have failed after GCs; this is an exceptional state. |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 968 | if (ptr == nullptr) { |
| 969 | // Try harder, growing the heap if necessary. |
Mathieu Chartier | c528dba | 2013-11-26 12:00:11 -0800 | [diff] [blame] | 970 | ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 971 | } |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 972 | if (ptr == nullptr) { |
| 973 | // Most allocations should have succeeded by now, so the heap is really full, really fragmented, |
| 974 | // or the requested size is really big. Do another GC, collecting SoftReferences this time. The |
| 975 | // VM spec requires that all SoftReferences have been collected and cleared before throwing |
| 976 | // OOME. |
| 977 | VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) |
| 978 | << " allocation"; |
| 979 | // TODO: Run finalization, but this may cause more allocations to occur. |
| 980 | // We don't need a WaitForGcToComplete here either. |
| 981 | DCHECK(!gc_plan_.empty()); |
| 982 | CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 983 | if (was_default_allocator && allocator != GetCurrentAllocator()) { |
| 984 | *klass = sirt_klass.get(); |
| 985 | return nullptr; |
| 986 | } |
Mathieu Chartier | c528dba | 2013-11-26 12:00:11 -0800 | [diff] [blame] | 987 | ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated); |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 988 | if (ptr == nullptr) { |
| 989 | ThrowOutOfMemoryError(self, alloc_size, false); |
| 990 | } |
| 991 | } |
Mathieu Chartier | c528dba | 2013-11-26 12:00:11 -0800 | [diff] [blame] | 992 | *klass = sirt_klass.get(); |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 993 | return ptr; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 994 | } |
| 995 | |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 996 | void Heap::SetTargetHeapUtilization(float target) { |
| 997 | DCHECK_GT(target, 0.0f); // asserted in Java code |
| 998 | DCHECK_LT(target, 1.0f); |
| 999 | target_utilization_ = target; |
| 1000 | } |
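| | // For example, a target of 0.5 asks the heap sizing logic to keep live data at about
| | // half the footprint: ~16MB of live objects would steer the ideal footprint toward
| | // ~32MB (illustrative values; see GrowForUtilization).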
| 1001 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1002 | size_t Heap::GetObjectsAllocated() const { |
| 1003 | size_t total = 0; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1004 | for (space::AllocSpace* space : alloc_spaces_) { |
| 1005 | total += space->GetObjectsAllocated(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1006 | } |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1007 | return total; |
| 1008 | } |
| 1009 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1010 | size_t Heap::GetObjectsAllocatedEver() const { |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1011 | return GetObjectsFreedEver() + GetObjectsAllocated(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1012 | } |
| 1013 | |
| 1014 | size_t Heap::GetBytesAllocatedEver() const { |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1015 | return GetBytesFreedEver() + GetBytesAllocated(); |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1016 | } |
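| | // The two "ever" getters above rely on a simple invariant: every object ever
| | // allocated is either still live or has been freed, so
| | //   allocated_ever == freed_ever + currently_allocated.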
| 1017 | |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 1018 | class InstanceCounter { |
| 1019 | public: |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1020 | InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts) |
Ian Rogers | b726dcb | 2012-09-05 08:57:23 -0700 | [diff] [blame] | 1021 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) |
Elliott Hughes | ec0f83d | 2013-01-15 16:54:08 -0800 | [diff] [blame] | 1022 | : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) { |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 1023 | } |
| 1024 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1025 | void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { |
Elliott Hughes | ec0f83d | 2013-01-15 16:54:08 -0800 | [diff] [blame] | 1026 | for (size_t i = 0; i < classes_.size(); ++i) { |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1027 | const mirror::Class* instance_class = o->GetClass(); |
Elliott Hughes | ec0f83d | 2013-01-15 16:54:08 -0800 | [diff] [blame] | 1028 | if (use_is_assignable_from_) { |
| 1029 | if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) { |
| 1030 | ++counts_[i]; |
| 1031 | } |
| 1032 | } else { |
| 1033 | if (instance_class == classes_[i]) { |
| 1034 | ++counts_[i]; |
| 1035 | } |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 1036 | } |
| 1037 | } |
| 1038 | } |
| 1039 | |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1040 | private: |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1041 | const std::vector<mirror::Class*>& classes_; |
Elliott Hughes | ec0f83d | 2013-01-15 16:54:08 -0800 | [diff] [blame] | 1042 | bool use_is_assignable_from_; |
| 1043 | uint64_t* const counts_; |
| 1044 | |
| 1045 | DISALLOW_COPY_AND_ASSIGN(InstanceCounter); |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 1046 | }; |
| 1047 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1048 | void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, |
Elliott Hughes | ec0f83d | 2013-01-15 16:54:08 -0800 | [diff] [blame] | 1049 | uint64_t* counts) { |
| 1050 | // We only want reachable instances, so do a GC. This also ensures that the alloc stack |
| 1051 | // is empty, so the live bitmap is the only place we need to look. |
| 1052 | Thread* self = Thread::Current(); |
| 1053 | self->TransitionFromRunnableToSuspended(kNative); |
| 1054 | CollectGarbage(false); |
| 1055 | self->TransitionFromSuspendedToRunnable(); |
| 1056 | |
| 1057 | InstanceCounter counter(classes, use_is_assignable_from, counts); |
| 1058 | ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1059 | GetLiveBitmap()->Visit(counter); |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 1060 | } |
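| | // A minimal usage sketch (hypothetical caller; assumes 'string_class' was resolved
| | // elsewhere):
| | //   std::vector<mirror::Class*> classes;
| | //   classes.push_back(string_class);
| | //   uint64_t counts[1] = {0};
| | //   Runtime::Current()->GetHeap()->CountInstances(classes, false, counts);
| | //   // counts[0] now holds the number of live java.lang.String instances.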
| 1061 | |
Elliott Hughes | 3b78c94 | 2013-01-15 17:35:41 -0800 | [diff] [blame] | 1062 | class InstanceCollector { |
| 1063 | public: |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1064 | InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances) |
Elliott Hughes | 3b78c94 | 2013-01-15 17:35:41 -0800 | [diff] [blame] | 1065 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) |
| 1066 | : class_(c), max_count_(max_count), instances_(instances) { |
| 1067 | } |
| 1068 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1069 | void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { |
| 1070 | const mirror::Class* instance_class = o->GetClass(); |
Elliott Hughes | 3b78c94 | 2013-01-15 17:35:41 -0800 | [diff] [blame] | 1071 | if (instance_class == class_) { |
| 1072 | if (max_count_ == 0 || instances_.size() < max_count_) { |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1073 | instances_.push_back(const_cast<mirror::Object*>(o)); |
Elliott Hughes | 3b78c94 | 2013-01-15 17:35:41 -0800 | [diff] [blame] | 1074 | } |
| 1075 | } |
| 1076 | } |
| 1077 | |
| 1078 | private: |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1079 | mirror::Class* class_; |
Elliott Hughes | 3b78c94 | 2013-01-15 17:35:41 -0800 | [diff] [blame] | 1080 | uint32_t max_count_; |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1081 | std::vector<mirror::Object*>& instances_; |
Elliott Hughes | 3b78c94 | 2013-01-15 17:35:41 -0800 | [diff] [blame] | 1082 | |
| 1083 | DISALLOW_COPY_AND_ASSIGN(InstanceCollector); |
| 1084 | }; |
| 1085 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1086 | void Heap::GetInstances(mirror::Class* c, int32_t max_count, |
| 1087 | std::vector<mirror::Object*>& instances) { |
Elliott Hughes | 3b78c94 | 2013-01-15 17:35:41 -0800 | [diff] [blame] | 1088 | // We only want reachable instances, so do a GC. This also ensures that the alloc stack |
| 1089 | // is empty, so the live bitmap is the only place we need to look. |
| 1090 | Thread* self = Thread::Current(); |
| 1091 | self->TransitionFromRunnableToSuspended(kNative); |
| 1092 | CollectGarbage(false); |
| 1093 | self->TransitionFromSuspendedToRunnable(); |
| 1094 | |
| 1095 | InstanceCollector collector(c, max_count, instances); |
| 1096 | ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); |
| 1097 | GetLiveBitmap()->Visit(collector); |
| 1098 | } |
| 1099 | |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 1100 | class ReferringObjectsFinder { |
| 1101 | public: |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1102 | ReferringObjectsFinder(mirror::Object* object, int32_t max_count, |
| 1103 | std::vector<mirror::Object*>& referring_objects) |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 1104 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) |
| 1105 | : object_(object), max_count_(max_count), referring_objects_(referring_objects) { |
| 1106 | } |
| 1107 | |
| 1108 | // For bitmap Visit. |
| 1109 | // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for |
| 1110 | // annotalysis on visitors. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1111 | void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS { |
| 1112 | collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true); |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 1113 | } |
| 1114 | |
| 1115 | // For MarkSweep::VisitObjectReferences. |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 1116 | void operator()(mirror::Object* referrer, mirror::Object* object, |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 1117 | const MemberOffset&, bool) const { |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 1118 | if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) { |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 1119 | referring_objects_.push_back(referrer); |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 1120 | } |
| 1121 | } |
| 1122 | |
| 1123 | private: |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1124 | mirror::Object* object_; |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 1125 | uint32_t max_count_; |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1126 | std::vector<mirror::Object*>& referring_objects_; |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 1127 | |
| 1128 | DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder); |
| 1129 | }; |
| 1130 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1131 | void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count, |
| 1132 | std::vector<mirror::Object*>& referring_objects) { |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 1133 | // We only want reachable instances, so do a GC. This also ensures that the alloc stack |
| 1134 | // is empty, so the live bitmap is the only place we need to look. |
| 1135 | Thread* self = Thread::Current(); |
| 1136 | self->TransitionFromRunnableToSuspended(kNative); |
| 1137 | CollectGarbage(false); |
| 1138 | self->TransitionFromSuspendedToRunnable(); |
| 1139 | |
| 1140 | ReferringObjectsFinder finder(o, max_count, referring_objects); |
| 1141 | ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); |
| 1142 | GetLiveBitmap()->Visit(finder); |
| 1143 | } |
| 1144 | |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame] | 1145 | void Heap::CollectGarbage(bool clear_soft_references) { |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 1146 |   // Even if we waited for a GC we still need to do another GC since weak references allocated
| 1147 |   // during the last GC will not necessarily have been cleared.
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 1148 | CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1149 | } |
| 1150 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1151 | void Heap::TransitionCollector(CollectorType collector_type) { |
| 1152 | if (collector_type == collector_type_) { |
| 1153 | return; |
| 1154 | } |
| 1155 | uint64_t start_time = NanoTime(); |
| 1156 | int32_t before_size = GetTotalMemory(); |
| 1157 | int32_t before_allocated = num_bytes_allocated_.Load(); |
| 1158 | ThreadList* tl = Runtime::Current()->GetThreadList(); |
| 1159 | Thread* self = Thread::Current(); |
| 1160 | ScopedThreadStateChange tsc(self, kWaitingPerformingGc); |
| 1161 | Locks::mutator_lock_->AssertNotHeld(self); |
| 1162 |   // Busy wait until we can GC (StartGC can fail if we have a non-zero gc_disable_count_,
| 1163 |   // though this rarely occurs).
| 1164 | while (!StartGC(self)) { |
| 1165 | usleep(100); |
| 1166 | } |
| 1167 | tl->SuspendAll(); |
| 1168 | switch (collector_type) { |
Hiroshi Yamauchi | 6f4ffe4 | 2014-01-13 12:30:44 -0800 | [diff] [blame^] | 1169 | case kCollectorTypeSS: |
| 1170 | case kCollectorTypeGSS: { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1171 | mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE); |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1172 | CHECK(main_space_ != nullptr); |
| 1173 | Compact(temp_space_, main_space_); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1174 | DCHECK(allocator_mem_map_.get() == nullptr); |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1175 | allocator_mem_map_.reset(main_space_->ReleaseMemMap()); |
| 1176 | madvise(main_space_->Begin(), main_space_->Size(), MADV_DONTNEED); |
Mathieu Chartier | a4b95a2 | 2014-01-09 18:08:43 -0800 | [diff] [blame] | 1177 | // RemoveSpace deletes the removed space. |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1178 | RemoveSpace(main_space_); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1179 | break; |
| 1180 | } |
| 1181 | case kCollectorTypeMS: |
| 1182 | // Fall through. |
| 1183 | case kCollectorTypeCMS: { |
Hiroshi Yamauchi | 6f4ffe4 | 2014-01-13 12:30:44 -0800 | [diff] [blame^] | 1184 | if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1185 | // TODO: Use mem-map from temp space? |
| 1186 | MemMap* mem_map = allocator_mem_map_.release(); |
| 1187 | CHECK(mem_map != nullptr); |
| 1188 | size_t initial_size = kDefaultInitialSize; |
| 1189 | mprotect(mem_map->Begin(), initial_size, PROT_READ | PROT_WRITE); |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1190 | CHECK(main_space_ == nullptr); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1191 | if (kUseRosAlloc) { |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1192 | main_space_ = |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1193 | space::RosAllocSpace::CreateFromMemMap(mem_map, "alloc space", kPageSize, |
| 1194 | initial_size, mem_map->Size(), |
| 1195 | mem_map->Size(), low_memory_mode_); |
| 1196 | } else { |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1197 | main_space_ = |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1198 | space::DlMallocSpace::CreateFromMemMap(mem_map, "alloc space", kPageSize, |
| 1199 | initial_size, mem_map->Size(), |
| 1200 | mem_map->Size()); |
| 1201 | } |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1202 | main_space_->SetFootprintLimit(main_space_->Capacity()); |
| 1203 | AddSpace(main_space_); |
| 1204 | Compact(main_space_, bump_pointer_space_); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1205 | } |
| 1206 | break; |
| 1207 | } |
| 1208 | default: { |
| 1209 | LOG(FATAL) << "Attempted to transition to invalid collector type"; |
| 1210 | break; |
| 1211 | } |
| 1212 | } |
| 1213 | ChangeCollector(collector_type); |
| 1214 | tl->ResumeAll(); |
| 1215 | // Can't call into java code with all threads suspended. |
| 1216 | EnqueueClearedReferences(); |
| 1217 | uint64_t duration = NanoTime() - start_time; |
| 1218 | GrowForUtilization(collector::kGcTypeFull, duration); |
| 1219 | FinishGC(self, collector::kGcTypeFull); |
| 1220 | int32_t after_size = GetTotalMemory(); |
| 1221 | int32_t delta_size = before_size - after_size; |
| 1222 | int32_t after_allocated = num_bytes_allocated_.Load(); |
| 1223 | int32_t delta_allocated = before_allocated - after_allocated; |
| 1224 | const std::string saved_bytes_str = |
| 1225 | delta_size < 0 ? "-" + PrettySize(-delta_size) : PrettySize(delta_size); |
| 1226 |   LOG(INFO) << "Heap transition to " << process_state_ << " took "
| 1227 |       << PrettyDuration(duration) << " " << PrettySize(before_size) << "->"
| 1228 |       << PrettySize(after_size) << " (" << delta_allocated << " fewer bytes allocated), "
| 1229 |       << saved_bytes_str << " saved";
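| | // e.g. "Heap transition to <process state> took 30ms 64MB->48MB (2097152 fewer
| | //       bytes allocated), 16MB saved" (illustrative values).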
| 1230 | } |
| 1231 | |
Mathieu Chartier | 0de9f73 | 2013-11-22 17:58:48 -0800 | [diff] [blame] | 1232 | void Heap::ChangeCollector(CollectorType collector_type) { |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 1233 | // TODO: Only do this with all mutators suspended to avoid races. |
| 1234 | if (collector_type != collector_type_) { |
| 1235 | collector_type_ = collector_type; |
| 1236 | gc_plan_.clear(); |
| 1237 | switch (collector_type_) { |
Hiroshi Yamauchi | 6f4ffe4 | 2014-01-13 12:30:44 -0800 | [diff] [blame^] | 1238 | case kCollectorTypeSS: |
| 1239 | case kCollectorTypeGSS: { |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 1240 | concurrent_gc_ = false; |
| 1241 | gc_plan_.push_back(collector::kGcTypeFull); |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1242 | if (use_tlab_) { |
| 1243 | ChangeAllocator(kAllocatorTypeTLAB); |
| 1244 | } else { |
| 1245 | ChangeAllocator(kAllocatorTypeBumpPointer); |
| 1246 | } |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 1247 | break; |
| 1248 | } |
| 1249 | case kCollectorTypeMS: { |
| 1250 | concurrent_gc_ = false; |
| 1251 | gc_plan_.push_back(collector::kGcTypeSticky); |
| 1252 | gc_plan_.push_back(collector::kGcTypePartial); |
| 1253 | gc_plan_.push_back(collector::kGcTypeFull); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1254 | ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc); |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 1255 | break; |
| 1256 | } |
| 1257 | case kCollectorTypeCMS: { |
| 1258 | concurrent_gc_ = true; |
| 1259 | gc_plan_.push_back(collector::kGcTypeSticky); |
| 1260 | gc_plan_.push_back(collector::kGcTypePartial); |
| 1261 | gc_plan_.push_back(collector::kGcTypeFull); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1262 | ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc); |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 1263 | break; |
| 1264 | } |
| 1265 | default: { |
| 1266 | LOG(FATAL) << "Unimplemented"; |
| 1267 | } |
Mathieu Chartier | 0de9f73 | 2013-11-22 17:58:48 -0800 | [diff] [blame] | 1268 | } |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 1269 | if (concurrent_gc_) { |
| 1270 | concurrent_start_bytes_ = |
| 1271 | std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes; |
| 1272 | } else { |
| 1273 | concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); |
Mathieu Chartier | 0de9f73 | 2013-11-22 17:58:48 -0800 | [diff] [blame] | 1274 | } |
| 1275 | } |
| 1276 | } |
| 1277 | |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1278 | static void MarkInBitmapCallback(mirror::Object* obj, void* arg) { |
| 1279 | reinterpret_cast<accounting::SpaceBitmap*>(arg)->Set(obj); |
| 1280 | } |
| 1281 | |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 1282 | // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size. |
| 1283 | class ZygoteCompactingCollector : public collector::SemiSpace { |
| 1284 | public: |
| 1285 | explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, "zygote collector") { |
| 1286 | } |
| 1287 | |
| 1288 | void BuildBins(space::ContinuousSpace* space) { |
| 1289 | bin_live_bitmap_ = space->GetLiveBitmap(); |
| 1290 | bin_mark_bitmap_ = space->GetMarkBitmap(); |
| 1291 | BinContext context; |
| 1292 | context.prev_ = reinterpret_cast<uintptr_t>(space->Begin()); |
| 1293 | context.collector_ = this; |
| 1294 | WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); |
| 1295 | // Note: This requires traversing the space in increasing order of object addresses. |
| 1296 | bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context)); |
| 1297 | // Add the last bin which spans after the last object to the end of the space. |
| 1298 | AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_); |
| 1299 | } |
| 1300 | |
| 1301 | private: |
| 1302 | struct BinContext { |
| 1303 | uintptr_t prev_; // The end of the previous object. |
| 1304 | ZygoteCompactingCollector* collector_; |
| 1305 | }; |
| 1306 | // Maps from bin sizes to locations. |
| 1307 | std::multimap<size_t, uintptr_t> bins_; |
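| | // e.g. after BuildBins over gaps of 8, 24 and 24 bytes, bins_ holds
| | // {8 -> addrA, 24 -> addrB, 24 -> addrC}; lower_bound(16) then finds a 24-byte bin
| | // (illustrative addresses).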
| 1308 | // Live bitmap of the space which contains the bins. |
| 1309 | accounting::SpaceBitmap* bin_live_bitmap_; |
| 1310 | // Mark bitmap of the space which contains the bins. |
| 1311 | accounting::SpaceBitmap* bin_mark_bitmap_; |
| 1312 | |
| 1313 | static void Callback(mirror::Object* obj, void* arg) |
| 1314 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { |
| 1315 | DCHECK(arg != nullptr); |
| 1316 | BinContext* context = reinterpret_cast<BinContext*>(arg); |
| 1317 | ZygoteCompactingCollector* collector = context->collector_; |
| 1318 | uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj); |
| 1319 | size_t bin_size = object_addr - context->prev_; |
| 1320 |     // Add the bin spanning from the end of the previous object to the start of the current one.
| 1321 | collector->AddBin(bin_size, context->prev_); |
| 1322 | context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment); |
| 1323 | } |
| 1324 | |
| 1325 | void AddBin(size_t size, uintptr_t position) { |
| 1326 | if (size != 0) { |
| 1327 | bins_.insert(std::make_pair(size, position)); |
| 1328 | } |
| 1329 | } |
| 1330 | |
| 1331 | virtual bool ShouldSweepSpace(space::MallocSpace* space) const { |
| 1332 | // Don't sweep any spaces since we probably blasted the internal accounting of the free list |
| 1333 | // allocator. |
| 1334 | return false; |
| 1335 | } |
| 1336 | |
| 1337 | virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj) |
| 1338 | EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { |
| 1339 | size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment); |
Mathieu Chartier | 5dc08a6 | 2014-01-10 10:10:23 -0800 | [diff] [blame] | 1340 | mirror::Object* forward_address; |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 1341 | // Find the smallest bin which we can move obj in. |
| 1342 | auto it = bins_.lower_bound(object_size); |
| 1343 | if (it == bins_.end()) { |
| 1344 | // No available space in the bins, place it in the target space instead (grows the zygote |
| 1345 | // space). |
Mathieu Chartier | 5dc08a6 | 2014-01-10 10:10:23 -0800 | [diff] [blame] | 1346 | size_t bytes_allocated; |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 1347 | forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated); |
| 1348 | if (to_space_live_bitmap_ != nullptr) { |
| 1349 | to_space_live_bitmap_->Set(forward_address); |
| 1350 | } |
| 1351 | } else { |
| 1352 | size_t size = it->first; |
| 1353 | uintptr_t pos = it->second; |
| 1354 | bins_.erase(it); // Erase the old bin which we replace with the new smaller bin. |
| 1355 | forward_address = reinterpret_cast<mirror::Object*>(pos); |
| 1356 | // Set the live and mark bits so that sweeping system weaks works properly. |
| 1357 | bin_live_bitmap_->Set(forward_address); |
| 1358 | bin_mark_bitmap_->Set(forward_address); |
| 1359 | DCHECK_GE(size, object_size); |
| 1360 | AddBin(size - object_size, pos + object_size); // Add a new bin with the remaining space. |
| 1361 | } |
| 1362 | // Copy the object over to its new location. |
| 1363 | memcpy(reinterpret_cast<void*>(forward_address), obj, object_size); |
| 1364 | return forward_address; |
| 1365 | } |
| 1366 | }; |
| 1367 | |
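| | // One-time reshaping of the heap before the zygote forks, so child processes share
| | // as many pre-initialized pages as possible. Roughly:
| | //   1. Full GC, then trim and re-protect the non moving space.
| | //   2. With a moving collector, bin-pack the bump pointer space into the tail of the
| | //      non moving space using ZygoteCompactingCollector.
| | //   3. CreateZygoteSpace() turns the packed space into a zygote space (collected only
| | //      at full GCs) and splits off a new main space; a card-cache mod-union table
| | //      tracks references from the zygote space into the rest of the heap.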
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1368 | void Heap::PreZygoteFork() { |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1369 | static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 1370 | Thread* self = Thread::Current(); |
| 1371 | MutexLock mu(self, zygote_creation_lock_); |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1372 | // Try to see if we have any Zygote spaces. |
| 1373 | if (have_zygote_space_) { |
| 1374 | return; |
| 1375 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1376 | VLOG(heap) << "Starting PreZygoteFork"; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1377 | CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false); |
| 1378 | // Trim the pages at the end of the non moving space. |
| 1379 | non_moving_space_->Trim(); |
| 1380 | non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE); |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 1381 | // Change the collector to the post zygote one. |
| 1382 | ChangeCollector(post_zygote_collector_type_); |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 1383 | // TODO: Delete bump_pointer_space_ and temp_pointer_space_? |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1384 | if (semi_space_collector_ != nullptr) { |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 1385 | ZygoteCompactingCollector zygote_collector(this); |
| 1386 | zygote_collector.BuildBins(non_moving_space_); |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 1387 | // Create a new bump pointer space which we will compact into. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1388 | space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(), |
| 1389 | non_moving_space_->Limit()); |
| 1390 | // Compact the bump pointer space to a new zygote bump pointer space. |
| 1391 | temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE); |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 1392 | zygote_collector.SetFromSpace(bump_pointer_space_); |
| 1393 | zygote_collector.SetToSpace(&target_space); |
Hiroshi Yamauchi | 6f4ffe4 | 2014-01-13 12:30:44 -0800 | [diff] [blame^] | 1394 | zygote_collector.Run(kGcCauseCollectorTransition, false); |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1395 | CHECK(temp_space_->IsEmpty()); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1396 | total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects(); |
| 1397 | total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes(); |
| 1398 | // Update the end and write out image. |
| 1399 | non_moving_space_->SetEnd(target_space.End()); |
| 1400 | non_moving_space_->SetLimit(target_space.Limit()); |
| 1401 | accounting::SpaceBitmap* bitmap = non_moving_space_->GetLiveBitmap(); |
| 1402 | // Record the allocations in the bitmap. |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 1403 | VLOG(heap) << "Zygote size " << non_moving_space_->Size() << " bytes"; |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1404 | target_space.Walk(MarkInBitmapCallback, bitmap); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1405 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1406 | // Turn the current alloc space into a zygote space and obtain the new alloc space composed of |
| 1407 | // the remaining available heap memory. |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1408 | space::MallocSpace* zygote_space = non_moving_space_; |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1409 | main_space_ = non_moving_space_->CreateZygoteSpace("alloc space", low_memory_mode_); |
| 1410 | if (main_space_->IsRosAllocSpace()) { |
| 1411 | rosalloc_space_ = main_space_->AsRosAllocSpace(); |
| 1412 | } else if (main_space_->IsDlMallocSpace()) { |
| 1413 | dlmalloc_space_ = main_space_->AsDlMallocSpace(); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1414 | } |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1415 | main_space_->SetFootprintLimit(main_space_->Capacity()); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1416 | // Change the GC retention policy of the zygote space to only collect when full. |
| 1417 | zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect); |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1418 | AddSpace(main_space_); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1419 | have_zygote_space_ = true; |
Mathieu Chartier | e01b540 | 2014-01-13 14:37:11 -0800 | [diff] [blame] | 1420 | // Remove the zygote space from alloc_spaces_ array since not doing so causes crashes in |
| 1421 | // GetObjectsAllocated. This happens because the bin packing blows away the internal accounting |
| 1422 | // stored in between objects. |
| 1423 | if (zygote_space->IsAllocSpace()) { |
| 1424 | // TODO: Refactor zygote spaces to be a new space type to avoid more of these types of issues. |
| 1425 | auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), zygote_space->AsAllocSpace()); |
| 1426 | CHECK(it != alloc_spaces_.end()); |
| 1427 | alloc_spaces_.erase(it); |
| 1428 | zygote_space->InvalidateAllocator(); |
| 1429 | } |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 1430 | // Create the zygote space mod union table. |
| 1431 | accounting::ModUnionTable* mod_union_table = |
| 1432 | new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space); |
| 1433 | CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table"; |
| 1434 | AddModUnionTable(mod_union_table); |
Ian Rogers | 5f5a2c0 | 2012-09-17 10:52:08 -0700 | [diff] [blame] | 1435 | // Reset the cumulative loggers since we now have a few additional timing phases. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1436 | for (const auto& collector : garbage_collectors_) { |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1437 | collector->ResetCumulativeStatistics(); |
Mathieu Chartier | 0325e62 | 2012-09-05 14:22:51 -0700 | [diff] [blame] | 1438 | } |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 1439 | // Can't use RosAlloc for non moving space due to thread local buffers. |
| 1440 | // TODO: Non limited space for non-movable objects? |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1441 | space::MallocSpace* new_non_moving_space |
| 1442 | = space::DlMallocSpace::Create("Non moving dlmalloc space", 2 * MB, 64 * MB, 64 * MB, |
| 1443 | nullptr); |
| 1444 | AddSpace(new_non_moving_space, false); |
| 1445 | CHECK(new_non_moving_space != nullptr) << "Failed to create new non-moving space"; |
| 1446 | new_non_moving_space->SetFootprintLimit(new_non_moving_space->Capacity()); |
| 1447 | non_moving_space_ = new_non_moving_space; |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1448 | } |
| 1449 | |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1450 | void Heap::FlushAllocStack() { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1451 | MarkAllocStackAsLive(allocation_stack_.get()); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1452 | allocation_stack_->Reset(); |
| 1453 | } |
| 1454 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1455 | void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap1, |
| 1456 | accounting::SpaceBitmap* bitmap2, |
Mathieu Chartier | db7f37d | 2014-01-10 11:09:06 -0800 | [diff] [blame] | 1457 | accounting::ObjectSet* large_objects, |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1458 | accounting::ObjectStack* stack) { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1459 | DCHECK(bitmap1 != nullptr); |
| 1460 | DCHECK(bitmap2 != nullptr); |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1461 | mirror::Object** limit = stack->End(); |
| 1462 | for (mirror::Object** it = stack->Begin(); it != limit; ++it) { |
| 1463 | const mirror::Object* obj = *it; |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1464 | DCHECK(obj != nullptr); |
| 1465 | if (bitmap1->HasAddress(obj)) { |
| 1466 | bitmap1->Set(obj); |
| 1467 | } else if (bitmap2->HasAddress(obj)) { |
| 1468 | bitmap2->Set(obj); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1469 | } else { |
| 1470 | large_objects->Set(obj); |
| 1471 | } |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1472 | } |
| 1473 | } |
| 1474 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1475 | void Heap::SwapSemiSpaces() { |
| 1476 | // Swap the spaces so we allocate into the space which we just evacuated. |
| 1477 | std::swap(bump_pointer_space_, temp_space_); |
| 1478 | } |
| 1479 | |
| 1480 | void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space, |
| 1481 | space::ContinuousMemMapAllocSpace* source_space) { |
| 1482 | CHECK(kMovingCollector); |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 1483 | CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported"; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1484 | if (target_space != source_space) { |
| 1485 | semi_space_collector_->SetFromSpace(source_space); |
| 1486 | semi_space_collector_->SetToSpace(target_space); |
Hiroshi Yamauchi | 6f4ffe4 | 2014-01-13 12:30:44 -0800 | [diff] [blame^] | 1487 | semi_space_collector_->Run(kGcCauseCollectorTransition, false); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1488 | } |
| 1489 | } |
Anwar Ghuloum | 67f9941 | 2013-08-12 14:19:48 -0700 | [diff] [blame] | 1490 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1491 | collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause, |
| 1492 | bool clear_soft_references) { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 1493 | Thread* self = Thread::Current(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1494 | Runtime* runtime = Runtime::Current(); |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 1495 | // If the heap can't run the GC, silently fail and return that no GC was run. |
| 1496 | switch (gc_type) { |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 1497 | case collector::kGcTypePartial: { |
| 1498 | if (!have_zygote_space_) { |
| 1499 | return collector::kGcTypeNone; |
| 1500 | } |
| 1501 | break; |
| 1502 | } |
| 1503 | default: { |
| 1504 |       // Other GC types don't have any special cases which prevent them from running. The main
| 1505 |       // case here is full GC.
| 1506 | } |
| 1507 | } |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 1508 | ScopedThreadStateChange tsc(self, kWaitingPerformingGc); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 1509 | Locks::mutator_lock_->AssertNotHeld(self); |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 1510 | if (self->IsHandlingStackOverflow()) { |
| 1511 | LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow."; |
| 1512 | } |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1513 | gc_complete_lock_->AssertNotHeld(self); |
| 1514 | if (!StartGC(self)) { |
| 1515 | return collector::kGcTypeNone; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1516 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1517 | if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) { |
| 1518 | ++runtime->GetStats()->gc_for_alloc_count; |
| 1519 | ++self->GetStats()->gc_for_alloc_count; |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 1520 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1521 | uint64_t gc_start_time_ns = NanoTime(); |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 1522 | uint64_t gc_start_size = GetBytesAllocated(); |
| 1523 | // Approximate allocation rate in bytes / second. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1524 | uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1525 |   // Back-to-back GCs can cause 0 ms of wait time in between GC invocations.
| 1526 | if (LIKELY(ms_delta != 0)) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1527 | allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta; |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 1528 | VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s"; |
| 1529 | } |
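| | // e.g. 6MB allocated in the 3000ms since the last GC gives allocation_rate_ of 2MB/s,
| | // which the heap uses when deciding how early a concurrent GC must start
| | // (illustrative values).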
| 1530 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1531 | DCHECK_LT(gc_type, collector::kGcTypeMax); |
| 1532 | DCHECK_NE(gc_type, collector::kGcTypeNone); |
Anwar Ghuloum | 67f9941 | 2013-08-12 14:19:48 -0700 | [diff] [blame] | 1533 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1534 | collector::GarbageCollector* collector = nullptr; |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 1535 | // TODO: Clean this up. |
Hiroshi Yamauchi | 6f4ffe4 | 2014-01-13 12:30:44 -0800 | [diff] [blame^] | 1536 | if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) { |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1537 | DCHECK(current_allocator_ == kAllocatorTypeBumpPointer || |
| 1538 | current_allocator_ == kAllocatorTypeTLAB); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1539 | gc_type = semi_space_collector_->GetGcType(); |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1540 | CHECK(temp_space_->IsEmpty()); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1541 | semi_space_collector_->SetFromSpace(bump_pointer_space_); |
| 1542 | semi_space_collector_->SetToSpace(temp_space_); |
| 1543 | mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE); |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 1544 | collector = semi_space_collector_; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1545 | gc_type = collector::kGcTypeFull; |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1546 | } else if (current_allocator_ == kAllocatorTypeRosAlloc || |
| 1547 | current_allocator_ == kAllocatorTypeDlMalloc) { |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 1548 | for (const auto& cur_collector : garbage_collectors_) { |
| 1549 | if (cur_collector->IsConcurrent() == concurrent_gc_ && |
| 1550 | cur_collector->GetGcType() == gc_type) { |
| 1551 | collector = cur_collector; |
| 1552 | break; |
| 1553 | } |
| 1554 | } |
| 1555 | } else { |
| 1556 | LOG(FATAL) << "Invalid current allocator " << current_allocator_; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1557 | } |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 1558 | CHECK(collector != nullptr) |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1559 | << "Could not find garbage collector with concurrent=" << concurrent_gc_ |
| 1560 | << " and type=" << gc_type; |
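  // To summarize the selection above: the semi-space collector is used whenever the current
  // allocator is bump-pointer or TLAB (i.e. for kCollectorTypeSS / kCollectorTypeGSS), and
  // otherwise a mark-sweep variant is picked by matching both its concurrency and the
  // requested gc_type (sticky, partial or full).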
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1561 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1562 | ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str()); |
| 1563 | |
Hiroshi Yamauchi | 6f4ffe4 | 2014-01-13 12:30:44 -0800 | [diff] [blame^] | 1564 | collector->Run(gc_cause, clear_soft_references); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1565 | total_objects_freed_ever_ += collector->GetFreedObjects(); |
| 1566 | total_bytes_freed_ever_ += collector->GetFreedBytes(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1567 | |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 1568 | // Enqueue cleared references. |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1569 | Locks::mutator_lock_->AssertNotHeld(self); |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 1570 | EnqueueClearedReferences(); |
| 1571 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1572 | // Grow the heap so that we know when to perform the next GC. |
| 1573 | GrowForUtilization(gc_type, collector->GetDurationNs()); |
| 1574 | |
Mathieu Chartier | ca2a24d | 2013-11-25 15:12:12 -0800 | [diff] [blame] | 1575 | if (CareAboutPauseTimes()) { |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1576 | const size_t duration = collector->GetDurationNs(); |
| 1577 | std::vector<uint64_t> pauses = collector->GetPauseTimes(); |
| 1578 | // GC for alloc pauses the allocating thread, so consider it as a pause. |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 1579 | bool was_slow = duration > long_gc_log_threshold_ || |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1580 | (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_); |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1581 | if (!was_slow) { |
| 1582 | for (uint64_t pause : pauses) { |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 1583 | was_slow = was_slow || pause > long_pause_log_threshold_; |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1584 | } |
| 1585 | } |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1586 | if (was_slow) { |
| 1587 | const size_t percent_free = GetPercentFree(); |
| 1588 | const size_t current_heap_size = GetBytesAllocated(); |
| 1589 | const size_t total_memory = GetTotalMemory(); |
| 1590 | std::ostringstream pause_string; |
| 1591 | for (size_t i = 0; i < pauses.size(); ++i) { |
| 1592 | pause_string << PrettyDuration((pauses[i] / 1000) * 1000) |
| 1593 | << ((i != pauses.size() - 1) ? ", " : ""); |
| 1594 | } |
| 1595 | LOG(INFO) << gc_cause << " " << collector->GetName() |
| 1596 | << " GC freed " << collector->GetFreedObjects() << "(" |
| 1597 | << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, " |
| 1598 | << collector->GetFreedLargeObjects() << "(" |
| 1599 | << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, " |
| 1600 | << percent_free << "% free, " << PrettySize(current_heap_size) << "/" |
| 1601 | << PrettySize(total_memory) << ", " << "paused " << pause_string.str() |
| 1602 | << " total " << PrettyDuration((duration / 1000) * 1000); |
| 1603 | if (VLOG_IS_ON(heap)) { |
Ian Rogers | 5fe9af7 | 2013-11-14 00:17:20 -0800 | [diff] [blame] | 1604 | LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings()); |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1605 | } |
| 1606 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1607 | } |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1608 | FinishGC(self, gc_type); |
Mathieu Chartier | 752a0e6 | 2013-06-27 11:03:27 -0700 | [diff] [blame] | 1609 | ATRACE_END(); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1610 | |
| 1611 | // Inform DDMS that a GC completed. |
Ian Rogers | 15bf2d3 | 2012-08-28 17:33:04 -0700 | [diff] [blame] | 1612 | Dbg::GcDidFinish(); |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 1613 | return gc_type; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1614 | } |
Mathieu Chartier | a639903 | 2012-06-11 18:49:50 -0700 | [diff] [blame] | 1615 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1616 | bool Heap::StartGC(Thread* self) { |
| 1617 | MutexLock mu(self, *gc_complete_lock_); |
| 1618 | // Ensure there is only one GC at a time. |
| 1619 | WaitForGcToCompleteLocked(self); |
| 1620 | // TODO: if another thread beat this one to do the GC, perhaps we should just return here? |
| 1621 |   // We don't do this at the moment, to ensure soft references are cleared.
| 1622 |   // GC can be disabled if someone is inside an unreleased GetPrimitiveArrayCritical.
| 1623 | if (gc_disable_count_ != 0) { |
| 1624 | LOG(WARNING) << "Skipping GC due to disable count " << gc_disable_count_; |
| 1625 | return false; |
| 1626 | } |
| 1627 | is_gc_running_ = true; |
| 1628 | return true; |
| 1629 | } |
| 1630 | |
| 1631 | void Heap::FinishGC(Thread* self, collector::GcType gc_type) { |
| 1632 | MutexLock mu(self, *gc_complete_lock_); |
| 1633 | is_gc_running_ = false; |
| 1634 | last_gc_type_ = gc_type; |
| 1635 | // Wake anyone who may have been waiting for the GC to complete. |
| 1636 | gc_complete_cond_->Broadcast(self); |
| 1637 | } |
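// A minimal sketch of how these two helpers bracket a collection (compare
// CollectGarbageInternal above); error handling elided:
//
//   if (StartGC(self)) {        // Waits for any running GC and checks the disable count.
//     collector->Run(gc_cause, clear_soft_references);
//     FinishGC(self, gc_type);  // Clears is_gc_running_ and wakes any waiters.
//   }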
| 1638 | |
Mathieu Chartier | 423d2a3 | 2013-09-12 17:33:56 -0700 | [diff] [blame] | 1639 | static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg) { |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1640 | mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1641 | if (root == obj) { |
| 1642 | LOG(INFO) << "Object " << obj << " is a root"; |
| 1643 | } |
Mathieu Chartier | 423d2a3 | 2013-09-12 17:33:56 -0700 | [diff] [blame] | 1644 | return root; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1645 | } |
| 1646 | |
| 1647 | class ScanVisitor { |
| 1648 | public: |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 1649 | void operator()(const mirror::Object* obj) const { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1650 | LOG(ERROR) << "Would have rescanned object " << obj; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1651 | } |
| 1652 | }; |
| 1653 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1654 | // Verify a reference from an object. |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1655 | class VerifyReferenceVisitor { |
| 1656 | public: |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 1657 | explicit VerifyReferenceVisitor(Heap* heap) |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1658 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 1659 | : heap_(heap), failed_(false) {} |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1660 | |
| 1661 | bool Failed() const { |
| 1662 | return failed_; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1663 | } |
| 1664 | |
| 1665 | // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1666 | // analysis on visitors. |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 1667 | void operator()(const mirror::Object* obj, const mirror::Object* ref, |
| 1668 | const MemberOffset& offset, bool /* is_static */) const |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1669 | NO_THREAD_SAFETY_ANALYSIS { |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1670 | // Verify that the reference is live. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1671 | if (UNLIKELY(ref != NULL && !IsLive(ref))) { |
| 1672 | accounting::CardTable* card_table = heap_->GetCardTable(); |
| 1673 | accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get(); |
| 1674 | accounting::ObjectStack* live_stack = heap_->live_stack_.get(); |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1675 | if (!failed_) { |
| 1676 |       // Print the message only on the first failure to prevent spam.
| 1677 | LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!"; |
| 1678 | failed_ = true; |
| 1679 | } |
| 1680 | if (obj != nullptr) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1681 | byte* card_addr = card_table->CardFromAddr(obj); |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1682 | LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset " |
| 1683 | << offset << "\n card value = " << static_cast<int>(*card_addr); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1684 | if (heap_->IsValidObjectAddress(obj->GetClass())) { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1685 | LOG(ERROR) << "Obj type " << PrettyTypeOf(obj); |
| 1686 | } else { |
| 1687 | LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address"; |
| 1688 | } |
| 1689 | |
| 1690 |         // Attempt to find the class among the recently freed objects.
| 1691 | space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true); |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1692 | if (ref_space != nullptr && ref_space->IsMallocSpace()) { |
| 1693 | space::MallocSpace* space = ref_space->AsMallocSpace(); |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1694 | mirror::Class* ref_class = space->FindRecentFreedObject(ref); |
| 1695 | if (ref_class != nullptr) { |
| 1696 | LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class " |
| 1697 | << PrettyClass(ref_class); |
| 1698 | } else { |
| 1699 | LOG(ERROR) << "Reference " << ref << " not found as a recently freed object"; |
| 1700 | } |
| 1701 | } |
| 1702 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1703 | if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) && |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1704 | ref->GetClass()->IsClass()) { |
| 1705 | LOG(ERROR) << "Ref type " << PrettyTypeOf(ref); |
| 1706 | } else { |
| 1707 | LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass() |
| 1708 | << ") is not a valid heap address"; |
| 1709 | } |
| 1710 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1711 | card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj)); |
| 1712 | void* cover_begin = card_table->AddrFromCard(card_addr); |
| 1713 | void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) + |
| 1714 | accounting::CardTable::kCardSize); |
| 1715 | LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin |
| 1716 | << "-" << cover_end; |
| 1717 | accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1718 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1719 | // Print out how the object is live. |
| 1720 | if (bitmap != NULL && bitmap->Test(obj)) { |
| 1721 | LOG(ERROR) << "Object " << obj << " found in live bitmap"; |
| 1722 | } |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1723 | if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1724 | LOG(ERROR) << "Object " << obj << " found in allocation stack"; |
| 1725 | } |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1726 | if (live_stack->Contains(const_cast<mirror::Object*>(obj))) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1727 | LOG(ERROR) << "Object " << obj << " found in live stack"; |
| 1728 | } |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1729 | if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) { |
| 1730 | LOG(ERROR) << "Ref " << ref << " found in allocation stack"; |
| 1731 | } |
| 1732 | if (live_stack->Contains(const_cast<mirror::Object*>(ref))) { |
| 1733 | LOG(ERROR) << "Ref " << ref << " found in live stack"; |
| 1734 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1735 | // Attempt to see if the card table missed the reference. |
| 1736 | ScanVisitor scan_visitor; |
| 1737 | byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr)); |
| 1738 | card_table->Scan(bitmap, byte_cover_begin, |
Mathieu Chartier | 184e322 | 2013-08-03 14:02:57 -0700 | [diff] [blame] | 1739 | byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1740 | |
| 1741 | // Search to see if any of the roots reference our object. |
| 1742 | void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj)); |
| 1743 | Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false); |
| 1744 | |
| 1745 | // Search to see if any of the roots reference our reference. |
| 1746 | arg = const_cast<void*>(reinterpret_cast<const void*>(ref)); |
| 1747 | Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false); |
| 1748 | } else { |
| 1749 | LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1750 | } |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1751 | } |
| 1752 | } |
| 1753 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1754 | bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1755 | return heap_->IsLiveObjectLocked(obj, true, false, true); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1756 | } |
| 1757 | |
Mathieu Chartier | 423d2a3 | 2013-09-12 17:33:56 -0700 | [diff] [blame] | 1758 | static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1759 | VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg); |
Mathieu Chartier | 423d2a3 | 2013-09-12 17:33:56 -0700 | [diff] [blame] | 1760 | (*visitor)(nullptr, root, MemberOffset(0), true); |
| 1761 | return root; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1762 | } |
| 1763 | |
| 1764 | private: |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1765 | Heap* const heap_; |
| 1766 | mutable bool failed_; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1767 | }; |
| 1768 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1769 | // Verify all references within an object, for use with HeapBitmap::Visit. |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1770 | class VerifyObjectVisitor { |
| 1771 | public: |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 1772 | explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {} |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1773 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1774 | void operator()(mirror::Object* obj) const |
Ian Rogers | b726dcb | 2012-09-05 08:57:23 -0700 | [diff] [blame] | 1775 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1776 |     // Note: we verify the references in obj but not obj itself, since obj must be live or we
| 1777 |     // would not have found it in the live bitmap.
| 1778 | VerifyReferenceVisitor visitor(heap_); |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1779 |     // The class doesn't count as a reference but we should verify it anyway.
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1780 | collector::MarkSweep::VisitObjectReferences(obj, visitor, true); |
| 1781 | if (obj->GetClass()->IsReferenceClass()) { |
| 1782 | visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false); |
| 1783 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1784 | failed_ = failed_ || visitor.Failed(); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1785 | } |
| 1786 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1787 | static void VisitCallback(mirror::Object* obj, void* arg) |
| 1788 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { |
| 1789 | VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg); |
| 1790 | visitor->operator()(obj); |
| 1791 | } |
| 1792 | |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1793 | bool Failed() const { |
| 1794 | return failed_; |
| 1795 | } |
| 1796 | |
| 1797 | private: |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1798 | Heap* const heap_; |
| 1799 | mutable bool failed_; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1800 | }; |
| 1801 | |
| 1802 | // Must do this with mutators suspended since we are directly accessing the allocation stacks. |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1803 | bool Heap::VerifyHeapReferences() { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 1804 | Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1805 |   // Let's sort the allocation stacks so that we can efficiently binary search them.
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1806 | allocation_stack_->Sort(); |
| 1807 | live_stack_->Sort(); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1808 | VerifyObjectVisitor visitor(this); |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1809 | // Verify objects in the allocation stack since these will be objects which were: |
| 1810 | // 1. Allocated prior to the GC (pre GC verification). |
| 1811 | // 2. Allocated during the GC (pre sweep GC verification). |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1812 | // We don't want to verify the objects in the live stack since they themselves may be |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1813 | // pointing to dead objects if they are not reachable. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1814 | VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor); |
| 1815 | // Verify the roots: |
| 1816 | Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1817 | if (visitor.Failed()) { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1818 | // Dump mod-union tables. |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 1819 | for (const auto& table_pair : mod_union_tables_) { |
| 1820 | accounting::ModUnionTable* mod_union_table = table_pair.second; |
| 1821 | mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": "); |
| 1822 | } |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1823 | DumpSpaces(); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1824 | return false; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1825 | } |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1826 | return true; |
| 1827 | } |
| 1828 | |
| 1829 | class VerifyReferenceCardVisitor { |
| 1830 | public: |
| 1831 | VerifyReferenceCardVisitor(Heap* heap, bool* failed) |
| 1832 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, |
| 1833 | Locks::heap_bitmap_lock_) |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1834 | : heap_(heap), failed_(failed) { |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1835 | } |
| 1836 | |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 1837 | // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for |
| 1838 | // annotalysis on visitors. |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 1839 | void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset, |
| 1840 | bool is_static) const NO_THREAD_SAFETY_ANALYSIS { |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 1841 | // Filter out class references since changing an object's class does not mark the card as dirty. |
| 1842 | // Also handles large objects, since the only reference they hold is a class reference. |
| 1843 | if (ref != NULL && !ref->IsClass()) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1844 | accounting::CardTable* card_table = heap_->GetCardTable(); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1845 |       // If the object references something in the live stack (other than its class), then it
| 1846 |       // must be on a dirty card; otherwise a card mark is missing.
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1847 | if (!card_table->AddrIsInCardTable(obj)) { |
| 1848 | LOG(ERROR) << "Object " << obj << " is not in the address range of the card table"; |
| 1849 | *failed_ = true; |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1850 | } else if (!card_table->IsDirty(obj)) { |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 1851 | // Card should be either kCardDirty if it got re-dirtied after we aged it, or |
| 1852 |         // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1853 | accounting::ObjectStack* live_stack = heap_->live_stack_.get(); |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 1854 | if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) { |
| 1855 | if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) { |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1856 | LOG(ERROR) << "Object " << obj << " found in live stack"; |
| 1857 | } |
| 1858 | if (heap_->GetLiveBitmap()->Test(obj)) { |
| 1859 | LOG(ERROR) << "Object " << obj << " found in live bitmap"; |
| 1860 | } |
| 1861 | LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj) |
| 1862 | << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack"; |
| 1863 | |
| 1864 |           // Print which field of the object references the dead object.
| 1865 | if (!obj->IsObjectArray()) { |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1866 | const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass(); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1867 | CHECK(klass != NULL); |
Brian Carlstrom | ea46f95 | 2013-07-30 01:26:50 -0700 | [diff] [blame] | 1868 | const mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields() |
| 1869 | : klass->GetIFields(); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1870 | CHECK(fields != NULL); |
| 1871 | for (int32_t i = 0; i < fields->GetLength(); ++i) { |
Brian Carlstrom | ea46f95 | 2013-07-30 01:26:50 -0700 | [diff] [blame] | 1872 | const mirror::ArtField* cur = fields->Get(i); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1873 | if (cur->GetOffset().Int32Value() == offset.Int32Value()) { |
| 1874 | LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is " |
| 1875 | << PrettyField(cur); |
| 1876 | break; |
| 1877 | } |
| 1878 | } |
| 1879 | } else { |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1880 | const mirror::ObjectArray<mirror::Object>* object_array = |
| 1881 | obj->AsObjectArray<mirror::Object>(); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1882 | for (int32_t i = 0; i < object_array->GetLength(); ++i) { |
| 1883 | if (object_array->Get(i) == ref) { |
| 1884 | LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref"; |
| 1885 | } |
| 1886 | } |
| 1887 | } |
| 1888 | |
| 1889 | *failed_ = true; |
| 1890 | } |
| 1891 | } |
| 1892 | } |
| 1893 | } |
| 1894 | |
| 1895 | private: |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1896 | Heap* const heap_; |
| 1897 | bool* const failed_; |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1898 | }; |
| 1899 | |
| 1900 | class VerifyLiveStackReferences { |
| 1901 | public: |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 1902 | explicit VerifyLiveStackReferences(Heap* heap) |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1903 | : heap_(heap), |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 1904 | failed_(false) {} |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1905 | |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 1906 | void operator()(mirror::Object* obj) const |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1907 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { |
| 1908 | VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_)); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1909 | collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1910 | } |
| 1911 | |
| 1912 | bool Failed() const { |
| 1913 | return failed_; |
| 1914 | } |
| 1915 | |
| 1916 | private: |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1917 | Heap* const heap_; |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1918 | bool failed_; |
| 1919 | }; |
| 1920 | |
| 1921 | bool Heap::VerifyMissingCardMarks() { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 1922 | Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1923 | |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1924 | // We need to sort the live stack since we binary search it. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1925 | live_stack_->Sort(); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1926 | VerifyLiveStackReferences visitor(this); |
| 1927 | GetLiveBitmap()->Visit(visitor); |
| 1928 | |
| 1929 | // We can verify objects in the live stack since none of these should reference dead objects. |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 1930 | for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) { |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1931 | visitor(*it); |
| 1932 | } |
| 1933 | |
| 1934 | if (visitor.Failed()) { |
| 1935 | DumpSpaces(); |
| 1936 | return false; |
| 1937 | } |
| 1938 | return true; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1939 | } |
| 1940 | |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1941 | void Heap::SwapStacks() { |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1942 | allocation_stack_.swap(live_stack_); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1943 | } |
| 1944 | |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 1945 | accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) { |
| 1946 | auto it = mod_union_tables_.find(space); |
| 1947 | if (it == mod_union_tables_.end()) { |
| 1948 | return nullptr; |
| 1949 | } |
| 1950 | return it->second; |
| 1951 | } |
| 1952 | |
Ian Rogers | 5fe9af7 | 2013-11-14 00:17:20 -0800 | [diff] [blame] | 1953 | void Heap::ProcessCards(TimingLogger& timings) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1954 | // Clear cards and keep track of cards cleared in the mod-union table. |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1955 | for (const auto& space : continuous_spaces_) { |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 1956 | accounting::ModUnionTable* table = FindModUnionTableFromSpace(space); |
| 1957 | if (table != nullptr) { |
| 1958 | const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" : |
| 1959 | "ImageModUnionClearCards"; |
Ian Rogers | 5fe9af7 | 2013-11-14 00:17:20 -0800 | [diff] [blame] | 1960 | TimingLogger::ScopedSplit split(name, &timings); |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 1961 | table->ClearCards(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1962 | } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) { |
Ian Rogers | 5fe9af7 | 2013-11-14 00:17:20 -0800 | [diff] [blame] | 1963 | TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings); |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1964 | // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards |
| 1965 | // were dirty before the GC started. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1966 | // TODO: Don't need to use atomic. |
| 1967 |       // The race is that we end up with either an aged or an unaged card. Since we checkpoint
| 1968 |       // the roots and then scan / update the mod union tables afterwards, we will always scan
| 1969 |       // either card. If we end up with the unaged card, we scan it in the pause.
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1970 | card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor()); |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1971 | } |
| 1972 | } |
| 1973 | } |
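// Note on the aging above (a sketch of the intended effect, not a spec): ModifyCardsAtomic
// applies AgeCardVisitor to every card in the space, so cards that were kCardDirty before
// the GC become kCardDirty - 1 while cards dirtied during the GC stay kCardDirty, letting
// the collector tell the two generations of dirtiness apart when it scans.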
| 1974 | |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 1975 | static mirror::Object* IdentityCallback(mirror::Object* obj, void*) { |
| 1976 | return obj; |
| 1977 | } |
| 1978 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1979 | void Heap::PreGcVerification(collector::GarbageCollector* gc) { |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1980 | ThreadList* thread_list = Runtime::Current()->GetThreadList(); |
| 1981 | Thread* self = Thread::Current(); |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 1982 | |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1983 | if (verify_pre_gc_heap_) { |
| 1984 | thread_list->SuspendAll(); |
| 1985 | { |
| 1986 | ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); |
| 1987 | if (!VerifyHeapReferences()) { |
| 1988 | LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed"; |
| 1989 | } |
| 1990 | } |
| 1991 | thread_list->ResumeAll(); |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 1992 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1993 | |
| 1994 | // Check that all objects which reference things in the live stack are on dirty cards. |
| 1995 | if (verify_missing_card_marks_) { |
| 1996 | thread_list->SuspendAll(); |
| 1997 | { |
| 1998 | ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); |
| 1999 | SwapStacks(); |
| 2000 | // Sort the live stack so that we can quickly binary search it later. |
| 2001 | if (!VerifyMissingCardMarks()) { |
| 2002 | LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed"; |
| 2003 | } |
| 2004 | SwapStacks(); |
| 2005 | } |
| 2006 | thread_list->ResumeAll(); |
| 2007 | } |
| 2008 | |
| 2009 | if (verify_mod_union_table_) { |
| 2010 | thread_list->SuspendAll(); |
| 2011 | ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_); |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 2012 | for (const auto& table_pair : mod_union_tables_) { |
| 2013 | accounting::ModUnionTable* mod_union_table = table_pair.second; |
| 2014 | mod_union_table->UpdateAndMarkReferences(IdentityCallback, nullptr); |
| 2015 | mod_union_table->Verify(); |
| 2016 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 2017 | thread_list->ResumeAll(); |
| 2018 | } |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 2019 | } |
| 2020 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2021 | void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) { |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 2022 |   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
| 2023 | // reachable objects. |
| 2024 | if (verify_post_gc_heap_) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2025 | Thread* self = Thread::Current(); |
| 2026 | CHECK_NE(self->GetState(), kRunnable); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2027 | { |
| 2028 | WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); |
| 2029 | // Swapping bound bitmaps does nothing. |
| 2030 | gc->SwapBitmaps(); |
| 2031 | if (!VerifyHeapReferences()) { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2032 | LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed"; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2033 | } |
| 2034 | gc->SwapBitmaps(); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 2035 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 2036 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 2037 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 2038 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2039 | void Heap::PostGcVerification(collector::GarbageCollector* gc) { |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 2040 | if (verify_system_weaks_) { |
Anwar Ghuloum | 67f9941 | 2013-08-12 14:19:48 -0700 | [diff] [blame] | 2041 | Thread* self = Thread::Current(); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 2042 | ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2043 | collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 2044 | mark_sweep->VerifySystemWeaks(); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 2045 | } |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 2046 | } |
| 2047 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2048 | collector::GcType Heap::WaitForGcToComplete(Thread* self) { |
| 2049 | ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); |
| 2050 | MutexLock mu(self, *gc_complete_lock_); |
| 2051 | return WaitForGcToCompleteLocked(self); |
| 2052 | } |
| 2053 | |
| 2054 | collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2055 | collector::GcType last_gc_type = collector::kGcTypeNone; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2056 | uint64_t wait_start = NanoTime(); |
| 2057 | while (is_gc_running_) { |
| 2058 | ATRACE_BEGIN("GC: Wait For Completion"); |
| 2059 |     // We must wait, so change the thread state and then sleep on gc_complete_cond_.
| 2060 | gc_complete_cond_->Wait(self); |
| 2061 | last_gc_type = last_gc_type_; |
Mathieu Chartier | 752a0e6 | 2013-06-27 11:03:27 -0700 | [diff] [blame] | 2062 | ATRACE_END(); |
Mathieu Chartier | 7664f5c | 2012-06-08 18:15:32 -0700 | [diff] [blame] | 2063 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2064 | uint64_t wait_time = NanoTime() - wait_start; |
| 2065 | total_wait_time_ += wait_time; |
| 2066 | if (wait_time > long_pause_log_threshold_) { |
| 2067 | LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time); |
| 2068 | } |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 2069 | return last_gc_type; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 2070 | } |
| 2071 | |
Elliott Hughes | c967f78 | 2012-04-16 10:23:15 -0700 | [diff] [blame] | 2072 | void Heap::DumpForSigQuit(std::ostream& os) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2073 | os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/" |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 2074 | << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n"; |
Elliott Hughes | 8b788fe | 2013-04-17 15:57:01 -0700 | [diff] [blame] | 2075 | DumpGcPerformanceInfo(os); |
Elliott Hughes | c967f78 | 2012-04-16 10:23:15 -0700 | [diff] [blame] | 2076 | } |
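// Example of the line this produces (illustrative values only, derived from the format
// string above):
//   Heap: 42% free, 12MB/21MB; 103824 objects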
| 2077 | |
| 2078 | size_t Heap::GetPercentFree() { |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 2079 | return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory()); |
Elliott Hughes | c967f78 | 2012-04-16 10:23:15 -0700 | [diff] [blame] | 2080 | } |
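// E.g. GetFreeMemory() = 8 MiB with GetTotalMemory() = 32 MiB yields 25.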
| 2081 | |
Elliott Hughes | 4dd9b4d | 2011-12-12 18:29:24 -0800 | [diff] [blame] | 2082 | void Heap::SetIdealFootprint(size_t max_allowed_footprint) { |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 2083 | if (max_allowed_footprint > GetMaxMemory()) { |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2084 | VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to " |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 2085 | << PrettySize(GetMaxMemory()); |
| 2086 | max_allowed_footprint = GetMaxMemory(); |
| 2087 | } |
Mathieu Chartier | 1c23e1e | 2012-10-12 14:14:11 -0700 | [diff] [blame] | 2088 | max_allowed_footprint_ = max_allowed_footprint; |
Shih-wei Liao | 8c2f641 | 2011-10-03 22:58:14 -0700 | [diff] [blame] | 2089 | } |
| 2090 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2091 | bool Heap::IsMovableObject(const mirror::Object* obj) const { |
| 2092 | if (kMovingCollector) { |
| 2093 | DCHECK(!IsInTempSpace(obj)); |
| 2094 | if (bump_pointer_space_->HasAddress(obj)) { |
| 2095 | return true; |
| 2096 | } |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 2097 | if (main_space_ != nullptr && main_space_->HasAddress(obj)) { |
| 2098 | return true; |
| 2099 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2100 | } |
| 2101 | return false; |
| 2102 | } |
| 2103 | |
| 2104 | bool Heap::IsInTempSpace(const mirror::Object* obj) const { |
| 2105 | if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) { |
| 2106 | return true; |
| 2107 | } |
| 2108 | return false; |
| 2109 | } |
| 2110 | |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 2111 | void Heap::UpdateMaxNativeFootprint() { |
| 2112 | size_t native_size = native_bytes_allocated_; |
| 2113 | // TODO: Tune the native heap utilization to be a value other than the java heap utilization. |
| 2114 | size_t target_size = native_size / GetTargetHeapUtilization(); |
| 2115 | if (target_size > native_size + max_free_) { |
| 2116 | target_size = native_size + max_free_; |
| 2117 | } else if (target_size < native_size + min_free_) { |
| 2118 | target_size = native_size + min_free_; |
| 2119 | } |
| 2120 | native_footprint_gc_watermark_ = target_size; |
| 2121 | native_footprint_limit_ = 2 * target_size - native_size; |
| 2122 | } |
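// Worked example for the computation above (hypothetical values): native_size = 10 MiB,
// target utilization 0.5, min_free_ = 512 KiB, max_free_ = 2 MiB. The ideal target is
// 20 MiB, clamped by max_free_ to 12 MiB, so native_footprint_gc_watermark_ = 12 MiB and
// native_footprint_limit_ = 2 * 12 - 10 = 14 MiB.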
| 2123 | |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 2124 | void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) { |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 2125 | // We know what our utilization is at this moment. |
| 2126 | // This doesn't actually resize any memory. It just lets the heap grow more when necessary. |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 2127 | const size_t bytes_allocated = GetBytesAllocated(); |
| 2128 | last_gc_size_ = bytes_allocated; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2129 | last_gc_time_ns_ = NanoTime(); |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 2130 | size_t target_size; |
| 2131 | if (gc_type != collector::kGcTypeSticky) { |
| 2132 | // Grow the heap for non sticky GC. |
| 2133 | target_size = bytes_allocated / GetTargetHeapUtilization(); |
| 2134 | if (target_size > bytes_allocated + max_free_) { |
| 2135 | target_size = bytes_allocated + max_free_; |
| 2136 | } else if (target_size < bytes_allocated + min_free_) { |
| 2137 | target_size = bytes_allocated + min_free_; |
| 2138 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2139 | native_need_to_run_finalization_ = true; |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 2140 | next_gc_type_ = collector::kGcTypeSticky; |
| 2141 | } else { |
| 2142 |     // Based on how close the current heap size is to the target size, decide
| 2143 |     // whether to do a partial or a sticky GC next.
| 2144 | if (bytes_allocated + min_free_ <= max_allowed_footprint_) { |
| 2145 | next_gc_type_ = collector::kGcTypeSticky; |
| 2146 | } else { |
| 2147 | next_gc_type_ = collector::kGcTypePartial; |
| 2148 | } |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 2149 | // If we have freed enough memory, shrink the heap back down. |
| 2150 | if (bytes_allocated + max_free_ < max_allowed_footprint_) { |
| 2151 | target_size = bytes_allocated + max_free_; |
| 2152 | } else { |
| 2153 | target_size = std::max(bytes_allocated, max_allowed_footprint_); |
| 2154 | } |
| 2155 | } |
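  // Worked example of the two branches above (hypothetical values, target utilization 0.5,
  // min_free_ = 1 MiB, max_free_ = 4 MiB): after a non-sticky GC with bytes_allocated =
  // 20 MiB, the ideal target of 40 MiB is clamped to 24 MiB by max_free_. After a sticky GC
  // with max_allowed_footprint_ = 24 MiB and bytes_allocated = 23.5 MiB, 23.5 + 1 > 24, so
  // the next GC is partial and the footprint stays at max(23.5, 24) = 24 MiB.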
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 2156 | if (!ignore_max_footprint_) { |
| 2157 | SetIdealFootprint(target_size); |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2158 | if (concurrent_gc_) { |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 2159 | // Calculate when to perform the next ConcurrentGC. |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 2160 | // Calculate the estimated GC duration. |
| 2161 | double gc_duration_seconds = NsToMs(gc_duration) / 1000.0; |
| 2162 | // Estimate how many remaining bytes we will have when we need to start the next GC. |
| 2163 | size_t remaining_bytes = allocation_rate_ * gc_duration_seconds; |
| 2164 | remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes); |
| 2165 | if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) { |
| 2166 |         // A situation that should never happen: the estimated allocation rate implies we would
| 2167 |         // exceed the application's entire footprint before the next GC could start. Schedule
| 2168 |         // another GC straight away.
| 2169 | concurrent_start_bytes_ = bytes_allocated; |
| 2170 | } else { |
| 2171 | // Start a concurrent GC when we get close to the estimated remaining bytes. When the |
| 2172 | // allocation rate is very high, remaining_bytes could tell us that we should start a GC |
| 2173 | // right away. |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 2174 | concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, |
| 2175 | bytes_allocated); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 2176 | } |
| 2177 | DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_); |
| 2178 | DCHECK_LE(max_allowed_footprint_, growth_limit_); |
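      // Illustrative numbers for the threshold above (hypothetical): allocation_rate_ =
      // 8 MiB/s and a 250 ms estimated GC duration give remaining_bytes = 2 MiB (at least
      // kMinConcurrentRemainingBytes), so a background GC is requested once bytes_allocated
      // crosses max_allowed_footprint_ - 2 MiB.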
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 2179 | } |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 2180 | } |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 2181 | } |
| 2182 | |
jeffhao | c116070 | 2011-10-27 15:48:45 -0700 | [diff] [blame] | 2183 | void Heap::ClearGrowthLimit() { |
Mathieu Chartier | 80de7a6 | 2012-11-27 17:21:50 -0800 | [diff] [blame] | 2184 | growth_limit_ = capacity_; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2185 | non_moving_space_->ClearGrowthLimit(); |
jeffhao | c116070 | 2011-10-27 15:48:45 -0700 | [diff] [blame] | 2186 | } |
| 2187 | |
Elliott Hughes | adb460d | 2011-10-05 17:02:34 -0700 | [diff] [blame] | 2188 | void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset, |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 2189 | MemberOffset reference_queue_offset, |
| 2190 | MemberOffset reference_queueNext_offset, |
| 2191 | MemberOffset reference_pendingNext_offset, |
| 2192 | MemberOffset finalizer_reference_zombie_offset) { |
Elliott Hughes | adb460d | 2011-10-05 17:02:34 -0700 | [diff] [blame] | 2193 | reference_referent_offset_ = reference_referent_offset; |
| 2194 | reference_queue_offset_ = reference_queue_offset; |
| 2195 | reference_queueNext_offset_ = reference_queueNext_offset; |
| 2196 | reference_pendingNext_offset_ = reference_pendingNext_offset; |
| 2197 | finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset; |
| 2198 | CHECK_NE(reference_referent_offset_.Uint32Value(), 0U); |
| 2199 | CHECK_NE(reference_queue_offset_.Uint32Value(), 0U); |
| 2200 | CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U); |
| 2201 | CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U); |
| 2202 | CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U); |
| 2203 | } |
| 2204 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2205 | void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) { |
| 2206 | DCHECK(reference != NULL); |
| 2207 | DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U); |
| 2208 | reference->SetFieldObject(reference_referent_offset_, referent, true); |
| 2209 | } |
| 2210 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 2211 | mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) { |
Elliott Hughes | adb460d | 2011-10-05 17:02:34 -0700 | [diff] [blame] | 2212 | DCHECK(reference != NULL); |
| 2213 | DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U); |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 2214 | return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true); |
Elliott Hughes | adb460d | 2011-10-05 17:02:34 -0700 | [diff] [blame] | 2215 | } |
| 2216 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 2217 | void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) { |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 2218 | ScopedObjectAccess soa(self); |
Jeff Hao | 5d91730 | 2013-02-27 17:57:33 -0800 | [diff] [blame] | 2219 | JValue result; |
Jeff Hao | 5d91730 | 2013-02-27 17:57:33 -0800 | [diff] [blame] | 2220 | ArgArray arg_array(NULL, 0); |
| 2221 | arg_array.Append(reinterpret_cast<uint32_t>(object)); |
| 2222 | soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, |
Jeff Hao | 6474d19 | 2013-03-26 14:08:09 -0700 | [diff] [blame] | 2223 | arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 2224 | } |
| 2225 | |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 2226 | void Heap::EnqueueClearedReferences() { |
| 2227 | if (!cleared_references_.IsEmpty()) { |
Ian Rogers | 64b6d14 | 2012-10-29 16:34:15 -0700 | [diff] [blame] | 2228 |     // When a runtime isn't started, there are no reference queues to care about, so ignore.
| 2229 | if (LIKELY(Runtime::Current()->IsStarted())) { |
| 2230 | ScopedObjectAccess soa(Thread::Current()); |
Jeff Hao | 5d91730 | 2013-02-27 17:57:33 -0800 | [diff] [blame] | 2231 | JValue result; |
Jeff Hao | 5d91730 | 2013-02-27 17:57:33 -0800 | [diff] [blame] | 2232 | ArgArray arg_array(NULL, 0); |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 2233 | arg_array.Append(reinterpret_cast<uint32_t>(cleared_references_.GetList())); |
Jeff Hao | 5d91730 | 2013-02-27 17:57:33 -0800 | [diff] [blame] | 2234 | soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(), |
Jeff Hao | 6474d19 | 2013-03-26 14:08:09 -0700 | [diff] [blame] | 2235 | arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); |
Ian Rogers | 64b6d14 | 2012-10-29 16:34:15 -0700 | [diff] [blame] | 2236 | } |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 2237 | cleared_references_.Clear(); |
Elliott Hughes | adb460d | 2011-10-05 17:02:34 -0700 | [diff] [blame] | 2238 | } |
| 2239 | } |
| 2240 | |
Ian Rogers | 1f53934 | 2012-10-03 21:09:42 -0700 | [diff] [blame] | 2241 | void Heap::RequestConcurrentGC(Thread* self) { |
Mathieu Chartier | 069387a | 2012-06-18 12:01:01 -0700 | [diff] [blame] | 2242 | // Make sure that we can do a concurrent GC. |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 2243 | Runtime* runtime = Runtime::Current(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2244 | if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) || |
| 2245 | self->IsHandlingStackOverflow()) { |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 2246 | return; |
| 2247 | } |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 2248 |   // We already have a request pending; no reason to start more until we update
| 2249 |   // concurrent_start_bytes_.
| 2250 | concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 2251 | JNIEnv* env = self->GetJniEnv(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2252 | DCHECK(WellKnownClasses::java_lang_Daemons != nullptr); |
| 2253 | DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 2254 | env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, |
| 2255 | WellKnownClasses::java_lang_Daemons_requestGC); |
Mathieu Chartier | 7664f5c | 2012-06-08 18:15:32 -0700 | [diff] [blame] | 2256 | CHECK(!env->ExceptionCheck()); |
Mathieu Chartier | 7664f5c | 2012-06-08 18:15:32 -0700 | [diff] [blame] | 2257 | } |
| 2258 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 2259 | void Heap::ConcurrentGC(Thread* self) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2260 | if (Runtime::Current()->IsShuttingDown(self)) { |
| 2261 | return; |
Mathieu Chartier | 2542d66 | 2012-06-21 17:14:11 -0700 | [diff] [blame] | 2262 | } |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 2263 | // Wait for any GCs currently running to finish. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2264 | if (WaitForGcToComplete(self) == collector::kGcTypeNone) { |
Mathieu Chartier | f9ed0d3 | 2013-11-21 16:42:47 -0800 | [diff] [blame] | 2265 |     // If we can't run the GC type we wanted to run, find the next appropriate one and try that
| 2266 |     // instead. E.g. if we can't do a partial GC, do a full GC instead.
| 2267 | if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) == |
| 2268 | collector::kGcTypeNone) { |
| 2269 | for (collector::GcType gc_type : gc_plan_) { |
| 2270 | // Attempt to run the collector, if we succeed, we are done. |
| 2271 | if (gc_type > next_gc_type_ && |
| 2272 | CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) { |
| 2273 | break; |
| 2274 | } |
| 2275 | } |
| 2276 | } |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 2277 | } |
Mathieu Chartier | 7664f5c | 2012-06-08 18:15:32 -0700 | [diff] [blame] | 2278 | } |
| 2279 | |
Elliott Hughes | 8cf5bc0 | 2012-02-02 16:32:16 -0800 | [diff] [blame] | 2280 | void Heap::RequestHeapTrim() { |
Ian Rogers | 4893188 | 2013-01-22 14:35:16 -0800 | [diff] [blame] | 2281 | // GC completed and now we must decide whether to request a heap trim (advising pages back to the |
| 2282 | // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans |
| 2283 | // a space it will hold its lock and can become a cause of jank. |
| 2284 | // Note, the large object space self trims and the Zygote space was trimmed and unchanging since |
| 2285 | // forking. |
| 2286 | |
Elliott Hughes | 8cf5bc0 | 2012-02-02 16:32:16 -0800 | [diff] [blame] | 2287 | // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap |
| 2288 | // because that only marks object heads, so a large array looks like lots of empty space. We |
| 2289 | // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional |
| 2290 | // to utilization (which is probably inversely proportional to how much benefit we can expect). |
| 2291 | // We could try mincore(2) but that's only a measure of how many pages we haven't given away, |
| 2292 | // not how much use we're making of those pages. |
Ian Rogers | 4893188 | 2013-01-22 14:35:16 -0800 | [diff] [blame] | 2293 | uint64_t ms_time = MilliTime(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2294 | // Don't bother trimming the alloc space if a heap trim occurred in the last two seconds. |
| 2295 | if (ms_time - last_trim_time_ms_ < 2 * 1000) { |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 2296 | return; |
Elliott Hughes | 8cf5bc0 | 2012-02-02 16:32:16 -0800 | [diff] [blame] | 2297 | } |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 2298 | |
| 2299 | Thread* self = Thread::Current(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2300 | Runtime* runtime = Runtime::Current(); |
| 2301 | if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) { |
| 2302 | // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time). |
| 2303 | // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check, |
| 2304 | // as we don't hold the lock while requesting the trim). |
| 2305 | return; |
Ian Rogers | e1d490c | 2012-02-03 09:09:07 -0800 | [diff] [blame] | 2306 | } |
Ian Rogers | 4893188 | 2013-01-22 14:35:16 -0800 | [diff] [blame] | 2307 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2308 | last_trim_time_ms_ = ms_time; |
Mathieu Chartier | c39e342 | 2013-08-07 16:41:36 -0700 | [diff] [blame] | 2309 | |
| 2310 | // Trim only if we do not currently care about pause times. |
Mathieu Chartier | ca2a24d | 2013-11-25 15:12:12 -0800 | [diff] [blame] | 2311 | if (!CareAboutPauseTimes()) { |
Mathieu Chartier | c39e342 | 2013-08-07 16:41:36 -0700 | [diff] [blame] | 2312 | JNIEnv* env = self->GetJniEnv(); |
| 2313 | DCHECK(WellKnownClasses::java_lang_Daemons != nullptr); |
| 2314 | DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != nullptr); |
| 2315 | env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, |
| 2316 | WellKnownClasses::java_lang_Daemons_requestHeapTrim); |
| 2317 | CHECK(!env->ExceptionCheck()); |
| 2318 | } |
Elliott Hughes | 8cf5bc0 | 2012-02-02 16:32:16 -0800 | [diff] [blame] | 2319 | } |
| 2320 | |
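| | // Thread-local buffers let threads allocate without contending on the shared allocator |
| | // locks: rosalloc keeps per-thread runs for small size classes, and the bump pointer space |
| | // hands out thread-local allocation buffers (TLABs). Revoking returns that memory to the |
| | // shared pool so the collector sees an accurate account of allocated bytes. |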
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 2321 | void Heap::RevokeThreadLocalBuffers(Thread* thread) { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 2322 | if (rosalloc_space_ != nullptr) { |
| 2323 | rosalloc_space_->RevokeThreadLocalBuffers(thread); |
| 2324 | } |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 2325 | if (bump_pointer_space_ != nullptr) { |
| 2326 | bump_pointer_space_->RevokeThreadLocalBuffers(thread); |
| 2327 | } |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 2328 | } |
| 2329 | |
| 2330 | void Heap::RevokeAllThreadLocalBuffers() { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 2331 | if (rosalloc_space_ != nullptr) { |
| 2332 | rosalloc_space_->RevokeAllThreadLocalBuffers(); |
| 2333 | } |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 2334 | if (bump_pointer_space_ != nullptr) { |
| 2335 | bump_pointer_space_->RevokeAllThreadLocalBuffers(); |
| 2336 | } |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 2337 | } |
| 2338 | |
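| | // A GC request is considered pending whenever concurrent_start_bytes_ holds a real |
| | // allocation threshold; SIZE_MAX acts as the "no pending request" sentinel. |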
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 2339 | bool Heap::IsGCRequestPending() const { |
| 2340 | return concurrent_start_bytes_ != std::numeric_limits<size_t>::max(); |
| 2341 | } |
| 2342 | |
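| | // Lazily resolves and caches java.lang.System.runFinalization on first use, then calls |
| | // it so that pending finalizers get a chance to run. |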
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2343 | void Heap::RunFinalization(JNIEnv* env) { |
| 2344 | // Can't do this in WellKnownClasses::Init since System is not properly set up at that point. |
| 2345 | if (WellKnownClasses::java_lang_System_runFinalization == nullptr) { |
| 2346 | CHECK(WellKnownClasses::java_lang_System != nullptr); |
| 2347 | WellKnownClasses::java_lang_System_runFinalization = |
| 2348 | CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V"); |
| 2349 | CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr); |
| 2350 | } |
| 2351 | env->CallStaticVoidMethod(WellKnownClasses::java_lang_System, |
| 2352 | WellKnownClasses::java_lang_System_runFinalization); |
| 2353 | } |
| 2354 | |
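| | // Accounting for native memory that is logically owned by Java objects (for example the |
| | // pixel storage behind a Bitmap). Two watermarks apply: crossing |
| | // native_footprint_gc_watermark_ requests a GC, while crossing the higher |
| | // native_footprint_limit_ blocks to collect and run finalizers until the footprint drops. |
| | // Illustrative managed-side usage (via dalvik.system.VMRuntime): |
| | // |
| | //   VMRuntime.getRuntime().registerNativeAllocation(byteCount); |
| | //   ... use the native memory ... |
| | //   VMRuntime.getRuntime().registerNativeFree(byteCount); |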
Ian Rogers | 1eb512d | 2013-10-18 15:42:20 -0700 | [diff] [blame] | 2355 | void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2356 | Thread* self = ThreadForEnv(env); |
| 2357 | if (native_need_to_run_finalization_) { |
| 2358 | RunFinalization(env); |
| 2359 | UpdateMaxNativeFootprint(); |
| 2360 | native_need_to_run_finalization_ = false; |
| 2361 | } |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 2362 | // Total number of native bytes allocated. |
Ian Rogers | b122a4b | 2013-11-19 18:00:50 -0800 | [diff] [blame] | 2363 | native_bytes_allocated_.FetchAndAdd(bytes); |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 2364 | if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) { |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 2365 | collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial : |
| 2366 | collector::kGcTypeFull; |
| 2367 | |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 2368 | // The second watermark is higher than the gc watermark. Hitting it means native objects are |
| 2369 | // being allocated faster than the GC can reclaim them. |
| 2370 | if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2371 | if (WaitForGcToComplete(self) != collector::kGcTypeNone) { |
| 2372 | // Just finished a GC, attempt to run finalizers. |
| 2373 | RunFinalization(env); |
| 2374 | CHECK(!env->ExceptionCheck()); |
| 2375 | } |
| 2376 | // If we still are over the watermark, attempt a GC for alloc and run finalizers. |
| 2377 | if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) { |
Hiroshi Yamauchi | 6f4ffe4 | 2014-01-13 12:30:44 -0800 | [diff] [blame^] | 2378 | CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2379 | RunFinalization(env); |
| 2380 | native_need_to_run_finalization_ = false; |
| 2381 | CHECK(!env->ExceptionCheck()); |
| 2382 | } |
| 2383 | // We have just run finalizers, so update the native watermark since it is very likely |
| 2384 | // that finalizers released native allocations. |
| 2385 | UpdateMaxNativeFootprint(); |
| 2386 | } else if (!IsGCRequestPending()) { |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2387 | if (concurrent_gc_) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2388 | RequestConcurrentGC(self); |
| 2389 | } else { |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 2390 | CollectGarbageInternal(gc_type, kGcCauseForAlloc, false); |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 2391 | } |
| 2392 | } |
| 2393 | } |
| 2394 | } |
| 2395 | |
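| | // Lock-free decrement of the native allocation count. The compare-and-swap loop retries |
| | // whenever another thread updates the counter between the load and the swap; freeing more |
| | // bytes than were registered throws a RuntimeException instead of letting the count go |
| | // negative. |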
Ian Rogers | 1eb512d | 2013-10-18 15:42:20 -0700 | [diff] [blame] | 2396 | void Heap::RegisterNativeFree(JNIEnv* env, int bytes) { |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 2397 | int expected_size, new_size; |
| 2398 | do { |
Ian Rogers | b122a4b | 2013-11-19 18:00:50 -0800 | [diff] [blame] | 2399 | expected_size = native_bytes_allocated_.Load(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2400 | new_size = expected_size - bytes; |
| 2401 | if (UNLIKELY(new_size < 0)) { |
| 2402 | ScopedObjectAccess soa(env); |
| 2403 | env->ThrowNew(WellKnownClasses::java_lang_RuntimeException, |
| 2404 | StringPrintf("Attempted to free %d native bytes with only %d native bytes " |
| 2405 | "registered as allocated", bytes, expected_size).c_str()); |
| 2406 | break; |
| 2407 | } |
Ian Rogers | b122a4b | 2013-11-19 18:00:50 -0800 | [diff] [blame] | 2408 | } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size)); |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 2409 | } |
| 2410 | |
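| | // Backs Runtime.totalMemory(): the footprint of every continuous space except the shared |
| | // boot image, plus the bytes currently allocated in large object spaces. |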
Hiroshi Yamauchi | 09b07a9 | 2013-07-15 13:17:06 -0700 | [diff] [blame] | 2411 | int64_t Heap::GetTotalMemory() const { |
| 2412 | int64_t ret = 0; |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 2413 | for (const auto& space : continuous_spaces_) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2414 | // Currently don't include the image space. |
| 2415 | if (!space->IsImageSpace()) { |
| 2416 | ret += space->Size(); |
Hiroshi Yamauchi | 09b07a9 | 2013-07-15 13:17:06 -0700 | [diff] [blame] | 2417 | } |
| 2418 | } |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 2419 | for (const auto& space : discontinuous_spaces_) { |
Hiroshi Yamauchi | 09b07a9 | 2013-07-15 13:17:06 -0700 | [diff] [blame] | 2420 | if (space->IsLargeObjectSpace()) { |
| 2421 | ret += space->AsLargeObjectSpace()->GetBytesAllocated(); |
| 2422 | } |
| 2423 | } |
| 2424 | return ret; |
| 2425 | } |
| 2426 | |
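| | // Mod-union tables summarize dirty cards for spaces the GC leaves untouched (image and |
| | // Zygote spaces), recording which of their objects reference the spaces being collected |
| | // so those references can be scanned without walking the entire immune space. |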
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 2427 | void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) { |
| 2428 | DCHECK(mod_union_table != nullptr); |
| 2429 | mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table); |
| 2430 | } |
| 2431 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2432 | } // namespace gc |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 2433 | } // namespace art |