/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <memory>
#include <unwind.h>  // For GC verification.
#include <vector>

#include "art_field-inl.h"
#include "base/allocator.h"
#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "heap-inl.h"
#include "image.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "os.h"
#include "reflection.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
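// Set kCollectorTransitionStressIterations above zero to repeatedly force collector transitions
// on every process state change (see Heap::UpdateProcessState below), sleeping
// kCollectorTransitionStressWait microseconds between transitions.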
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.0;
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space name.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
static constexpr bool kGCALotMode = false;
// GC alot mode uses a small allocation stack to stress test a lot of GC.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
// Verify object mode has a small allocation stack size since searching the allocation stack is slow.
static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
    sizeof(mirror::HeapReference<mirror::Object>);
// System.runFinalization can deadlock with native allocations, to deal with this, we have a
// timeout on how long we wait for finalizers to run. b/21544853
static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, double foreground_heap_growth_multiplier,
           size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name,
           const InstructionSet image_instruction_set, CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
           size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint, bool use_tlab,
           bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc, bool gc_stress_mode,
           bool use_homogeneous_space_compaction_for_oom,
           uint64_t min_interval_homogeneous_space_compaction_by_oom)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      pending_task_lock_(nullptr),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      collector_type_running_(kCollectorTypeNone),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_need_to_run_finalization_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      num_bytes_freed_revoke_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      gc_stress_mode_(gc_stress_mode),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
          kDefaultAllocationStackSize),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      region_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      pending_collector_transition_(nullptr),
      pending_heap_trim_(nullptr),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
      running_collection_is_blocking_(false),
      blocking_gc_count_(0U),
      blocking_gc_time_(0U),
      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
      gc_count_last_window_(0U),
      blocking_gc_count_last_window_(0U),
      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
                                        kGcCountRateMaxBucketCount),
      alloc_tracking_enabled_(false),
      backtrace_lock_(nullptr),
      seen_backtrace_count_(0u),
      unique_backtrace_count_(0u) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  Runtime* const runtime = Runtime::Current();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = runtime->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files
  uint8_t* requested_alloc_space_begin = nullptr;
  if (foreground_collector_type_ == kCollectorTypeCC) {
    // Need to use a low address so that we can allocate a contiguous
    // 2 * Xmx space when there's no image (dex2oat for target).
    CHECK_GE(300 * MB, non_moving_space_capacity);
    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
  }
  if (!image_file_name.empty()) {
    ATRACE_BEGIN("ImageSpace::Create");
    std::string error_msg;
    auto* image_space = space::ImageSpace::Create(image_file_name.c_str(), image_instruction_set,
                                                  &error_msg);
    ATRACE_END();
    if (image_space != nullptr) {
      AddSpace(image_space);
      // Oat files referenced by image files immediately follow them in memory, ensure alloc space
      // isn't going to get in the middle
      uint8_t* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
      CHECK_GT(oat_file_end_addr, image_space->End());
      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
    } else {
      LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
                 << "Attempting to fall back to imageless running. Error was: " << error_msg;
    }
  }
  /*
  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-  nonmoving space (non_moving_space_capacity)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space / bump space 1 (capacity_) +-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */
  // We don't have hspace compaction enabled with GSS or CC.
  if (foreground_collector_type_ == kCollectorTypeGSS ||
      foreground_collector_type_ == kCollectorTypeCC) {
    use_homogeneous_space_compaction_for_oom_ = false;
  }
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom_;
  // We may use the same space as the main space for the non moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);
  if (foreground_collector_type == kCollectorTypeGSS) {
    separate_non_moving_space = false;
  }
  std::unique_ptr<MemMap> main_mem_map_1;
  std::unique_ptr<MemMap> main_mem_map_2;
  uint8_t* request_begin = requested_alloc_space_begin;
  if (request_begin != nullptr && separate_non_moving_space) {
    request_begin += non_moving_space_capacity;
  }
  std::string error_str;
  std::unique_ptr<MemMap> non_moving_space_mem_map;
  ATRACE_BEGIN("Create heap maps");
  if (separate_non_moving_space) {
    // If we are the zygote, the non moving space becomes the zygote space when we run
    // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
    // rename the mem map later.
    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    non_moving_space_mem_map.reset(
        MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
                             non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
                             &error_str));
    CHECK(non_moving_space_mem_map != nullptr) << error_str;
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = reinterpret_cast<uint8_t*>(300 * MB);
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  if (foreground_collector_type_ != kCollectorTypeCC) {
    if (separate_non_moving_space) {
      main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin,
                                                        capacity_, &error_str));
    } else {
      // If no separate non-moving space, the main space must come
      // right after the image space to avoid a gap.
      main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
                                                PROT_READ | PROT_WRITE, true, false,
                                                &error_str));
    }
    CHECK(main_mem_map_1.get() != nullptr) << error_str;
  }
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
                                                      capacity_, &error_str));
    CHECK(main_mem_map_2.get() != nullptr) << error_str;
  }
  ATRACE_END();
  ATRACE_BEGIN("Create spaces");
  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map->Size();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
        non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
        initial_size, size, size, false);
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
                                        << requested_alloc_space_begin;
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    region_space_ = space::RegionSpace::Create("Region space", capacity_ * 2, request_begin);
    AddSpace(region_space_);
  } else if (IsMovingGc(foreground_collector_type_) &&
      foreground_collector_type_ != kCollectorTypeGSS) {
    // Create bump pointer spaces.
    // We only create the bump pointer spaces if the foreground collector is a compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    main_mem_map_1.release());
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            main_mem_map_2.release());
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (foreground_collector_type_ == kCollectorTypeGSS) {
      CHECK_EQ(foreground_collector_type_, background_collector_type_);
      // Create bump pointer spaces instead of a backup space.
      main_mem_map_2.release();
      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
                                                            kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(bump_pointer_space_ != nullptr);
      AddSpace(bump_pointer_space_);
      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
                                                    kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(temp_space_ != nullptr);
      AddSpace(temp_space_);
    } else if (main_mem_map_2.get() != nullptr) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
                                                           growth_limit_, capacity_, name, true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
                                                       capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  // TODO: Avoid needing to do this.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  ATRACE_END();
  // Allocate the card table.
  ATRACE_BEGIN("Create card table");
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != nullptr) << "Failed to create card table";
  ATRACE_END();
  if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
    rb_table_.reset(new accounting::ReadBarrierTable());
    DCHECK(rb_table_->IsAllCleared());
  }
  if (GetImageSpace() != nullptr) {
    // Don't add the image mod union table if we are running without an image, this can crash if
    // we use the CardCache implementation.
    accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
        "Image mod-union table", this, GetImageSpace());
    CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
    AddModUnionTable(mod_union_table);
  }
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.StoreRelaxed(0);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create it earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  task_processor_.reset(new TaskProcessor());
  reference_processor_.reset(new ReferenceProcessor());
  pending_task_lock_ = new Mutex("Pending task lock");
  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);
  // Create our garbage collectors.
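  // Two passes over the mark-sweep collectors: the first (concurrent == false) registers the
  // stop-the-world variants used with kCollectorTypeMS, the second (concurrent == true) registers
  // the concurrent variants used with kCollectorTypeCMS.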
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
        (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
    }
  }
  if (kMovingCollector) {
    if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
        use_homogeneous_space_compaction_for_oom_) {
      // TODO: Clean this up.
      const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
      semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                       generational ? "generational" : "");
      garbage_collectors_.push_back(semi_space_collector_);
    }
    if (MayUseCollector(kCollectorTypeCC)) {
      concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
      garbage_collectors_.push_back(concurrent_copying_collector_);
    }
    if (MayUseCollector(kCollectorTypeMC)) {
      mark_compact_collector_ = new collector::MarkCompact(this);
      garbage_collectors_.push_back(mark_compact_collector_);
    }
  }
  if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
    // Check that there's no gap between the image space and the non moving space so that the
    // immune region won't break (eg. due to a large object allocated in the gap). This is only
    // required when we're the zygote or using GSS.
    bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
                                      non_moving_space_->GetMemMap());
    if (!no_gap) {
      PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
      MemMap::DumpMaps(LOG(ERROR), true);
      LOG(FATAL) << "There's a gap between the image space and the non-moving space";
    }
  }
  instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
  if (gc_stress_mode_) {
    backtrace_lock_ = new Mutex("GC complete lock");
  }
  if (is_running_on_memory_tool_ || gc_stress_mode_) {
    instrumentation->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

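// Map an anonymous region for the heap, preferring request_begin but falling back to letting the
// kernel pick an address if the preferred range cannot be mapped.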
MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
                                           size_t capacity, std::string* out_error_str) {
  while (true) {
    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
                                       PROT_READ | PROT_WRITE, true, false, out_error_str);
    if (map != nullptr || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
}

bool Heap::MayUseCollector(CollectorType type) const {
  return foreground_collector_type_ == type || background_collector_type_ == type;
}

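// Create a rosalloc or dlmalloc space (depending on kUseRosAlloc) on top of an existing mem map,
// along with a remembered set for it when collector::SemiSpace::kUseRememberedSet is enabled.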
space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
                                                      size_t growth_limit, size_t capacity,
                                                      const char* name, bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          low_memory_mode_, can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}

void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

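// Switch the allocator used for new allocations and reset the quick alloc entrypoints so that
// they match the newly selected allocator.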
void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

void Heap::DisableMovingGc() {
  if (IsMovingGc(foreground_collector_type_)) {
    foreground_collector_type_ = kCollectorTypeCMS;
  }
  if (IsMovingGc(background_collector_type_)) {
    background_collector_type_ = foreground_collector_type_;
  }
  TransitionCollector(foreground_collector_type_);
  ThreadList* tl = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();
  ScopedThreadStateChange tsc(self, kSuspended);
  tl->SuspendAll(__FUNCTION__);
  // Something may have caused the transition to fail.
  if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
    CHECK(main_space_ != nullptr);
    // The allocation stack may have non movable objects in it. We need to flush it since the GC
    // can only handle marking allocation stack objects of one non moving space and one main
    // space.
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      FlushAllocStack();
    }
    main_space_->DisableMovingObjects();
    non_moving_space_ = main_space_;
    CHECK(!non_moving_space_->CanMoveObjects());
  }
  tl->ResumeAll();
}

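// Best-effort class descriptor lookup that tolerates invalid or unmapped pointers; used via
// SafePrettyTypeOf() when DumpObject() prints a possibly corrupted object.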
std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
  if (!IsValidContinuousSpaceObjectAddress(klass)) {
    return StringPrintf("<non heap address klass %p>", klass);
  }
  mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
  if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
    std::string result("[");
    result += SafeGetClassDescriptor(component_type);
    return result;
  } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
    return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
  } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
    return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
  } else {
    mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
    if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
      return StringPrintf("<non heap address dex_cache %p>", dex_cache);
    }
    const DexFile* dex_file = dex_cache->GetDexFile();
    uint16_t class_def_idx = klass->GetDexClassDefIndex();
    if (class_def_idx == DexFile::kDexNoIndex16) {
      return "<class def not found>";
    }
    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
    const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
    return dex_file->GetTypeDescriptor(type_id);
  }
}

std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
  if (obj == nullptr) {
    return "null";
  }
  mirror::Class* klass = obj->GetClass<kVerifyNone>();
  if (klass == nullptr) {
    return "(class=null)";
  }
  std::string result(SafeGetClassDescriptor(klass));
  if (obj->IsClass()) {
    result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
  }
  return result;
}

void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
  if (obj == nullptr) {
    stream << "(obj=null)";
    return;
  }
  if (IsAligned<kObjectAlignment>(obj)) {
    space::Space* space = nullptr;
    // Don't use find space since it only finds spaces which actually contain objects instead of
    // spaces which may contain objects (e.g. cleared bump pointer spaces).
    for (const auto& cur_space : continuous_spaces_) {
      if (cur_space->HasAddress(obj)) {
        space = cur_space;
        break;
      }
    }
    // Unprotect all the spaces.
    for (const auto& con_space : continuous_spaces_) {
      mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
    }
    stream << "Object " << obj;
    if (space != nullptr) {
      stream << " in space " << *space;
    }
    mirror::Class* klass = obj->GetClass<kVerifyNone>();
    stream << "\nclass=" << klass;
    if (klass != nullptr) {
      stream << " type= " << SafePrettyTypeOf(obj);
    }
    // Re-protect the address we faulted on.
    mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
  }
}

bool Heap::IsCompilingBoot() const {
  if (!Runtime::Current()->IsAotCompiler()) {
    return false;
  }
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GT(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

void Heap::UpdateProcessState(ProcessState process_state) {
  if (process_state_ != process_state) {
    process_state_ = process_state;
    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
      // Start at index 1 to avoid "is always false" warning.
      // Have iteration 1 always transition the collector.
      TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
          ? foreground_collector_type_ : background_collector_type_);
      usleep(kCollectorTransitionStressWait);
    }
    if (process_state_ == kProcessStateJankPerceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
    } else {
      // Don't delay for debug builds since we may want to stress test the GC.
      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
      // special handling which does a homogeneous space compaction once but then doesn't transition
      // the collector.
      RequestCollectorTransition(background_collector_type_,
                                 kIsDebugBuild ? 0 : kCollectorTransitionWait);
    }
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

// Visit objects when threads aren't suspended. If concurrent moving
// GC, disable moving GC and suspend threads and then visit objects.
void Heap::VisitObjects(ObjectCallback callback, void* arg) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertSharedHeld(self);
  DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
  if (IsGcConcurrentAndMoving()) {
    // Concurrent moving GC. Just suspending threads isn't sufficient
    // because a collection isn't one big pause and we could suspend
    // threads in the middle (between phases) of a concurrent moving
    // collection where it's not easily known which objects are alive
    // (both the region space and the non-moving space) or which
    // copies of objects to visit, and the to-space invariant could be
    // easily broken. Visit objects while GC isn't running by using
    // IncrementDisableMovingGC() and threads are suspended.
    IncrementDisableMovingGC(self);
    self->TransitionFromRunnableToSuspended(kWaitingForVisitObjects);
    ThreadList* tl = Runtime::Current()->GetThreadList();
    tl->SuspendAll(__FUNCTION__);
    VisitObjectsInternalRegionSpace(callback, arg);
    VisitObjectsInternal(callback, arg);
    tl->ResumeAll();
    self->TransitionFromSuspendedToRunnable();
    DecrementDisableMovingGC(self);
  } else {
    // GCs can move objects, so don't allow this.
    ScopedAssertNoThreadSuspension ants(self, "Visiting objects");
    DCHECK(region_space_ == nullptr);
    VisitObjectsInternal(callback, arg);
  }
}

// Visit objects when threads are already suspended.
void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  VisitObjectsInternalRegionSpace(callback, arg);
  VisitObjectsInternal(callback, arg);
}

// Visit objects in the region spaces.
void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (region_space_ != nullptr) {
    DCHECK(IsGcConcurrentAndMoving());
    if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
      // Exclude the pre-zygote fork time where the semi-space collector
      // calls VerifyHeapReferences() as part of the zygote compaction
      // which then would call here without the moving GC disabled,
      // which is fine.
      DCHECK(IsMovingGCDisabled(self));
    }
    region_space_->Walk(callback, arg);
  }
}

// Visit objects in the other spaces.
void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
  if (bump_pointer_space_ != nullptr) {
    // Visit objects in bump pointer space.
    bump_pointer_space_->Walk(callback, arg);
  }
 846  // TODO: Switch to standard begin and end to use a range-based loop.
Mathieu Chartiercb535da2015-01-23 13:50:03 -0800847 for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
848 mirror::Object* const obj = it->AsMirrorPtr();
Mathieu Chartierebdf3f32014-02-13 10:23:27 -0800849 if (obj != nullptr && obj->GetClass() != nullptr) {
850 // Avoid the race condition caused by the object not yet being written into the allocation
Mathieu Chartiera5eae692014-12-17 17:56:03 -0800851      // stack or the class not yet being written into the object. Also, if
852 // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
Mathieu Chartierebdf3f32014-02-13 10:23:27 -0800853 callback(obj, arg);
854 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700855 }
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -0800856 {
857 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
858 GetLiveBitmap()->Walk(callback, arg);
859 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700860}
861
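// Mark every object on the given allocation stack as live in the live bitmaps of the main (or
// non-moving) space, the non-moving space and, if present, the large object space.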
862void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
Mathieu Chartier00b59152014-07-25 10:13:51 -0700863 space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
864 space::ContinuousSpace* space2 = non_moving_space_;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800865 // TODO: Generalize this to n bitmaps?
Mathieu Chartier00b59152014-07-25 10:13:51 -0700866 CHECK(space1 != nullptr);
867 CHECK(space2 != nullptr);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800868 MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
Mathieu Chartier2dbe6272014-09-16 10:43:23 -0700869 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
870 stack);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700871}
872
Mathieu Chartier02b6a782012-10-26 13:51:26 -0700873void Heap::DeleteThreadPool() {
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700874 thread_pool_.reset(nullptr);
Mathieu Chartier02b6a782012-10-26 13:51:26 -0700875}
876
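// Register a space with the heap: hook its live/mark bitmaps into the heap bitmaps, keep the
// continuous spaces sorted by start address, and track it as an alloc space when applicable.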
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -0700877void Heap::AddSpace(space::Space* space) {
Zuo Wangf37a88b2014-07-10 04:26:41 -0700878 CHECK(space != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700879 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
880 if (space->IsContinuousSpace()) {
881 DCHECK(!space->IsDiscontinuousSpace());
882 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
883 // Continuous spaces don't necessarily have bitmaps.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -0700884 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
885 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
Mathieu Chartier590fee92013-09-13 13:46:47 -0700886 if (live_bitmap != nullptr) {
Mathieu Chartier2796a162014-07-25 11:50:47 -0700887 CHECK(mark_bitmap != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700888 live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
889 mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700890 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700891 continuous_spaces_.push_back(continuous_space);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700892 // Ensure that spaces remain sorted in increasing order of start address.
893 std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
894 [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
895 return a->Begin() < b->Begin();
896 });
Mathieu Chartier590fee92013-09-13 13:46:47 -0700897 } else {
Mathieu Chartier2796a162014-07-25 11:50:47 -0700898 CHECK(space->IsDiscontinuousSpace());
Mathieu Chartier590fee92013-09-13 13:46:47 -0700899 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
Mathieu Chartierbbd695c2014-04-16 09:48:48 -0700900 live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
901 mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
Mathieu Chartier590fee92013-09-13 13:46:47 -0700902 discontinuous_spaces_.push_back(discontinuous_space);
903 }
904 if (space->IsAllocSpace()) {
905 alloc_spaces_.push_back(space->AsAllocSpace());
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700906 }
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800907}
908
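// Cache the given continuous space as the default malloc space, so that dlmalloc_space_ or
// rosalloc_space_ points at the current main allocation space.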
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -0700909void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
910 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
911 if (continuous_space->IsDlMallocSpace()) {
912 dlmalloc_space_ = continuous_space->AsDlMallocSpace();
913 } else if (continuous_space->IsRosAllocSpace()) {
914 rosalloc_space_ = continuous_space->AsRosAllocSpace();
915 }
916}
917
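// Undo AddSpace: unhook the space's bitmaps from the heap bitmaps and drop it from the
// continuous/discontinuous and alloc space lists.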
918void Heap::RemoveSpace(space::Space* space) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800919 DCHECK(space != nullptr);
920 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
921 if (space->IsContinuousSpace()) {
922 DCHECK(!space->IsDiscontinuousSpace());
923 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
924 // Continuous spaces don't necessarily have bitmaps.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -0700925 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
926 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800927 if (live_bitmap != nullptr) {
928 DCHECK(mark_bitmap != nullptr);
929 live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
930 mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
931 }
932 auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
933 DCHECK(it != continuous_spaces_.end());
934 continuous_spaces_.erase(it);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800935 } else {
936 DCHECK(space->IsDiscontinuousSpace());
937 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
Mathieu Chartierbbd695c2014-04-16 09:48:48 -0700938 live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
939 mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800940 auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
941 discontinuous_space);
942 DCHECK(it != discontinuous_spaces_.end());
943 discontinuous_spaces_.erase(it);
944 }
945 if (space->IsAllocSpace()) {
946 auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
947 DCHECK(it != alloc_spaces_.end());
948 alloc_spaces_.erase(it);
949 }
950}
951
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700952void Heap::DumpGcPerformanceInfo(std::ostream& os) {
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700953 // Dump cumulative timings.
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700954 os << "Dumping cumulative Gc timings\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700955 uint64_t total_duration = 0;
Mathieu Chartier2b82db42012-11-14 17:29:05 -0800956 // Dump cumulative loggers for each GC type.
Mathieu Chartier2b82db42012-11-14 17:29:05 -0800957 uint64_t total_paused_time = 0;
Mathieu Chartier5a487192014-04-08 11:14:54 -0700958 for (auto& collector : garbage_collectors_) {
Mathieu Chartier104fa0c2014-08-07 14:26:27 -0700959 total_duration += collector->GetCumulativeTimings().GetTotalNs();
960 total_paused_time += collector->GetTotalPausedTimeNs();
961 collector->DumpPerformanceInfo(os);
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700962 }
Ian Rogers3e5cf302014-05-20 16:40:37 -0700963 uint64_t allocation_time =
964 static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700965 if (total_duration != 0) {
Brian Carlstrom2d888622013-07-18 17:02:00 -0700966 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700967 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
968 os << "Mean GC size throughput: "
Ian Rogers1d54e732013-05-02 21:10:01 -0700969 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700970 os << "Mean GC object throughput: "
Ian Rogers1d54e732013-05-02 21:10:01 -0700971 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700972 }
Mathieu Chartierdd162fb2014-08-06 17:06:33 -0700973 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
Mathieu Chartierc30a7252014-08-12 10:13:48 -0700974 os << "Total number of allocations " << total_objects_allocated << "\n";
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -0700975 os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
976 os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
Mathieu Chartierc30a7252014-08-12 10:13:48 -0700977 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
Mathieu Chartierdd162fb2014-08-06 17:06:33 -0700978 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
979 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
Mathieu Chartierc30a7252014-08-12 10:13:48 -0700980 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
981 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700982 if (kMeasureAllocationTime) {
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700983 os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
984 os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
985 << "\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700986 }
Mathieu Chartiere4cab172014-08-19 18:24:04 -0700987 if (HasZygoteSpace()) {
988 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
989 }
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700990 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -0700991 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
992 os << "Total GC count: " << GetGcCount() << "\n";
993 os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
994 os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
995 os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
996
997 {
998 MutexLock mu(Thread::Current(), *gc_complete_lock_);
999 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1000 os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1001 gc_count_rate_histogram_.DumpBins(os);
1002 os << "\n";
1003 }
1004 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1005 os << "Histogram of blocking GC count per "
1006 << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1007 blocking_gc_count_rate_histogram_.DumpBins(os);
1008 os << "\n";
1009 }
1010 }
1011
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001012 BaseMutex::DumpAll(os);
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001013}
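// Illustrative sketch, not part of this file: the report can be captured with any std::ostream,
// for example (the oss name is hypothetical; DumpSpaces() below uses the same pattern):
//   std::ostringstream oss;
//   Runtime::Current()->GetHeap()->DumpGcPerformanceInfo(oss);
//   LOG(INFO) << oss.str();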
1014
Hiroshi Yamauchi37670172015-06-10 17:20:54 -07001015void Heap::ResetGcPerformanceInfo() {
1016 for (auto& collector : garbage_collectors_) {
1017 collector->ResetMeasurements();
1018 }
1019 total_allocation_time_.StoreRelaxed(0);
1020 total_bytes_freed_ever_ = 0;
1021 total_objects_freed_ever_ = 0;
1022 total_wait_time_ = 0;
1023 blocking_gc_count_ = 0;
1024 blocking_gc_time_ = 0;
1025 gc_count_last_window_ = 0;
1026 blocking_gc_count_last_window_ = 0;
1027 last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
1028 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1029 {
1030 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1031 gc_count_rate_histogram_.Reset();
1032 blocking_gc_count_rate_histogram_.Reset();
1033 }
1034}
1035
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001036uint64_t Heap::GetGcCount() const {
1037 uint64_t gc_count = 0U;
1038 for (auto& collector : garbage_collectors_) {
1039 gc_count += collector->GetCumulativeTimings().GetIterations();
1040 }
1041 return gc_count;
1042}
1043
1044uint64_t Heap::GetGcTime() const {
1045 uint64_t gc_time = 0U;
1046 for (auto& collector : garbage_collectors_) {
1047 gc_time += collector->GetCumulativeTimings().GetTotalNs();
1048 }
1049 return gc_time;
1050}
1051
1052uint64_t Heap::GetBlockingGcCount() const {
1053 return blocking_gc_count_;
1054}
1055
1056uint64_t Heap::GetBlockingGcTime() const {
1057 return blocking_gc_time_;
1058}
1059
1060void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1061 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1062 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1063 gc_count_rate_histogram_.DumpBins(os);
1064 }
1065}
1066
1067void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1068 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1069 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1070 blocking_gc_count_rate_histogram_.DumpBins(os);
1071 }
1072}
1073
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001074Heap::~Heap() {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001075 VLOG(heap) << "Starting ~Heap()";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001076 STLDeleteElements(&garbage_collectors_);
1077 // If we don't reset then the mark stack complains in its destructor.
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001078 allocation_stack_->Reset();
Man Cao8c2ff642015-05-27 17:25:30 -07001079 allocation_records_.reset();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001080 live_stack_->Reset();
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001081 STLDeleteValues(&mod_union_tables_);
Mathieu Chartier0767c9a2014-03-26 12:53:19 -07001082 STLDeleteValues(&remembered_sets_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001083 STLDeleteElements(&continuous_spaces_);
1084 STLDeleteElements(&discontinuous_spaces_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001085 delete gc_complete_lock_;
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001086 delete pending_task_lock_;
Mathieu Chartier31000802015-06-14 14:14:37 -07001087 delete backtrace_lock_;
1088 if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
1089 LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
1090 << " total=" << seen_backtrace_count_.LoadRelaxed() +
1091 unique_backtrace_count_.LoadRelaxed();
1092 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001093 VLOG(heap) << "Finished ~Heap()";
Carl Shapiro69759ea2011-07-21 18:13:35 -07001094}
1095
Ian Rogers1d54e732013-05-02 21:10:01 -07001096space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
1097 bool fail_ok) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001098 for (const auto& space : continuous_spaces_) {
1099 if (space->Contains(obj)) {
1100 return space;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001101 }
1102 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001103 if (!fail_ok) {
1104 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
1105 }
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001106 return nullptr;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001107}
1108
Ian Rogers1d54e732013-05-02 21:10:01 -07001109space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
1110 bool fail_ok) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001111 for (const auto& space : discontinuous_spaces_) {
1112 if (space->Contains(obj)) {
1113 return space;
Ian Rogers1d54e732013-05-02 21:10:01 -07001114 }
1115 }
1116 if (!fail_ok) {
1117 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
1118 }
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001119 return nullptr;
Ian Rogers1d54e732013-05-02 21:10:01 -07001120}
1121
1122space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
1123 space::Space* result = FindContinuousSpaceFromObject(obj, true);
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001124 if (result != nullptr) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001125 return result;
1126 }
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001127 return FindDiscontinuousSpaceFromObject(obj, fail_ok);
Ian Rogers1d54e732013-05-02 21:10:01 -07001128}
1129
1130space::ImageSpace* Heap::GetImageSpace() const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001131 for (const auto& space : continuous_spaces_) {
1132 if (space->IsImageSpace()) {
1133 return space->AsImageSpace();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001134 }
1135 }
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001136 return nullptr;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001137}
1138
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001139void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001140 std::ostringstream oss;
Ian Rogersef7d42f2014-01-06 12:55:46 -08001141 size_t total_bytes_free = GetFreeMemory();
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001142 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001143 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001144 // If the allocation failed due to fragmentation, print out the largest continuous allocation.
Zuo Wangf37a88b2014-07-10 04:26:41 -07001145 if (total_bytes_free >= byte_count) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001146 space::AllocSpace* space = nullptr;
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001147 if (allocator_type == kAllocatorTypeNonMoving) {
1148 space = non_moving_space_;
1149 } else if (allocator_type == kAllocatorTypeRosAlloc ||
1150 allocator_type == kAllocatorTypeDlMalloc) {
1151 space = main_space_;
Mathieu Chartierb363f662014-07-16 13:28:58 -07001152 } else if (allocator_type == kAllocatorTypeBumpPointer ||
1153 allocator_type == kAllocatorTypeTLAB) {
1154 space = bump_pointer_space_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001155 } else if (allocator_type == kAllocatorTypeRegion ||
1156 allocator_type == kAllocatorTypeRegionTLAB) {
1157 space = region_space_;
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001158 }
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001159 if (space != nullptr) {
1160 space->LogFragmentationAllocFailure(oss, byte_count);
1161 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001162 }
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001163 self->ThrowOutOfMemoryError(oss.str().c_str());
1164}
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001165
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001166void Heap::DoPendingCollectorTransition() {
1167 CollectorType desired_collector_type = desired_collector_type_;
Mathieu Chartierb2728552014-09-08 20:08:41 +00001168 // Launch homogeneous space compaction if it is desired.
1169 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1170 if (!CareAboutPauseTimes()) {
1171 PerformHomogeneousSpaceCompact();
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001172 } else {
1173 VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
Mathieu Chartierb2728552014-09-08 20:08:41 +00001174 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001175 } else {
1176 TransitionCollector(desired_collector_type);
Mathieu Chartierb2728552014-09-08 20:08:41 +00001177 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001178}
1179
1180void Heap::Trim(Thread* self) {
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07001181 if (!CareAboutPauseTimes()) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001182 ATRACE_BEGIN("Deflating monitors");
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07001183    // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
1184 // about pauses.
1185 Runtime* runtime = Runtime::Current();
Mathieu Chartierbf9fc582015-03-13 17:21:25 -07001186 runtime->GetThreadList()->SuspendAll(__FUNCTION__);
Mathieu Chartier48ab6872014-06-24 11:21:59 -07001187 uint64_t start_time = NanoTime();
1188 size_t count = runtime->GetMonitorList()->DeflateMonitors();
1189 VLOG(heap) << "Deflating " << count << " monitors took "
1190 << PrettyDuration(NanoTime() - start_time);
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07001191 runtime->GetThreadList()->ResumeAll();
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001192 ATRACE_END();
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07001193 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001194 TrimIndirectReferenceTables(self);
1195 TrimSpaces(self);
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08001196}
1197
Mathieu Chartier91c2f0c2014-11-26 11:21:15 -08001198class TrimIndirectReferenceTableClosure : public Closure {
1199 public:
1200 explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1201 }
1202 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1203 ATRACE_BEGIN("Trimming reference table");
1204 thread->GetJniEnv()->locals.Trim();
1205 ATRACE_END();
Lei Lidd9943d2015-02-02 14:24:44 +08001206    // If the thread is a running mutator, then act on behalf of the trim thread.
1207 // See the code in ThreadList::RunCheckpoint.
1208 if (thread->GetState() == kRunnable) {
1209 barrier_->Pass(Thread::Current());
1210 }
Mathieu Chartier91c2f0c2014-11-26 11:21:15 -08001211 }
1212
1213 private:
1214 Barrier* const barrier_;
1215};
1216
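// Trim the global indirect reference table, then run a checkpoint so each thread trims its own
// local JNI reference table; the barrier below waits for the running mutators to finish.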
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001217void Heap::TrimIndirectReferenceTables(Thread* self) {
1218 ScopedObjectAccess soa(self);
1219 ATRACE_BEGIN(__FUNCTION__);
1220 JavaVMExt* vm = soa.Vm();
1221 // Trim globals indirect reference table.
1222 vm->TrimGlobals();
1223 // Trim locals indirect reference tables.
1224 Barrier barrier(0);
1225 TrimIndirectReferenceTableClosure closure(&barrier);
1226 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1227 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
Lei Lidd9943d2015-02-02 14:24:44 +08001228 if (barrier_count != 0) {
1229 barrier.Increment(self, barrier_count);
1230 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001231 ATRACE_END();
1232}
Mathieu Chartier91c2f0c2014-11-26 11:21:15 -08001233
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001234void Heap::TrimSpaces(Thread* self) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08001235 {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001236 // Need to do this before acquiring the locks since we don't want to get suspended while
1237 // holding any locks.
1238 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001239 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1240 // trimming.
1241 MutexLock mu(self, *gc_complete_lock_);
1242 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001243 WaitForGcToCompleteLocked(kGcCauseTrim, self);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001244 collector_type_running_ = kCollectorTypeHeapTrim;
1245 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001246 ATRACE_BEGIN(__FUNCTION__);
1247 const uint64_t start_ns = NanoTime();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001248 // Trim the managed spaces.
1249 uint64_t total_alloc_space_allocated = 0;
1250 uint64_t total_alloc_space_size = 0;
1251 uint64_t managed_reclaimed = 0;
1252 for (const auto& space : continuous_spaces_) {
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001253 if (space->IsMallocSpace()) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001254 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1255 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1256 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1257 // for a long period of time.
1258 managed_reclaimed += malloc_space->Trim();
1259 }
1260 total_alloc_space_size += malloc_space->Size();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001261 }
1262 }
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001263 total_alloc_space_allocated = GetBytesAllocated();
1264 if (large_object_space_ != nullptr) {
1265 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1266 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07001267 if (bump_pointer_space_ != nullptr) {
1268 total_alloc_space_allocated -= bump_pointer_space_->Size();
1269 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001270 if (region_space_ != nullptr) {
1271 total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1272 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001273 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1274 static_cast<float>(total_alloc_space_size);
1275 uint64_t gc_heap_end_ns = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001276 // We never move things in the native heap, so we can finish the GC at this point.
1277 FinishGC(self, collector::kGcTypeNone);
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001278 size_t native_reclaimed = 0;
Ian Rogers872dd822014-10-30 11:19:14 -07001279
Andreas Gampec60e1b72015-07-30 08:57:50 -07001280#ifdef __ANDROID__
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001281 // Only trim the native heap if we don't care about pauses.
1282 if (!CareAboutPauseTimes()) {
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001283#if defined(USE_DLMALLOC)
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001284 // Trim the native heap.
1285 dlmalloc_trim(0);
1286 dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001287#elif defined(USE_JEMALLOC)
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001288    // Jemalloc does its own internal trimming.
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001289#else
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001290 UNIMPLEMENTED(WARNING) << "Add trimming support";
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001291#endif
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001292 }
Andreas Gampec60e1b72015-07-30 08:57:50 -07001293#endif // __ANDROID__
Mathieu Chartier590fee92013-09-13 13:46:47 -07001294 uint64_t end_ns = NanoTime();
1295 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1296 << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
1297 << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
1298 << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
1299 << "%.";
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001300 ATRACE_END();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001301}
1302
1303bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1304 // Note: we deliberately don't take the lock here, and mustn't test anything that would require
1305 // taking the lock.
1306 if (obj == nullptr) {
Elliott Hughes88c5c352012-03-15 18:49:48 -07001307 return true;
1308 }
Mathieu Chartier15d34022014-02-26 17:16:38 -08001309 return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001310}
1311
Mathieu Chartierd68ac702014-02-11 14:50:51 -08001312bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1313 return FindContinuousSpaceFromObject(obj, true) != nullptr;
1314}
1315
Mathieu Chartier15d34022014-02-26 17:16:38 -08001316bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1317 if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1318 return false;
1319 }
1320 for (const auto& space : continuous_spaces_) {
1321 if (space->HasAddress(obj)) {
1322 return true;
1323 }
1324 }
1325 return false;
Elliott Hughesa2501992011-08-26 19:39:54 -07001326}
1327
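// Conservative liveness check: consults the bump pointer, temp and region spaces, the space live
// bitmaps and, with retries, the allocation and live stacks, since the stacks can be swapped
// concurrently.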
Ian Rogersef7d42f2014-01-06 12:55:46 -08001328bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001329 bool search_live_stack, bool sorted) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001330 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1331 return false;
1332 }
1333 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001334 mirror::Class* klass = obj->GetClass<kVerifyNone>();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001335 if (obj == klass) {
Mathieu Chartier9be9a7a2014-01-24 14:07:33 -08001336 // This case happens for java.lang.Class.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001337 return true;
1338 }
1339 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1340 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001341 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
 1342    // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1343 return temp_space_->Contains(obj);
Ian Rogers1d54e732013-05-02 21:10:01 -07001344 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001345 if (region_space_ != nullptr && region_space_->HasAddress(obj)) {
1346 return true;
1347 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001348 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001349 space::DiscontinuousSpace* d_space = nullptr;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001350 if (c_space != nullptr) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001351 if (c_space->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001352 return true;
1353 }
1354 } else {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001355 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001356 if (d_space != nullptr) {
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001357 if (d_space->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001358 return true;
1359 }
1360 }
1361 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001362  // This covers the allocation/live stack swapping that is done without suspending mutators.
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001363 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1364 if (i > 0) {
1365 NanoSleep(MsToNs(10));
Ian Rogers1d54e732013-05-02 21:10:01 -07001366 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001367 if (search_allocation_stack) {
1368 if (sorted) {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001369 if (allocation_stack_->ContainsSorted(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001370 return true;
1371 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08001372 } else if (allocation_stack_->Contains(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001373 return true;
1374 }
1375 }
1376
1377 if (search_live_stack) {
1378 if (sorted) {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001379 if (live_stack_->ContainsSorted(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001380 return true;
1381 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08001382 } else if (live_stack_->Contains(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001383 return true;
1384 }
1385 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001386 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001387 // We need to check the bitmaps again since there is a race where we mark something as live and
1388 // then clear the stack containing it.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001389 if (c_space != nullptr) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001390 if (c_space->GetLiveBitmap()->Test(obj)) {
1391 return true;
1392 }
1393 } else {
1394 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001395 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001396 return true;
1397 }
1398 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001399 return false;
Elliott Hughes6a5bd492011-10-28 14:33:57 -07001400}
1401
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07001402std::string Heap::DumpSpaces() const {
1403 std::ostringstream oss;
1404 DumpSpaces(oss);
1405 return oss.str();
1406}
1407
1408void Heap::DumpSpaces(std::ostream& stream) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001409 for (const auto& space : continuous_spaces_) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001410 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1411 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001412 stream << space << " " << *space << "\n";
1413 if (live_bitmap != nullptr) {
1414 stream << live_bitmap << " " << *live_bitmap << "\n";
1415 }
1416 if (mark_bitmap != nullptr) {
1417 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1418 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001419 }
Mathieu Chartier02e25112013-08-14 16:14:24 -07001420 for (const auto& space : discontinuous_spaces_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001421 stream << space << " " << *space << "\n";
Mathieu Chartier128c52c2012-10-16 14:12:41 -07001422 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001423}
1424
Ian Rogersef7d42f2014-01-06 12:55:46 -08001425void Heap::VerifyObjectBody(mirror::Object* obj) {
Stephen Hines22c6a812014-07-16 11:03:43 -07001426 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1427 return;
1428 }
1429
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001430 // Ignore early dawn of the universe verifications.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001431 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
Ian Rogers62d6c772013-02-27 08:32:07 -08001432 return;
1433 }
Roland Levillain14d90572015-07-16 10:52:26 +01001434 CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned";
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001435 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
Mathieu Chartier4e305412014-02-19 10:54:44 -08001436 CHECK(c != nullptr) << "Null class in object " << obj;
Roland Levillain14d90572015-07-16 10:52:26 +01001437 CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001438 CHECK(VerifyClassClass(c));
Mathieu Chartier0325e622012-09-05 14:22:51 -07001439
Mathieu Chartier4e305412014-02-19 10:54:44 -08001440 if (verify_object_mode_ > kVerifyObjectModeFast) {
1441 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07001442 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
Mathieu Chartierdcf8d722012-08-02 14:55:54 -07001443 }
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001444}
1445
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001446void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001447 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001448}
1449
1450void Heap::VerifyHeap() {
Ian Rogers50b35e22012-10-04 10:09:15 -07001451 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001452 GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001453}
1454
Mathieu Chartiere76e70f2014-05-02 16:35:37 -07001455void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
Mathieu Chartier601276a2014-03-20 15:12:30 -07001456  // Use a signed comparison since freed bytes can be negative when a background compaction to
 1457  // foreground transition occurs. This is caused by moving objects from a bump pointer space to a
 1458  // free-list-backed space, which typically increases the memory footprint due to padding and binning.
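  // For example, a background-to-foreground compaction that grows the footprint by 1 KiB reaches
  // this point with freed_bytes == -1024; the subtraction below then adds 1024 to
  // num_bytes_allocated_.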
Ian Rogers3e5cf302014-05-20 16:40:37 -07001459 DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
Mathieu Chartiere76e70f2014-05-02 16:35:37 -07001460 // Note: This relies on 2s complement for handling negative freed_bytes.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001461 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001462 if (Runtime::Current()->HasStatsEnabled()) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001463 RuntimeStats* thread_stats = Thread::Current()->GetStats();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001464 thread_stats->freed_objects += freed_objects;
Elliott Hughes307f75d2011-10-12 18:04:40 -07001465 thread_stats->freed_bytes += freed_bytes;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001466 // TODO: Do this concurrently.
1467 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1468 global_stats->freed_objects += freed_objects;
1469 global_stats->freed_bytes += freed_bytes;
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001470 }
Carl Shapiro58551df2011-07-24 03:09:51 -07001471}
1472
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001473void Heap::RecordFreeRevoke() {
 1474  // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out
 1475  // the ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
 1476  // If there's a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_
 1477  // all the way to zero, as the remainder will be subtracted at the next GC.
1478 size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
1479 CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
1480 bytes_freed) << "num_bytes_freed_revoke_ underflow";
1481 CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
1482 bytes_freed) << "num_bytes_allocated_ underflow";
1483 GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1484}
1485
Zuo Wangf37a88b2014-07-10 04:26:41 -07001486space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1487 for (const auto& space : continuous_spaces_) {
1488 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1489 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1490 return space->AsContinuousSpace()->AsRosAllocSpace();
1491 }
1492 }
1493 }
1494 return nullptr;
1495}
1496
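// Allocation slow path: wait for any in-progress GC and retry, then walk through gc_plan_,
// collecting and retrying after each GC, and finally grow the heap and clear SoftReferences
// before throwing OOME.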
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001497mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001498 size_t alloc_size, size_t* bytes_allocated,
Ian Rogers6fac4472014-02-25 17:01:10 -08001499 size_t* usable_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001500 size_t* bytes_tl_bulk_allocated,
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001501 mirror::Class** klass) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001502 bool was_default_allocator = allocator == GetCurrentAllocator();
Mathieu Chartierf4f38432014-09-03 11:21:08 -07001503 // Make sure there is no pending exception since we may need to throw an OOME.
1504 self->AssertNoPendingException();
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001505 DCHECK(klass != nullptr);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001506 StackHandleScope<1> hs(self);
1507 HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
1508 klass = nullptr; // Invalidate for safety.
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001509 // The allocation failed. If the GC is running, block until it completes, and then retry the
1510 // allocation.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001511 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
Ian Rogers1d54e732013-05-02 21:10:01 -07001512 if (last_gc != collector::kGcTypeNone) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001513 // If we were the default allocator but the allocator changed while we were suspended,
1514 // abort the allocation.
1515 if (was_default_allocator && allocator != GetCurrentAllocator()) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001516 return nullptr;
1517 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001518 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001519 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001520 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001521 if (ptr != nullptr) {
1522 return ptr;
1523 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001524 }
1525
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001526 collector::GcType tried_type = next_gc_type_;
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001527 const bool gc_ran =
1528 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1529 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1530 return nullptr;
1531 }
1532 if (gc_ran) {
1533 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001534 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001535 if (ptr != nullptr) {
1536 return ptr;
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001537 }
1538 }
1539
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001540 // Loop through our different Gc types and try to Gc until we get enough free memory.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001541 for (collector::GcType gc_type : gc_plan_) {
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001542 if (gc_type == tried_type) {
1543 continue;
1544 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001545 // Attempt to run the collector, if we succeed, re-try the allocation.
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001546 const bool plan_gc_ran =
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001547 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1548 if (was_default_allocator && allocator != GetCurrentAllocator()) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001549 return nullptr;
1550 }
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001551 if (plan_gc_ran) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001552 // Did we free sufficient memory for the allocation to succeed?
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001553 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001554 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001555 if (ptr != nullptr) {
1556 return ptr;
1557 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001558 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001559 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001560 // Allocations have failed after GCs; this is an exceptional state.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001561 // Try harder, growing the heap if necessary.
1562 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001563 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001564 if (ptr != nullptr) {
1565 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001566 }
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001567 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1568 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1569 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1570 // OOME.
1571 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1572 << " allocation";
1573 // TODO: Run finalization, but this may cause more allocations to occur.
1574 // We don't need a WaitForGcToComplete here either.
1575 DCHECK(!gc_plan_.empty());
1576 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1577 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1578 return nullptr;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001579 }
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001580 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1581 bytes_tl_bulk_allocated);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001582 if (ptr == nullptr) {
Zuo Wangf37a88b2014-07-10 04:26:41 -07001583 const uint64_t current_time = NanoTime();
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001584 switch (allocator) {
1585 case kAllocatorTypeRosAlloc:
1586 // Fall-through.
1587 case kAllocatorTypeDlMalloc: {
1588 if (use_homogeneous_space_compaction_for_oom_ &&
1589 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1590 min_interval_homogeneous_space_compaction_by_oom_) {
1591 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1592 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1593 switch (result) {
1594 case HomogeneousSpaceCompactResult::kSuccess:
1595 // If the allocation succeeded, we delayed an oom.
1596 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001597 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001598 if (ptr != nullptr) {
1599 count_delayed_oom_++;
1600 }
1601 break;
1602 case HomogeneousSpaceCompactResult::kErrorReject:
1603 // Reject due to disabled moving GC.
1604 break;
1605 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1606 // Throw OOM by default.
1607 break;
1608 default: {
Ian Rogers2c4257b2014-10-24 14:20:06 -07001609 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1610 << static_cast<size_t>(result);
1611 UNREACHABLE();
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001612 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001613 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001614          // Always print that we ran homogeneous space compaction since this can cause jank.
1615 VLOG(heap) << "Ran heap homogeneous space compaction, "
1616 << " requested defragmentation "
1617 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1618 << " performed defragmentation "
1619 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1620 << " ignored homogeneous space compaction "
1621 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1622 << " delayed count = "
1623 << count_delayed_oom_.LoadSequentiallyConsistent();
Zuo Wangf37a88b2014-07-10 04:26:41 -07001624 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001625 break;
Zuo Wangf37a88b2014-07-10 04:26:41 -07001626 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001627 case kAllocatorTypeNonMoving: {
1628 // Try to transition the heap if the allocation failure was due to the space being full.
1629 if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
 1630        // If we aren't out of memory, then the OOM was probably caused by the non-moving space being
 1631        // full. Attempt to disable compaction and turn the main space into a non-moving space.
1632 DisableMovingGc();
1633 // If we are still a moving GC then something must have caused the transition to fail.
1634 if (IsMovingGc(collector_type_)) {
1635 MutexLock mu(self, *gc_complete_lock_);
1636 // If we couldn't disable moving GC, just throw OOME and return null.
1637 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1638 << disable_moving_gc_count_;
1639 } else {
1640 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1641 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001642 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001643 }
1644 }
1645 break;
1646 }
1647 default: {
 1648        // Do nothing for other allocators.
1649 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001650 }
1651 }
1652 // If the allocation hasn't succeeded by this point, throw an OOM error.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001653 if (ptr == nullptr) {
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001654 ThrowOutOfMemoryError(self, alloc_size, allocator);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001655 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001656 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001657}
1658
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001659void Heap::SetTargetHeapUtilization(float target) {
1660 DCHECK_GT(target, 0.0f); // asserted in Java code
1661 DCHECK_LT(target, 1.0f);
1662 target_utilization_ = target;
1663}
1664
Ian Rogers1d54e732013-05-02 21:10:01 -07001665size_t Heap::GetObjectsAllocated() const {
Mathieu Chartierb43390c2015-05-12 10:47:11 -07001666 Thread* self = Thread::Current();
1667 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1668 auto* tl = Runtime::Current()->GetThreadList();
1669 // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1670 tl->SuspendAll(__FUNCTION__);
Ian Rogers1d54e732013-05-02 21:10:01 -07001671 size_t total = 0;
Mathieu Chartierb43390c2015-05-12 10:47:11 -07001672 {
1673 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1674 for (space::AllocSpace* space : alloc_spaces_) {
1675 total += space->GetObjectsAllocated();
1676 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001677 }
Mathieu Chartierb43390c2015-05-12 10:47:11 -07001678 tl->ResumeAll();
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001679 return total;
1680}
1681
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001682uint64_t Heap::GetObjectsAllocatedEver() const {
Mathieu Chartier4edd8472015-06-01 10:47:36 -07001683 uint64_t total = GetObjectsFreedEver();
1684 // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1685 if (Thread::Current() != nullptr) {
1686 total += GetObjectsAllocated();
1687 }
1688 return total;
Ian Rogers1d54e732013-05-02 21:10:01 -07001689}
1690
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001691uint64_t Heap::GetBytesAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001692 return GetBytesFreedEver() + GetBytesAllocated();
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001693}
1694
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001695class InstanceCounter {
1696 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001697 InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
Mathieu Chartier90443472015-07-16 20:32:27 -07001698 SHARED_REQUIRES(Locks::mutator_lock_)
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001699 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001700 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001701 static void Callback(mirror::Object* obj, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -07001702 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001703 InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1704 mirror::Class* instance_class = obj->GetClass();
1705 CHECK(instance_class != nullptr);
1706 for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
Mathieu Chartierf1820852015-07-10 13:19:51 -07001707 mirror::Class* klass = instance_counter->classes_[i];
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001708 if (instance_counter->use_is_assignable_from_) {
Mathieu Chartierf1820852015-07-10 13:19:51 -07001709 if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001710 ++instance_counter->counts_[i];
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001711 }
Mathieu Chartierf1820852015-07-10 13:19:51 -07001712 } else if (instance_class == klass) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001713 ++instance_counter->counts_[i];
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001714 }
1715 }
1716 }
1717
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001718 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001719 const std::vector<mirror::Class*>& classes_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001720 bool use_is_assignable_from_;
1721 uint64_t* const counts_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001722 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001723};
1724
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001725void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001726 uint64_t* counts) {
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001727 InstanceCounter counter(classes, use_is_assignable_from, counts);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001728 VisitObjects(InstanceCounter::Callback, &counter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001729}
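// Illustrative sketch, not part of this file: counting direct instances of a single class c
// (the classes/counts names are hypothetical):
//   std::vector<mirror::Class*> classes = { c };
//   uint64_t counts[1] = { 0 };
//   Runtime::Current()->GetHeap()->CountInstances(classes, /* use_is_assignable_from */ false, counts);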
1730
Elliott Hughes3b78c942013-01-15 17:35:41 -08001731class InstanceCollector {
1732 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001733 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
Mathieu Chartier90443472015-07-16 20:32:27 -07001734 SHARED_REQUIRES(Locks::mutator_lock_)
Elliott Hughes3b78c942013-01-15 17:35:41 -08001735 : class_(c), max_count_(max_count), instances_(instances) {
1736 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001737 static void Callback(mirror::Object* obj, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -07001738 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001739 DCHECK(arg != nullptr);
1740 InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001741 if (obj->GetClass() == instance_collector->class_) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001742 if (instance_collector->max_count_ == 0 ||
1743 instance_collector->instances_.size() < instance_collector->max_count_) {
1744 instance_collector->instances_.push_back(obj);
Elliott Hughes3b78c942013-01-15 17:35:41 -08001745 }
1746 }
1747 }
1748
1749 private:
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001750 const mirror::Class* const class_;
1751 const uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001752 std::vector<mirror::Object*>& instances_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001753 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1754};
1755
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001756void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1757 std::vector<mirror::Object*>& instances) {
Elliott Hughes3b78c942013-01-15 17:35:41 -08001758 InstanceCollector collector(c, max_count, instances);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001759 VisitObjects(&InstanceCollector::Callback, &collector);
Elliott Hughes3b78c942013-01-15 17:35:41 -08001760}
1761
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001762class ReferringObjectsFinder {
1763 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001764 ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1765 std::vector<mirror::Object*>& referring_objects)
Mathieu Chartier90443472015-07-16 20:32:27 -07001766 SHARED_REQUIRES(Locks::mutator_lock_)
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001767 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1768 }
1769
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001770 static void Callback(mirror::Object* obj, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -07001771 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001772 reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1773 }
1774
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001775 // For bitmap Visit.
1776 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1777 // annotalysis on visitors.
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001778 void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001779 o->VisitReferences<true>(*this, VoidFunctor());
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001780 }
1781
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -07001782 // For Object::VisitReferences.
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001783 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001784 SHARED_REQUIRES(Locks::mutator_lock_) {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001785 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
Mathieu Chartier407f7022014-02-18 14:37:05 -08001786 if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1787 referring_objects_.push_back(obj);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001788 }
1789 }
1790
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001791 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
1792 const {}
1793 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
1794
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001795 private:
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001796 const mirror::Object* const object_;
1797 const uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001798 std::vector<mirror::Object*>& referring_objects_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001799 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1800};
1801
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001802void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1803 std::vector<mirror::Object*>& referring_objects) {
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001804 ReferringObjectsFinder finder(o, max_count, referring_objects);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001805 VisitObjects(&ReferringObjectsFinder::Callback, &finder);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001806}
1807
Ian Rogers30fab402012-01-23 15:43:46 -08001808void Heap::CollectGarbage(bool clear_soft_references) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001809 // Even if we waited for a GC, we still need to do another GC since weak references allocated
1810 // during the last GC will not necessarily have been cleared.
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001811 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001812}
1813
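// Compacts the main malloc space into the backup space and then swaps the two, so that live
// objects end up densely packed at the start of a fresh space. All threads are suspended for the
// duration of the copy, so this is only attempted when moving collections are currently allowed.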
Zuo Wangf37a88b2014-07-10 04:26:41 -07001814HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
1815 Thread* self = Thread::Current();
1816 // Increment the count of requested homogeneous space compactions.
1817 count_requested_homogeneous_space_compaction_++;
1818 // Store performed homogeneous space compaction at a new request arrival.
1819 ThreadList* tl = Runtime::Current()->GetThreadList();
1820 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1821 Locks::mutator_lock_->AssertNotHeld(self);
1822 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001823 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001824 MutexLock mu(self, *gc_complete_lock_);
1825 // Ensure there is only one GC at a time.
1826 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
1827 // Homogeneous space compaction is a copying transition; we can't run it if the moving GC
1828 // disable count is non-zero.
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001829 // If the collector type changed to something which doesn't benefit from homogeneous space compaction,
Zuo Wangf37a88b2014-07-10 04:26:41 -07001830 // exit.
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001831 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
1832 !main_space_->CanMoveObjects()) {
Zuo Wangf37a88b2014-07-10 04:26:41 -07001833 return HomogeneousSpaceCompactResult::kErrorReject;
1834 }
1835 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
1836 }
1837 if (Runtime::Current()->IsShuttingDown(self)) {
1838 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1839 // cause objects to get finalized.
1840 FinishGC(self, collector::kGcTypeNone);
1841 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
1842 }
1843 // Suspend all threads.
Mathieu Chartierbf9fc582015-03-13 17:21:25 -07001844 tl->SuspendAll(__FUNCTION__);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001845 uint64_t start_time = NanoTime();
1846 // Launch compaction.
Mathieu Chartierb363f662014-07-16 13:28:58 -07001847 space::MallocSpace* to_space = main_space_backup_.release();
Zuo Wangf37a88b2014-07-10 04:26:41 -07001848 space::MallocSpace* from_space = main_space_;
1849 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1850 const uint64_t space_size_before_compaction = from_space->Size();
Mathieu Chartierb363f662014-07-16 13:28:58 -07001851 AddSpace(to_space);
Mathieu Chartier0310da52014-12-01 13:40:48 -08001852 // Make sure that we will have enough room to copy.
1853 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08001854 collector::GarbageCollector* collector = Compact(to_space, from_space,
1855 kGcCauseHomogeneousSpaceCompact);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001856 const uint64_t space_size_after_compaction = to_space->Size();
Mathieu Chartierb363f662014-07-16 13:28:58 -07001857 main_space_ = to_space;
1858 main_space_backup_.reset(from_space);
1859 RemoveSpace(from_space);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001860 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
1861 // Update performed homogeneous space compaction count.
1862 count_performed_homogeneous_space_compaction_++;
1863 // Print the statistics log and resume all threads.
1864 uint64_t duration = NanoTime() - start_time;
Mathieu Chartier98172a62014-09-02 12:33:25 -07001865 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
1866 << PrettySize(space_size_before_compaction) << " -> "
1867 << PrettySize(space_size_after_compaction) << " compact-ratio: "
1868 << std::fixed << static_cast<double>(space_size_after_compaction) /
1869 static_cast<double>(space_size_before_compaction);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001870 tl->ResumeAll();
1871 // Finish GC.
Mathieu Chartier3cf22532015-07-09 15:15:09 -07001872 reference_processor_->EnqueueClearedReferences(self);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001873 GrowForUtilization(semi_space_collector_);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08001874 LogGC(kGcCauseHomogeneousSpaceCompact, collector);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001875 FinishGC(self, collector::kGcTypeFull);
1876 return HomogeneousSpaceCompactResult::kSuccess;
1877}
1878
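// Switches the heap to a different collector type, e.g. between a moving (semi-space / bump
// pointer) configuration and a non-moving (mark-sweep / malloc space) configuration. When the
// transition crosses the moving/non-moving boundary, live objects are copied into the
// appropriate space with all threads suspended.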
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001879void Heap::TransitionCollector(CollectorType collector_type) {
1880 if (collector_type == collector_type_) {
1881 return;
1882 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001883 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1884 << " -> " << static_cast<int>(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001885 uint64_t start_time = NanoTime();
Ian Rogers3e5cf302014-05-20 16:40:37 -07001886 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001887 Runtime* const runtime = Runtime::Current();
1888 ThreadList* const tl = runtime->GetThreadList();
1889 Thread* const self = Thread::Current();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001890 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1891 Locks::mutator_lock_->AssertNotHeld(self);
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001892 // Busy wait until we can GC (StartGC can fail if we have a non-zero
1893 // compacting_gc_disable_count_; this should rarely occur).
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001894 for (;;) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001895 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001896 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001897 MutexLock mu(self, *gc_complete_lock_);
1898 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001899 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
Mathieu Chartiere4927f62014-08-23 13:56:03 -07001900 // Currently we only need a heap transition if we switch from a moving collector to a
1901 // non-moving one, or vice versa.
1902 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
Mathieu Chartierb38d4832014-04-10 10:56:55 -07001903 // If someone else beat us to it and changed the collector before we could, exit.
1904 // This is safe to do before the suspend all since we set the collector_type_running_ before
1905 // we exit the loop. If another thread attempts to do the heap transition before we exit,
1906 // then it would get blocked on WaitForGcToCompleteLocked.
1907 if (collector_type == collector_type_) {
1908 return;
1909 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001910 // GC can be disabled if someone has used GetPrimitiveArrayCritical but has not yet released it.
1911 if (!copying_transition || disable_moving_gc_count_ == 0) {
1912 // TODO: Not hard code in semi-space collector?
1913 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1914 break;
1915 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001916 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001917 usleep(1000);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001918 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001919 if (runtime->IsShuttingDown(self)) {
Hiroshi Yamauchia6a8d142014-05-12 16:57:33 -07001920 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1921 // cause objects to get finalized.
1922 FinishGC(self, collector::kGcTypeNone);
1923 return;
1924 }
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08001925 collector::GarbageCollector* collector = nullptr;
Mathieu Chartierbf9fc582015-03-13 17:21:25 -07001926 tl->SuspendAll(__FUNCTION__);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001927 switch (collector_type) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001928 case kCollectorTypeSS: {
Mathieu Chartier31f44142014-04-08 14:40:03 -07001929 if (!IsMovingGc(collector_type_)) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001930 // Create the bump pointer space from the backup space.
1931 CHECK(main_space_backup_ != nullptr);
1932 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
Mathieu Chartier31f44142014-04-08 14:40:03 -07001933 // We are transitioning from a non-moving GC to a moving GC. Since we copied from the bump
1934 // pointer space during the last transition, it will be protected.
Mathieu Chartierb363f662014-07-16 13:28:58 -07001935 CHECK(mem_map != nullptr);
1936 mem_map->Protect(PROT_READ | PROT_WRITE);
1937 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
1938 mem_map.release());
1939 AddSpace(bump_pointer_space_);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08001940 collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
Mathieu Chartierb363f662014-07-16 13:28:58 -07001941 // Use the now empty main space mem map for the bump pointer temp space.
1942 mem_map.reset(main_space_->ReleaseMemMap());
Mathieu Chartier00b59152014-07-25 10:13:51 -07001943 // Unset the pointers just in case.
1944 if (dlmalloc_space_ == main_space_) {
1945 dlmalloc_space_ = nullptr;
1946 } else if (rosalloc_space_ == main_space_) {
1947 rosalloc_space_ = nullptr;
1948 }
Mathieu Chartier2796a162014-07-25 11:50:47 -07001949 // Remove the main space so that we don't try to trim it; this doesn't work for debug
1950 // builds since RosAlloc attempts to read the magic number from a protected page.
1951 RemoveSpace(main_space_);
Mathieu Chartierc5a83472014-07-23 18:45:17 -07001952 RemoveRememberedSet(main_space_);
Mathieu Chartier2796a162014-07-25 11:50:47 -07001953 delete main_space_; // Delete the space since it has been removed.
Mathieu Chartierc5a83472014-07-23 18:45:17 -07001954 main_space_ = nullptr;
Mathieu Chartier2796a162014-07-25 11:50:47 -07001955 RemoveRememberedSet(main_space_backup_.get());
1956 main_space_backup_.reset(nullptr); // Deletes the space.
Mathieu Chartierb363f662014-07-16 13:28:58 -07001957 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
1958 mem_map.release());
1959 AddSpace(temp_space_);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001960 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001961 break;
1962 }
1963 case kCollectorTypeMS:
1964 // Fall through.
1965 case kCollectorTypeCMS: {
Mathieu Chartier31f44142014-04-08 14:40:03 -07001966 if (IsMovingGc(collector_type_)) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001967 CHECK(temp_space_ != nullptr);
1968 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
1969 RemoveSpace(temp_space_);
1970 temp_space_ = nullptr;
Mathieu Chartier36dab362014-07-30 14:59:56 -07001971 mem_map->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier0310da52014-12-01 13:40:48 -08001972 CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize,
1973 std::min(mem_map->Size(), growth_limit_), mem_map->Size());
Mathieu Chartierb363f662014-07-16 13:28:58 -07001974 mem_map.release();
Mathieu Chartier31f44142014-04-08 14:40:03 -07001975 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07001976 AddSpace(main_space_);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08001977 collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
Mathieu Chartierb363f662014-07-16 13:28:58 -07001978 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
1979 RemoveSpace(bump_pointer_space_);
1980 bump_pointer_space_ = nullptr;
1981 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
Hiroshi Yamauchic1276c82014-08-07 10:27:17 -07001982 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
1983 if (kIsDebugBuild && kUseRosAlloc) {
1984 mem_map->Protect(PROT_READ | PROT_WRITE);
1985 }
Mathieu Chartier0310da52014-12-01 13:40:48 -08001986 main_space_backup_.reset(CreateMallocSpaceFromMemMap(
1987 mem_map.get(), kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
1988 mem_map->Size(), name, true));
Hiroshi Yamauchic1276c82014-08-07 10:27:17 -07001989 if (kIsDebugBuild && kUseRosAlloc) {
1990 mem_map->Protect(PROT_NONE);
1991 }
Mathieu Chartierb363f662014-07-16 13:28:58 -07001992 mem_map.release();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001993 }
1994 break;
1995 }
1996 default: {
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001997 LOG(FATAL) << "Attempted to transition to invalid collector type "
1998 << static_cast<size_t>(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001999 break;
2000 }
2001 }
2002 ChangeCollector(collector_type);
2003 tl->ResumeAll();
2004 // Can't call into java code with all threads suspended.
Mathieu Chartier3cf22532015-07-09 15:15:09 -07002005 reference_processor_->EnqueueClearedReferences(self);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002006 uint64_t duration = NanoTime() - start_time;
Mathieu Chartierafe49982014-03-27 10:55:04 -07002007 GrowForUtilization(semi_space_collector_);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002008 DCHECK(collector != nullptr);
2009 LogGC(kGcCauseCollectorTransition, collector);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002010 FinishGC(self, collector::kGcTypeFull);
Ian Rogers3e5cf302014-05-20 16:40:37 -07002011 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002012 int32_t delta_allocated = before_allocated - after_allocated;
Mathieu Chartier19d46b42014-06-17 15:04:40 -07002013 std::string saved_str;
2014 if (delta_allocated >= 0) {
2015 saved_str = " saved at least " + PrettySize(delta_allocated);
2016 } else {
2017 saved_str = " expanded " + PrettySize(-delta_allocated);
2018 }
Mathieu Chartier98172a62014-09-02 12:33:25 -07002019 VLOG(heap) << "Heap transition to " << process_state_ << " took "
Mathieu Chartier19d46b42014-06-17 15:04:40 -07002020 << PrettyDuration(duration) << saved_str;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002021}
2022
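// Updates collector_type_, rebuilds gc_plan_ (ordered from the cheapest GC type to the most
// thorough one) and switches the current allocator to match the new collector.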
Mathieu Chartier0de9f732013-11-22 17:58:48 -08002023void Heap::ChangeCollector(CollectorType collector_type) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002024 // TODO: Only do this with all mutators suspended to avoid races.
2025 if (collector_type != collector_type_) {
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002026 if (collector_type == kCollectorTypeMC) {
2027 // Don't allow mark compact unless support is compiled in.
2028 CHECK(kMarkCompactSupport);
2029 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002030 collector_type_ = collector_type;
2031 gc_plan_.clear();
2032 switch (collector_type_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002033 case kCollectorTypeCC: {
2034 gc_plan_.push_back(collector::kGcTypeFull);
2035 if (use_tlab_) {
2036 ChangeAllocator(kAllocatorTypeRegionTLAB);
2037 } else {
2038 ChangeAllocator(kAllocatorTypeRegion);
2039 }
2040 break;
2041 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002042 case kCollectorTypeMC: // Fall-through.
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002043 case kCollectorTypeSS: // Fall-through.
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08002044 case kCollectorTypeGSS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002045 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08002046 if (use_tlab_) {
2047 ChangeAllocator(kAllocatorTypeTLAB);
2048 } else {
2049 ChangeAllocator(kAllocatorTypeBumpPointer);
2050 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002051 break;
2052 }
2053 case kCollectorTypeMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002054 gc_plan_.push_back(collector::kGcTypeSticky);
2055 gc_plan_.push_back(collector::kGcTypePartial);
2056 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002057 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002058 break;
2059 }
2060 case kCollectorTypeCMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002061 gc_plan_.push_back(collector::kGcTypeSticky);
2062 gc_plan_.push_back(collector::kGcTypePartial);
2063 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002064 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002065 break;
2066 }
2067 default: {
Ian Rogers2c4257b2014-10-24 14:20:06 -07002068 UNIMPLEMENTED(FATAL);
2069 UNREACHABLE();
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002070 }
Mathieu Chartier0de9f732013-11-22 17:58:48 -08002071 }
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002072 if (IsGcConcurrent()) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002073 concurrent_start_bytes_ =
2074 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
2075 } else {
2076 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Mathieu Chartier0de9f732013-11-22 17:58:48 -08002077 }
2078 }
2079}
2080
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002081// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
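// Roughly: BuildBins() walks the live bitmap and records every gap between live objects as a
// "bin" (size -> address). MarkNonForwardedObject() then moves each surviving object into the
// smallest bin it fits in, re-inserting the leftover space as a new, smaller bin. For example,
// with bins {8 -> 0x1000, 32 -> 0x2000}, a 16-byte object would be placed at 0x2000 and a new
// 16-byte bin at 0x2010 would be added. Objects that fit in no bin are allocated at the end of
// the target space instead.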
Ian Rogers6fac4472014-02-25 17:01:10 -08002082class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002083 public:
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002084 explicit ZygoteCompactingCollector(gc::Heap* heap,
2085 bool is_running_on_memory_tool)
2086 : SemiSpace(heap, false, "zygote collector"),
2087 bin_live_bitmap_(nullptr),
2088 bin_mark_bitmap_(nullptr),
2089 is_running_on_memory_tool_(is_running_on_memory_tool) {}
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002090
2091 void BuildBins(space::ContinuousSpace* space) {
2092 bin_live_bitmap_ = space->GetLiveBitmap();
2093 bin_mark_bitmap_ = space->GetMarkBitmap();
2094 BinContext context;
2095 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
2096 context.collector_ = this;
2097 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2098 // Note: This requires traversing the space in increasing order of object addresses.
2099 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
2100 // Add the last bin which spans after the last object to the end of the space.
2101 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
2102 }
2103
2104 private:
2105 struct BinContext {
2106 uintptr_t prev_; // The end of the previous object.
2107 ZygoteCompactingCollector* collector_;
2108 };
2109 // Maps from bin sizes to locations.
2110 std::multimap<size_t, uintptr_t> bins_;
2111 // Live bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002112 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002113 // Mark bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002114 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002115 const bool is_running_on_memory_tool_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002116
2117 static void Callback(mirror::Object* obj, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -07002118 SHARED_REQUIRES(Locks::mutator_lock_) {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002119 DCHECK(arg != nullptr);
2120 BinContext* context = reinterpret_cast<BinContext*>(arg);
2121 ZygoteCompactingCollector* collector = context->collector_;
2122 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2123 size_t bin_size = object_addr - context->prev_;
2124 // Add the bin covering the gap from the end of the previous object to the start of the current object.
2125 collector->AddBin(bin_size, context->prev_);
2126 context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
2127 }
2128
2129 void AddBin(size_t size, uintptr_t position) {
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002130 if (is_running_on_memory_tool_) {
2131 MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2132 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002133 if (size != 0) {
2134 bins_.insert(std::make_pair(size, position));
2135 }
2136 }
2137
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002138 virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002139 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2140 // allocator.
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07002141 UNUSED(space);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002142 return false;
2143 }
2144
2145 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
Mathieu Chartier90443472015-07-16 20:32:27 -07002146 REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002147 size_t obj_size = obj->SizeOf();
2148 size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08002149 mirror::Object* forward_address;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002150 // Find the smallest bin into which we can move obj.
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002151 auto it = bins_.lower_bound(alloc_size);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002152 if (it == bins_.end()) {
2153 // No available space in the bins, place it in the target space instead (grows the zygote
2154 // space).
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002155 size_t bytes_allocated, dummy;
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002156 forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002157 if (to_space_live_bitmap_ != nullptr) {
2158 to_space_live_bitmap_->Set(forward_address);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002159 } else {
2160 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2161 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002162 }
2163 } else {
2164 size_t size = it->first;
2165 uintptr_t pos = it->second;
2166 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
2167 forward_address = reinterpret_cast<mirror::Object*>(pos);
2168 // Set the live and mark bits so that sweeping system weaks works properly.
2169 bin_live_bitmap_->Set(forward_address);
2170 bin_mark_bitmap_->Set(forward_address);
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002171 DCHECK_GE(size, alloc_size);
2172 // Add a new bin with the remaining space.
2173 AddBin(size - alloc_size, pos + alloc_size);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002174 }
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002175 // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
2176 memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
Hiroshi Yamauchi624468c2014-03-31 15:14:47 -07002177 if (kUseBakerOrBrooksReadBarrier) {
2178 obj->AssertReadBarrierPointer();
2179 if (kUseBrooksReadBarrier) {
2180 DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
2181 forward_address->SetReadBarrierPointer(forward_address);
2182 }
2183 forward_address->AssertReadBarrierPointer();
Hiroshi Yamauchi9d04a202014-01-31 13:35:49 -08002184 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002185 return forward_address;
2186 }
2187};
2188
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002189void Heap::UnBindBitmaps() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002190 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002191 for (const auto& space : GetContinuousSpaces()) {
2192 if (space->IsContinuousMemMapAllocSpace()) {
2193 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2194 if (alloc_space->HasBoundBitmaps()) {
2195 alloc_space->UnBindBitmaps();
2196 }
2197 }
2198 }
2199}
2200
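// Called before the zygote forks its first application. Compacts what has been allocated so far
// into a dense zygote space that is no longer allocated into (so its pages can stay shared across
// forked processes), then sets up fresh allocation spaces plus a mod-union table and, if needed,
// a remembered set for the post-fork heap.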
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002201void Heap::PreZygoteFork() {
Mathieu Chartierfaed9952015-03-31 16:28:53 -07002202 if (!HasZygoteSpace()) {
2203 // We still want to GC in case there is some unreachable non moving objects that could cause a
2204 // suboptimal bin packing when we compact the zygote space.
2205 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2206 }
Ian Rogers81d425b2012-09-27 16:03:43 -07002207 Thread* self = Thread::Current();
2208 MutexLock mu(self, zygote_creation_lock_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002209 // Try to see if we have any Zygote spaces.
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002210 if (HasZygoteSpace()) {
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002211 return;
2212 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -07002213 Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
Mathieu Chartierc2e20622014-11-03 11:41:47 -08002214 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002215 VLOG(heap) << "Starting PreZygoteFork";
Mathieu Chartier590fee92013-09-13 13:46:47 -07002216 // Trim the pages at the end of the non moving space.
2217 non_moving_space_->Trim();
Mathieu Chartier31f44142014-04-08 14:40:03 -07002218 // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote
2219 // there.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002220 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07002221 const bool same_space = non_moving_space_ == main_space_;
Mathieu Chartier31f44142014-04-08 14:40:03 -07002222 if (kCompactZygote) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002223 // Temporarily disable rosalloc verification because the zygote
2224 // compaction will mess up the rosalloc internal metadata.
2225 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002226 ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002227 zygote_collector.BuildBins(non_moving_space_);
Mathieu Chartier50482232013-11-21 11:48:14 -08002228 // Create a new bump pointer space which we will compact into.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002229 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2230 non_moving_space_->Limit());
2231 // Compact the bump pointer space to a new zygote bump pointer space.
Mathieu Chartier31f44142014-04-08 14:40:03 -07002232 bool reset_main_space = false;
2233 if (IsMovingGc(collector_type_)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002234 if (collector_type_ == kCollectorTypeCC) {
2235 zygote_collector.SetFromSpace(region_space_);
2236 } else {
2237 zygote_collector.SetFromSpace(bump_pointer_space_);
2238 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07002239 } else {
2240 CHECK(main_space_ != nullptr);
Hiroshi Yamauchid04495e2015-03-11 19:09:07 -07002241 CHECK_NE(main_space_, non_moving_space_)
2242 << "Does not make sense to compact within the same space";
Mathieu Chartier31f44142014-04-08 14:40:03 -07002243 // Copy from the main space.
2244 zygote_collector.SetFromSpace(main_space_);
2245 reset_main_space = true;
2246 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002247 zygote_collector.SetToSpace(&target_space);
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07002248 zygote_collector.SetSwapSemiSpaces(false);
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08002249 zygote_collector.Run(kGcCauseCollectorTransition, false);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002250 if (reset_main_space) {
2251 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2252 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2253 MemMap* mem_map = main_space_->ReleaseMemMap();
2254 RemoveSpace(main_space_);
Mathieu Chartier96bcd452014-06-17 09:50:02 -07002255 space::Space* old_main_space = main_space_;
Mathieu Chartier0310da52014-12-01 13:40:48 -08002256 CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
2257 mem_map->Size());
Mathieu Chartier96bcd452014-06-17 09:50:02 -07002258 delete old_main_space;
Mathieu Chartier31f44142014-04-08 14:40:03 -07002259 AddSpace(main_space_);
2260 } else {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002261 if (collector_type_ == kCollectorTypeCC) {
2262 region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2263 } else {
2264 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2265 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07002266 }
2267 if (temp_space_ != nullptr) {
2268 CHECK(temp_space_->IsEmpty());
2269 }
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002270 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2271 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002272 // Update the end and write out image.
2273 non_moving_space_->SetEnd(target_space.End());
2274 non_moving_space_->SetLimit(target_space.Limit());
Mathieu Chartierfaed9952015-03-31 16:28:53 -07002275 VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002276 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07002277 // Change the collector to the post zygote one.
Mathieu Chartier31f44142014-04-08 14:40:03 -07002278 ChangeCollector(foreground_collector_type_);
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002279 // Save the old space so that we can remove it after we complete creating the zygote space.
2280 space::MallocSpace* old_alloc_space = non_moving_space_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002281 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002282 // the remaining available space.
2283 // Remove the old space before creating the zygote space since creating the zygote space sets
Mathieu Chartier2cebb242015-04-21 16:50:40 -07002284 // the old alloc space's bitmaps to null.
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002285 RemoveSpace(old_alloc_space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002286 if (collector::SemiSpace::kUseRememberedSet) {
2287 // Sanity bound check.
2288 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2289 // Remove the remembered set for the now zygote space (the old
2290 // non-moving space). Note now that we have compacted objects into
2291 // the zygote space, the data in the remembered set is no longer
2292 // needed. The zygote space will instead have a mod-union table
2293 // from this point on.
2294 RemoveRememberedSet(old_alloc_space);
2295 }
Mathieu Chartier7247af52014-11-19 10:51:42 -08002296 // Remaining space becomes the new non moving space.
2297 zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002298 &non_moving_space_);
Mathieu Chartierb363f662014-07-16 13:28:58 -07002299 CHECK(!non_moving_space_->CanMoveObjects());
2300 if (same_space) {
2301 main_space_ = non_moving_space_;
2302 SetSpaceAsDefault(main_space_);
2303 }
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002304 delete old_alloc_space;
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002305 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2306 AddSpace(zygote_space_);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002307 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2308 AddSpace(non_moving_space_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002309 // Create the zygote space mod union table.
2310 accounting::ModUnionTable* mod_union_table =
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002311 new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
2312 zygote_space_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002313 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002314 // Set all the cards in the mod-union table since we don't know which objects contain references
2315 // to large objects.
2316 mod_union_table->SetCards();
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002317 AddModUnionTable(mod_union_table);
Mathieu Chartierf6c2a272015-06-03 17:32:42 -07002318 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002319 if (collector::SemiSpace::kUseRememberedSet) {
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002320 // Add a new remembered set for the post-zygote non-moving space.
2321 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2322 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2323 non_moving_space_);
2324 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2325 << "Failed to create post-zygote non-moving space remembered set";
2326 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2327 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002328}
2329
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002330void Heap::FlushAllocStack() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002331 MarkAllocStackAsLive(allocation_stack_.get());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002332 allocation_stack_->Reset();
2333}
2334
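// Marks every object on the given allocation stack in whichever bitmap covers it (one of the two
// continuous space bitmaps or the large object bitmap), so recently allocated objects are treated
// as live.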
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002335void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2336 accounting::ContinuousSpaceBitmap* bitmap2,
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07002337 accounting::LargeObjectBitmap* large_objects,
Ian Rogers1d54e732013-05-02 21:10:01 -07002338 accounting::ObjectStack* stack) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002339 DCHECK(bitmap1 != nullptr);
2340 DCHECK(bitmap2 != nullptr);
Mathieu Chartiercb535da2015-01-23 13:50:03 -08002341 const auto* limit = stack->End();
2342 for (auto* it = stack->Begin(); it != limit; ++it) {
2343 const mirror::Object* obj = it->AsMirrorPtr();
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002344 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2345 if (bitmap1->HasAddress(obj)) {
2346 bitmap1->Set(obj);
2347 } else if (bitmap2->HasAddress(obj)) {
2348 bitmap2->Set(obj);
2349 } else {
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07002350 DCHECK(large_objects != nullptr);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002351 large_objects->Set(obj);
2352 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07002353 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002354 }
2355}
2356
Mathieu Chartier590fee92013-09-13 13:46:47 -07002357void Heap::SwapSemiSpaces() {
Mathieu Chartier31f44142014-04-08 14:40:03 -07002358 CHECK(bump_pointer_space_ != nullptr);
2359 CHECK(temp_space_ != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002360 std::swap(bump_pointer_space_, temp_space_);
2361}
2362
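// Copies live objects from source_space into target_space. When the spaces differ, the semi-space
// collector is used (without swapping the spaces afterwards); otherwise an in-place compaction is
// performed with the mark-compact collector, which only supports bump pointer spaces.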
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002363collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2364 space::ContinuousMemMapAllocSpace* source_space,
2365 GcCause gc_cause) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002366 CHECK(kMovingCollector);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002367 if (target_space != source_space) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002368 // Don't swap spaces since this isn't a typical semi space collection.
2369 semi_space_collector_->SetSwapSemiSpaces(false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002370 semi_space_collector_->SetFromSpace(source_space);
2371 semi_space_collector_->SetToSpace(target_space);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002372 semi_space_collector_->Run(gc_cause, false);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002373 return semi_space_collector_;
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002374 } else {
2375 CHECK(target_space->IsBumpPointerSpace())
2376 << "In-place compaction is only supported for bump pointer spaces";
2377 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2378 mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002379 return mark_compact_collector_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002380 }
2381}
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002382
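// Central entry point for running a GC: waits for any in-progress collection, picks the concrete
// collector based on the current collector type and allocator, runs it, then grows the heap
// target, enqueues cleared references and logs the result.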
Ian Rogers1d54e732013-05-02 21:10:01 -07002383collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
2384 bool clear_soft_references) {
Ian Rogers81d425b2012-09-27 16:03:43 -07002385 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002386 Runtime* runtime = Runtime::Current();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002387 // If the heap can't run the GC, silently fail and return that no GC was run.
2388 switch (gc_type) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002389 case collector::kGcTypePartial: {
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002390 if (!HasZygoteSpace()) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002391 return collector::kGcTypeNone;
2392 }
2393 break;
2394 }
2395 default: {
2396 // Other GC types don't have any special cases which makes them not runnable. The main case
2397 // here is full GC.
2398 }
2399 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002400 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Ian Rogers81d425b2012-09-27 16:03:43 -07002401 Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers120f1c72012-09-28 17:17:10 -07002402 if (self->IsHandlingStackOverflow()) {
Mathieu Chartier50c138f2015-01-07 16:00:03 -08002403 // If we are throwing a stack overflow error we probably don't have enough remaining stack
2404 // space to run the GC.
2405 return collector::kGcTypeNone;
Ian Rogers120f1c72012-09-28 17:17:10 -07002406 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002407 bool compacting_gc;
2408 {
2409 gc_complete_lock_->AssertNotHeld(self);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08002410 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002411 MutexLock mu(self, *gc_complete_lock_);
2412 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002413 WaitForGcToCompleteLocked(gc_cause, self);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002414 compacting_gc = IsMovingGc(collector_type_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002415 // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2416 if (compacting_gc && disable_moving_gc_count_ != 0) {
2417 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2418 return collector::kGcTypeNone;
2419 }
2420 collector_type_running_ = collector_type_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002421 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07002422 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2423 ++runtime->GetStats()->gc_for_alloc_count;
2424 ++self->GetStats()->gc_for_alloc_count;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002425 }
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08002426 const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
2427 // Approximate heap size.
2428 ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
Mathieu Chartier65db8802012-11-20 12:36:46 -08002429
Ian Rogers1d54e732013-05-02 21:10:01 -07002430 DCHECK_LT(gc_type, collector::kGcTypeMax);
2431 DCHECK_NE(gc_type, collector::kGcTypeNone);
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002432
Mathieu Chartier590fee92013-09-13 13:46:47 -07002433 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier50482232013-11-21 11:48:14 -08002434 // TODO: Clean this up.
Mathieu Chartier1d27b342014-01-28 12:51:09 -08002435 if (compacting_gc) {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08002436 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002437 current_allocator_ == kAllocatorTypeTLAB ||
2438 current_allocator_ == kAllocatorTypeRegion ||
2439 current_allocator_ == kAllocatorTypeRegionTLAB);
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002440 switch (collector_type_) {
2441 case kCollectorTypeSS:
2442 // Fall-through.
2443 case kCollectorTypeGSS:
2444 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2445 semi_space_collector_->SetToSpace(temp_space_);
2446 semi_space_collector_->SetSwapSemiSpaces(true);
2447 collector = semi_space_collector_;
2448 break;
2449 case kCollectorTypeCC:
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002450 concurrent_copying_collector_->SetRegionSpace(region_space_);
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002451 collector = concurrent_copying_collector_;
2452 break;
2453 case kCollectorTypeMC:
2454 mark_compact_collector_->SetSpace(bump_pointer_space_);
2455 collector = mark_compact_collector_;
2456 break;
2457 default:
2458 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07002459 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002460 if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002461 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2462 CHECK(temp_space_->IsEmpty());
2463 }
2464 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002465 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2466 current_allocator_ == kAllocatorTypeDlMalloc) {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002467 collector = FindCollectorByGcType(gc_type);
Mathieu Chartier50482232013-11-21 11:48:14 -08002468 } else {
2469 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002470 }
Mathieu Chartier08cef222014-10-22 17:18:34 -07002471 if (IsGcConcurrent()) {
2472 // Disable concurrent GC check so that we don't have spammy JNI requests.
2473 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2474 // calculated in the same thread so that there aren't any races that can cause it to become
2475 // permanently disabled. b/17942071
2476 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2477 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002478 CHECK(collector != nullptr)
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002479 << "Could not find garbage collector with collector_type="
2480 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002481 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002482 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2483 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartiera5eae692014-12-17 17:56:03 -08002484 RequestTrim(self);
Mathieu Chartier39e32612013-11-12 16:28:05 -08002485 // Enqueue cleared references.
Mathieu Chartier3cf22532015-07-09 15:15:09 -07002486 reference_processor_->EnqueueClearedReferences(self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002487 // Grow the heap so that we know when to perform the next GC.
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08002488 GrowForUtilization(collector, bytes_allocated_before_gc);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002489 LogGC(gc_cause, collector);
2490 FinishGC(self, gc_type);
2491 // Inform DDMS that a GC completed.
2492 Dbg::GcDidFinish();
2493 return gc_type;
2494}
2495
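// Logs a one-line summary of the collection if it was explicitly requested or if its duration or
// pauses exceeded the configured logging thresholds.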
2496void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002497 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2498 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002499 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002500 // (mutator time blocked >= long_pause_log_threshold_).
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002501 bool log_gc = gc_cause == kGcCauseExplicit;
2502 if (!log_gc && CareAboutPauseTimes()) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002503 // GC for alloc pauses the allocating thread, so consider it as a pause.
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002504 log_gc = duration > long_gc_log_threshold_ ||
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002505 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002506 for (uint64_t pause : pause_times) {
2507 log_gc = log_gc || pause >= long_pause_log_threshold_;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002508 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002509 }
2510 if (log_gc) {
2511 const size_t percent_free = GetPercentFree();
2512 const size_t current_heap_size = GetBytesAllocated();
2513 const size_t total_memory = GetTotalMemory();
2514 std::ostringstream pause_string;
2515 for (size_t i = 0; i < pause_times.size(); ++i) {
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002516 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2517 << ((i != pause_times.size() - 1) ? "," : "");
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002518 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002519 LOG(INFO) << gc_cause << " " << collector->GetName()
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002520 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2521 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2522 << current_gc_iteration_.GetFreedLargeObjects() << "("
2523 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002524 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2525 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2526 << " total " << PrettyDuration((duration / 1000) * 1000);
Ian Rogersc7dd2952014-10-21 23:31:19 -07002527 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002528 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002529}
Mathieu Chartiera6399032012-06-11 18:49:50 -07002530
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002531void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2532 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002533 collector_type_running_ = kCollectorTypeNone;
2534 if (gc_type != collector::kGcTypeNone) {
2535 last_gc_type_ = gc_type;
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07002536
2537 // Update stats.
2538 ++gc_count_last_window_;
2539 if (running_collection_is_blocking_) {
2540 // If the currently running collection was a blocking one,
2541 // increment the counters and reset the flag.
2542 ++blocking_gc_count_;
2543 blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2544 ++blocking_gc_count_last_window_;
2545 }
2546 // Update the gc count rate histograms if due.
2547 UpdateGcCountRateHistograms();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002548 }
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07002549 // Reset.
2550 running_collection_is_blocking_ = false;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002551 // Wake anyone who may have been waiting for the GC to complete.
2552 gc_complete_cond_->Broadcast(self);
2553}
2554
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07002555void Heap::UpdateGcCountRateHistograms() {
2556 // Invariant: if the time since the last update includes more than
2557 // one window, all the GC runs (if > 0) must have happened in the first
2558 // window because otherwise the update must have already taken place
2559 // at an earlier GC run. So, we report the non-first windows with
2560 // zero counts to the histograms.
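  // For example, if roughly two window durations have passed since the last update, the first
  // window is recorded with the GCs that actually ran in it (excluding the current run) and the
  // second is recorded with a zero count.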
2561 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2562 uint64_t now = NanoTime();
2563 DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2564 uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2565 uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2566 if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2567 // Record the first window.
2568 gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
2569 blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2570 blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2571 // Record the other windows (with zero counts).
2572 for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2573 gc_count_rate_histogram_.AddValue(0);
2574 blocking_gc_count_rate_histogram_.AddValue(0);
2575 }
2576 // Update the last update time and reset the counters.
2577 last_update_time_gc_count_rate_histograms_ =
2578 (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2579 gc_count_last_window_ = 1; // Include the current run.
2580 blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2581 }
2582 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2583}
2584
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002585class RootMatchesObjectVisitor : public SingleRootVisitor {
2586 public:
2587 explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2588
2589 void VisitRoot(mirror::Object* root, const RootInfo& info)
Mathieu Chartier90443472015-07-16 20:32:27 -07002590 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002591 if (root == obj_) {
2592 LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2593 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002594 }
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002595
2596 private:
2597 const mirror::Object* const obj_;
2598};
2599
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002600
2601class ScanVisitor {
2602 public:
Brian Carlstromdf629502013-07-17 22:39:56 -07002603 void operator()(const mirror::Object* obj) const {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002604 LOG(ERROR) << "Would have rescanned object " << obj;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002605 }
2606};
2607
Ian Rogers1d54e732013-05-02 21:10:01 -07002608// Verify a reference from an object.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002609class VerifyReferenceVisitor : public SingleRootVisitor {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002610 public:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002611 explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
Mathieu Chartier90443472015-07-16 20:32:27 -07002612 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002613 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
Ian Rogers1d54e732013-05-02 21:10:01 -07002614
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002615 size_t GetFailureCount() const {
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002616 return fail_count_->LoadSequentiallyConsistent();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002617 }
2618
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002619 void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
Mathieu Chartier90443472015-07-16 20:32:27 -07002620 SHARED_REQUIRES(Locks::mutator_lock_) {
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002621 if (verify_referent_) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002622 VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002623 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08002624 }
2625
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002626 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
Mathieu Chartier90443472015-07-16 20:32:27 -07002627 SHARED_REQUIRES(Locks::mutator_lock_) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002628 VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
Mathieu Chartier407f7022014-02-18 14:37:05 -08002629 }
2630
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002631 bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
2632 return heap_->IsLiveObjectLocked(obj, true, false, true);
2633 }
2634
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002635 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
2636 SHARED_REQUIRES(Locks::mutator_lock_) {
2637 if (!root->IsNull()) {
2638 VisitRoot(root);
2639 }
2640 }
2641 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
2642 SHARED_REQUIRES(Locks::mutator_lock_) {
2643 const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2644 root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2645 }
2646
2647 virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
Mathieu Chartier90443472015-07-16 20:32:27 -07002648 SHARED_REQUIRES(Locks::mutator_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002649 if (root == nullptr) {
2650 LOG(ERROR) << "Root is null with info " << root_info.GetType();
2651 } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2652 LOG(ERROR) << "Root " << root << " is dead with type " << PrettyTypeOf(root)
Mathieu Chartiere34fa1d2015-01-14 14:55:47 -08002653 << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002654 }
2655 }
2656
2657 private:
Mathieu Chartier407f7022014-02-18 14:37:05 -08002658 // TODO: Fix the no thread safety analysis.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002659 // Returns false on failure.
2660 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002661 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002662 if (ref == nullptr || IsLive(ref)) {
2663 // Verify that the reference is live.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002664 return true;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002665 }
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002666 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002667 // Print the message only on the first failure to prevent spam.
2668 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002669 }
2670 if (obj != nullptr) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002671 // Only do this part for non roots.
Ian Rogers1d54e732013-05-02 21:10:01 -07002672 accounting::CardTable* card_table = heap_->GetCardTable();
2673 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2674 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Ian Rogers13735952014-10-08 12:43:28 -07002675 uint8_t* card_addr = card_table->CardFromAddr(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002676 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2677 << offset << "\n card value = " << static_cast<int>(*card_addr);
2678 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2679 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2680 } else {
2681 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002682 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002683
Mathieu Chartierb363f662014-07-16 13:28:58 -07002684 // Attempt to find the class inside of the recently freed objects.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002685 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2686 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2687 space::MallocSpace* space = ref_space->AsMallocSpace();
2688 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2689 if (ref_class != nullptr) {
2690 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2691 << PrettyClass(ref_class);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002692 } else {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002693 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002694 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002695 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002696
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002697 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2698 ref->GetClass()->IsClass()) {
2699 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2700 } else {
2701 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2702 << ") is not a valid heap address";
2703 }
2704
Ian Rogers13735952014-10-08 12:43:28 -07002705 card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002706 void* cover_begin = card_table->AddrFromCard(card_addr);
2707 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2708 accounting::CardTable::kCardSize);
2709 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2710 << "-" << cover_end;
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002711 accounting::ContinuousSpaceBitmap* bitmap =
2712 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002713
2714 if (bitmap == nullptr) {
2715 LOG(ERROR) << "Object " << obj << " has no bitmap";
Mathieu Chartier4e305412014-02-19 10:54:44 -08002716 if (!VerifyClassClass(obj->GetClass())) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002717 LOG(ERROR) << "Object " << obj << " failed class verification!";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002718 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002719 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07002720 // Print out how the object is live.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002721 if (bitmap->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002722 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2723 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002724 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002725 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2726 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002727 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002728 LOG(ERROR) << "Object " << obj << " found in live stack";
2729 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002730 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2731 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2732 }
2733 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2734 LOG(ERROR) << "Ref " << ref << " found in live stack";
2735 }
Ian Rogers1d54e732013-05-02 21:10:01 -07002736 // Attempt to see if the card table missed the reference.
2737 ScanVisitor scan_visitor;
Ian Rogers13735952014-10-08 12:43:28 -07002738 uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
Lei Li727b2942015-01-15 11:26:34 +08002739 card_table->Scan<false>(bitmap, byte_cover_begin,
2740 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002741 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002742
2743 // Search to see if any of the roots reference our object.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002744 RootMatchesObjectVisitor visitor1(obj);
2745 Runtime::Current()->VisitRoots(&visitor1);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002746 // Search to see if any of the roots reference our reference.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002747 RootMatchesObjectVisitor visitor2(ref);
2748 Runtime::Current()->VisitRoots(&visitor2);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002749 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002750 return false;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002751 }
2752
Ian Rogers1d54e732013-05-02 21:10:01 -07002753 Heap* const heap_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002754 Atomic<size_t>* const fail_count_;
2755 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002756};
2757
Ian Rogers1d54e732013-05-02 21:10:01 -07002758// Verify all references within an object, for use with HeapBitmap::Visit.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002759class VerifyObjectVisitor {
2760 public:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002761 explicit VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2762 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002763 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002764
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002765 void operator()(mirror::Object* obj)
Mathieu Chartier90443472015-07-16 20:32:27 -07002766 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002767 // Note: we are verifying the references in obj but not obj itself, because obj must
2768 // be live or else we could not have found it in the live bitmap.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002769 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002770 // The class doesn't count as a reference, but we should verify it anyway.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002771 obj->VisitReferences<true>(visitor, visitor);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002772 }
2773
Mathieu Chartier590fee92013-09-13 13:46:47 -07002774 static void VisitCallback(mirror::Object* obj, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -07002775 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002776 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
2777 visitor->operator()(obj);
2778 }
2779
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002780 void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002781 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2782 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2783 Runtime::Current()->VisitRoots(&visitor);
2784 }
2785
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002786 size_t GetFailureCount() const {
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002787 return fail_count_->LoadSequentiallyConsistent();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002788 }
2789
2790 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002791 Heap* const heap_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002792 Atomic<size_t>* const fail_count_;
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002793 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002794};
2795
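// Slow path used when the fast allocation stack push fails: push into the reserve region and run
// sticky GCs until the object fits on the allocation stack again.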
Mathieu Chartierc1790162014-05-23 10:54:50 -07002796void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2797 // Slow path, the allocation stack push back must have already failed.
2798 DCHECK(!allocation_stack_->AtomicPushBack(*obj));
2799 do {
2800 // TODO: Add handle VerifyObject.
2801 StackHandleScope<1> hs(self);
2802 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2803 // Push our object into the reserve region of the allocation stack. This is only required due
2804 // to heap verification requiring that roots are live (either in the live bitmap or in the
2805 // allocation stack).
2806 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2807 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2808 } while (!allocation_stack_->AtomicPushBack(*obj));
2809}
2810
2811void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2812 // Slow path, the allocation stack push back must have already failed.
2813 DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
Mathieu Chartiercb535da2015-01-23 13:50:03 -08002814 StackReference<mirror::Object>* start_address;
2815 StackReference<mirror::Object>* end_address;
Mathieu Chartierc1790162014-05-23 10:54:50 -07002816 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
2817 &end_address)) {
2818 // TODO: Add handle VerifyObject.
2819 StackHandleScope<1> hs(self);
2820 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2821 // Push our object into the reserve region of the allocation stack. This is only required due
2822 // to heap verification requiring that roots are live (either in the live bitmap or in the
2823 // allocation stack).
2824 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2825 // Push into the reserve allocation stack.
2826 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2827 }
2828 self->SetThreadLocalAllocationStack(start_address, end_address);
2829 // Retry on the new thread-local allocation stack.
2830 CHECK(self->PushOnThreadLocalAllocationStack(*obj)); // Must succeed.
2831}
2832
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002833// Must do this with mutators suspended since we are directly accessing the allocation stacks.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002834size_t Heap::VerifyHeapReferences(bool verify_referents) {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002835 Thread* self = Thread::Current();
2836 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002837 // Let's sort our allocation stacks so that we can efficiently binary search them.
Ian Rogers1d54e732013-05-02 21:10:01 -07002838 allocation_stack_->Sort();
2839 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002840 // Since we sorted the allocation stack contents, we need to revoke all
2841 // thread-local allocation stacks.
2842 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002843 Atomic<size_t> fail_count_(0);
2844 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002845 // Verify objects in the allocation stack since these will be objects which were:
2846 // 1. Allocated prior to the GC (pre GC verification).
2847 // 2. Allocated during the GC (pre sweep GC verification).
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002848 // We don't want to verify the objects in the live stack since they themselves may be
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002849 // pointing to dead objects if they are not reachable.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002850 VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002851 // Verify the roots:
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002852 visitor.VerifyRoots();
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002853 if (visitor.GetFailureCount() > 0) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002854 // Dump mod-union tables.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002855 for (const auto& table_pair : mod_union_tables_) {
2856 accounting::ModUnionTable* mod_union_table = table_pair.second;
2857 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
2858 }
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002859 // Dump remembered sets.
2860 for (const auto& table_pair : remembered_sets_) {
2861 accounting::RememberedSet* remembered_set = table_pair.second;
2862 remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
2863 }
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07002864 DumpSpaces(LOG(ERROR));
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002865 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002866 return visitor.GetFailureCount();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002867}
2868
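// Used by VerifyMissingCardMarks: an object whose card is not dirty must not reference an object
// that is in the live stack, since such a reference should have dirtied the card.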
2869class VerifyReferenceCardVisitor {
2870 public:
2871 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
Mathieu Chartier90443472015-07-16 20:32:27 -07002872 SHARED_REQUIRES(Locks::mutator_lock_,
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002873 Locks::heap_bitmap_lock_)
Ian Rogers1d54e732013-05-02 21:10:01 -07002874 : heap_(heap), failed_(failed) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002875 }
2876
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002877 // There are no card marks for native roots on a class.
2878 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
2879 const {}
2880 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
2881
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002882 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
2883 // annotalysis on visitors.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002884 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
2885 NO_THREAD_SAFETY_ANALYSIS {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07002886 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002887 // Filter out class references since changing an object's class does not mark the card as dirty.
2888 // Also handles large objects, since the only reference they hold is a class reference.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002889 if (ref != nullptr && !ref->IsClass()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002890 accounting::CardTable* card_table = heap_->GetCardTable();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002891 // If the object is not dirty and it is referencing something in the live stack other than
2892 // class, then it must be on a dirty card.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07002893 if (!card_table->AddrIsInCardTable(obj)) {
2894 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
2895 *failed_ = true;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002896 } else if (!card_table->IsDirty(obj)) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002897 // TODO: Check mod-union tables.
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002898 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
2899 // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers1d54e732013-05-02 21:10:01 -07002900 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier407f7022014-02-18 14:37:05 -08002901 if (live_stack->ContainsSorted(ref)) {
2902 if (live_stack->ContainsSorted(obj)) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002903 LOG(ERROR) << "Object " << obj << " found in live stack";
2904 }
2905 if (heap_->GetLiveBitmap()->Test(obj)) {
2906 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2907 }
2908 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
2909 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
2910
2911 // Print which field of the object references the object in the live stack.
2912 if (!obj->IsObjectArray()) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002913 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
Mathieu Chartierc7853442015-03-27 14:35:38 -07002914 CHECK(klass != nullptr);
Mathieu Chartierc0fe56a2015-08-11 13:01:23 -07002915 for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
Mathieu Chartier54d220e2015-07-30 16:20:06 -07002916 if (field.GetOffset().Int32Value() == offset.Int32Value()) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002917 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
Mathieu Chartier54d220e2015-07-30 16:20:06 -07002918 << PrettyField(&field);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002919 break;
2920 }
2921 }
2922 } else {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002923 mirror::ObjectArray<mirror::Object>* object_array =
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002924 obj->AsObjectArray<mirror::Object>();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002925 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
2926 if (object_array->Get(i) == ref) {
2927 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
2928 }
2929 }
2930 }
2931
2932 *failed_ = true;
2933 }
2934 }
2935 }
2936 }
2937
2938 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002939 Heap* const heap_;
2940 bool* const failed_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002941};
2942
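// Applies VerifyReferenceCardVisitor to every reference held by an object; Failed() reports
// whether any missing card mark was found.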
2943class VerifyLiveStackReferences {
2944 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002945 explicit VerifyLiveStackReferences(Heap* heap)
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002946 : heap_(heap),
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002947 failed_(false) {}
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002948
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002949 void operator()(mirror::Object* obj) const
Mathieu Chartier90443472015-07-16 20:32:27 -07002950 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002951 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07002952 obj->VisitReferences<true>(visitor, VoidFunctor());
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002953 }
2954
2955 bool Failed() const {
2956 return failed_;
2957 }
2958
2959 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002960 Heap* const heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002961 bool failed_;
2962};
2963
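// Returns true if no missing card marks were found. Must be called with mutators suspended since
// the live stack is accessed directly.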
2964bool Heap::VerifyMissingCardMarks() {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002965 Thread* self = Thread::Current();
2966 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002967 // We need to sort the live stack since we binary search it.
Ian Rogers1d54e732013-05-02 21:10:01 -07002968 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002969 // Since we sorted the allocation stack contents, we need to revoke all
2970 // thread-local allocation stacks.
2971 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002972 VerifyLiveStackReferences visitor(this);
2973 GetLiveBitmap()->Visit(visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002974 // We can verify objects in the live stack since none of these should reference dead objects.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08002975 for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
2976 if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
2977 visitor(it->AsMirrorPtr());
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002978 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002979 }
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07002980 return !visitor.Failed();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002981}
2982
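// Swaps the allocation stack with the live stack. When thread-local allocation stacks are in use,
// the live stack must already be all zero at this point.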
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002983void Heap::SwapStacks(Thread* self) {
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07002984 UNUSED(self);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002985 if (kUseThreadLocalAllocationStack) {
2986 live_stack_->AssertAllZero();
2987 }
Mathieu Chartierd22d5482012-11-06 17:14:12 -08002988 allocation_stack_.swap(live_stack_);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002989}
2990
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002991void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002992 // This must be called only during the pause.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002993 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002994 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
2995 MutexLock mu2(self, *Locks::thread_list_lock_);
2996 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
2997 for (Thread* t : thread_list) {
2998 t->RevokeThreadLocalAllocationStack();
2999 }
3000}
3001
Ian Rogers68d8b422014-07-17 11:09:10 -07003002void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3003 if (kIsDebugBuild) {
3004 if (rosalloc_space_ != nullptr) {
3005 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3006 }
3007 if (bump_pointer_space_ != nullptr) {
3008 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3009 }
3010 }
3011}
3012
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07003013void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3014 if (kIsDebugBuild) {
3015 if (bump_pointer_space_ != nullptr) {
3016 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3017 }
3018 }
3019}
3020
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003021accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3022 auto it = mod_union_tables_.find(space);
3023 if (it == mod_union_tables_.end()) {
3024 return nullptr;
3025 }
3026 return it->second;
3027}
3028
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003029accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3030 auto it = remembered_sets_.find(space);
3031 if (it == remembered_sets_.end()) {
3032 return nullptr;
3033 }
3034 return it->second;
3035}
3036
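// Processes the card table for each continuous space: dirty cards are recorded in the space's
// mod-union table or remembered set if it has one; otherwise alloc space cards are either cleared
// or aged, as requested by the flags.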
Lei Li4add3b42015-01-15 11:55:26 +08003037void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets, bool process_alloc_space_cards,
3038 bool clear_alloc_space_cards) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003039 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07003040 // Clear cards and keep track of cards cleared in the mod-union table.
Mathieu Chartier02e25112013-08-14 16:14:24 -07003041 for (const auto& space : continuous_spaces_) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003042 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003043 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003044 if (table != nullptr) {
3045 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3046 "ImageModUnionClearCards";
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003047 TimingLogger::ScopedTiming t2(name, timings);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003048 table->ClearCards();
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003049 } else if (use_rem_sets && rem_set != nullptr) {
3050 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3051 << static_cast<int>(collector_type_);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003052 TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003053 rem_set->ClearCards();
Lei Li4add3b42015-01-15 11:55:26 +08003054 } else if (process_alloc_space_cards) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003055 TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
Lei Li4add3b42015-01-15 11:55:26 +08003056 if (clear_alloc_space_cards) {
3057 card_table_->ClearCardRange(space->Begin(), space->End());
3058 } else {
3059 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3060 // cards were dirty before the GC started.
3061 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3062 // -> clean(cleaning thread).
3063 // The possible races leave us with either an aged card or an unaged card. Since we
3064 // checkpoint the roots and then scan / update the mod union tables afterwards, we will
3065 // always scan one of the two. If we end up with the non-aged card, we scan it in the pause.
3066 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3067 VoidFunctor());
3068 }
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07003069 }
3070 }
3071}
3072
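// A mark visitor that marks nothing and returns objects unchanged; used below to verify
// mod-union tables without mutating the heap.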
Mathieu Chartier97509952015-07-13 14:35:43 -07003073struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3074 virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
3075 return obj;
3076 }
3077 virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {
3078 }
3079};
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003080
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003081void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3082 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003083 TimingLogger* const timings = current_gc_iteration_.GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003084 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003085 if (verify_pre_gc_heap_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003086 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003087 size_t failures = VerifyHeapReferences();
3088 if (failures > 0) {
3089 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3090 << " failures";
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003091 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003092 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003093 // Check that all objects which reference things in the live stack are on dirty cards.
3094 if (verify_missing_card_marks_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003095 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003096 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3097 SwapStacks(self);
3098 // Sort the live stack so that we can quickly binary search it later.
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07003099 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3100 << " missing card mark verification failed\n" << DumpSpaces();
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003101 SwapStacks(self);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003102 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003103 if (verify_mod_union_table_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003104 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003105 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003106 for (const auto& table_pair : mod_union_tables_) {
3107 accounting::ModUnionTable* mod_union_table = table_pair.second;
Mathieu Chartier97509952015-07-13 14:35:43 -07003108 IdentityMarkHeapReferenceVisitor visitor;
3109 mod_union_table->UpdateAndMarkReferences(&visitor);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003110 mod_union_table->Verify();
3111 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003112 }
3113}
3114
3115void Heap::PreGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07003116 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003117 collector::GarbageCollector::ScopedPause pause(gc);
3118 PreGcVerificationPaused(gc);
3119 }
3120}
3121
3122void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07003123 UNUSED(gc);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003124 // TODO: Add a new runtime option for this?
3125 if (verify_pre_gc_rosalloc_) {
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003126 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003127 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003128}
3129
Ian Rogers1d54e732013-05-02 21:10:01 -07003130void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003131 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003132 TimingLogger* const timings = current_gc_iteration_.GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003133 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003134 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3135 // reachable objects.
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003136 if (verify_pre_sweeping_heap_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003137 TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07003138 CHECK_NE(self->GetState(), kRunnable);
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -08003139 {
3140 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3141 // Swapping bound bitmaps does nothing.
3142 gc->SwapBitmaps();
3143 }
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07003144 // Pass in false since concurrent reference processing can mean that the reference referents
3145 // may point to dead objects at the point at which PreSweepingGcVerification is called.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003146 size_t failures = VerifyHeapReferences(false);
3147 if (failures > 0) {
3148 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3149 << " failures";
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003150 }
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -08003151 {
3152 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3153 gc->SwapBitmaps();
3154 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003155 }
3156 if (verify_pre_sweeping_rosalloc_) {
3157 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3158 }
3159}
3160
3161void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3162 // Only pause if we have to do some verification.
3163 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003164 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003165 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003166 if (verify_system_weaks_) {
3167 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3168 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3169 mark_sweep->VerifySystemWeaks();
3170 }
3171 if (verify_post_gc_rosalloc_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003172 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003173 }
3174 if (verify_post_gc_heap_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003175 TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003176 size_t failures = VerifyHeapReferences();
3177 if (failures > 0) {
3178 LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3179 << " failures";
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003180 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003181 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003182}
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003183
Ian Rogers1d54e732013-05-02 21:10:01 -07003184void Heap::PostGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003185 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3186 collector::GarbageCollector::ScopedPause pause(gc);
Mathieu Chartierd35326f2014-08-18 15:02:59 -07003187 PostGcVerificationPaused(gc);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003188 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07003189}
3190
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003191void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003192 TimingLogger::ScopedTiming t(name, timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003193 for (const auto& space : continuous_spaces_) {
3194 if (space->IsRosAllocSpace()) {
3195 VLOG(heap) << name << " : " << space->GetName();
3196 space->AsRosAllocSpace()->Verify();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08003197 }
3198 }
3199}
3200
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003201collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08003202 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003203 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003204 return WaitForGcToCompleteLocked(cause, self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003205}
3206
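// Requires gc_complete_lock_ to be held. Waits for any in-progress collection to finish and
// records whether the wait (or the collection the caller is about to run) counts as a blocking GC.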
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003207collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003208 collector::GcType last_gc_type = collector::kGcTypeNone;
Mathieu Chartier590fee92013-09-13 13:46:47 -07003209 uint64_t wait_start = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08003210 while (collector_type_running_ != kCollectorTypeNone) {
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07003211 if (self != task_processor_->GetRunningThread()) {
3212 // The current thread is about to wait for a currently running
3213 // collection to finish. If the waiting thread is not the heap
3214 // task daemon thread, the currently running collection is
3215 // considered as a blocking GC.
3216 running_collection_is_blocking_ = true;
3217 VLOG(gc) << "Waiting for a blocking GC " << cause;
3218 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07003219 ATRACE_BEGIN("GC: Wait For Completion");
3220 // We must wait, change thread state then sleep on gc_complete_cond_;
3221 gc_complete_cond_->Wait(self);
3222 last_gc_type = last_gc_type_;
Mathieu Chartier752a0e62013-06-27 11:03:27 -07003223 ATRACE_END();
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07003224 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07003225 uint64_t wait_time = NanoTime() - wait_start;
3226 total_wait_time_ += wait_time;
3227 if (wait_time > long_pause_log_threshold_) {
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003228 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
3229 << " for cause " << cause;
Mathieu Chartier590fee92013-09-13 13:46:47 -07003230 }
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07003231 if (self != task_processor_->GetRunningThread()) {
3232 // The current thread is about to run a collection. If the thread
3233 // is not the heap task daemon thread, it's considered as a
3234 // blocking GC (i.e., blocking itself).
3235 running_collection_is_blocking_ = true;
3236 VLOG(gc) << "Starting a blocking GC " << cause;
3237 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07003238 return last_gc_type;
Carl Shapiro69759ea2011-07-21 18:13:35 -07003239}
3240
Elliott Hughesc967f782012-04-16 10:23:15 -07003241void Heap::DumpForSigQuit(std::ostream& os) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003242 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003243 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -07003244 DumpGcPerformanceInfo(os);
Elliott Hughesc967f782012-04-16 10:23:15 -07003245}
3246
3247size_t Heap::GetPercentFree() {
Mathieu Chartierd30e1d62014-06-09 13:25:22 -07003248 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
Elliott Hughesc967f782012-04-16 10:23:15 -07003249}
3250
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003251void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003252 if (max_allowed_footprint > GetMaxMemory()) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003253 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003254 << PrettySize(GetMaxMemory());
3255 max_allowed_footprint = GetMaxMemory();
3256 }
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07003257 max_allowed_footprint_ = max_allowed_footprint;
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07003258}
3259
Mathieu Chartier590fee92013-09-13 13:46:47 -07003260bool Heap::IsMovableObject(const mirror::Object* obj) const {
3261 if (kMovingCollector) {
Mathieu Chartier31f44142014-04-08 14:40:03 -07003262 space::Space* space = FindContinuousSpaceFromObject(obj, true);
3263 if (space != nullptr) {
3264 // TODO: Check large object?
3265 return space->CanMoveObjects();
Mathieu Chartier590fee92013-09-13 13:46:47 -07003266 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07003267 }
3268 return false;
3269}
3270
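// Recomputes native_footprint_gc_watermark_ from the current native allocation size, the target
// heap utilization, and the min/max free bounds, capped at the growth limit.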
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003271void Heap::UpdateMaxNativeFootprint() {
Ian Rogers3e5cf302014-05-20 16:40:37 -07003272 size_t native_size = native_bytes_allocated_.LoadRelaxed();
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003273 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
3274 size_t target_size = native_size / GetTargetHeapUtilization();
3275 if (target_size > native_size + max_free_) {
3276 target_size = native_size + max_free_;
3277 } else if (target_size < native_size + min_free_) {
3278 target_size = native_size + min_free_;
3279 }
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003280 native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003281}
3282
Mathieu Chartierafe49982014-03-27 10:55:04 -07003283collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3284 for (const auto& collector : garbage_collectors_) {
3285 if (collector->GetCollectorType() == collector_type_ &&
3286 collector->GetGcType() == gc_type) {
3287 return collector;
3288 }
3289 }
3290 return nullptr;
3291}
3292
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003293double Heap::HeapGrowthMultiplier() const {
3294 // If we don't care about pause times, we are in the background, so return 1.0.
3295 if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
3296 return 1.0;
3297 }
3298 return foreground_heap_growth_multiplier_;
3299}
3300
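// Runs after a collection: grows or shrinks the ideal footprint based on the current utilization,
// picks the next GC type, and for concurrent collectors computes the byte threshold at which the
// next concurrent GC should be started.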
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003301void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3302 uint64_t bytes_allocated_before_gc) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003303 // We know what our utilization is at this moment.
3304 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003305 const uint64_t bytes_allocated = GetBytesAllocated();
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003306 uint64_t target_size;
Mathieu Chartierafe49982014-03-27 10:55:04 -07003307 collector::GcType gc_type = collector_ran->GetGcType();
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003308 const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
3309 // foreground.
3310 const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
3311 const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003312 if (gc_type != collector::kGcTypeSticky) {
3313 // Grow the heap for non sticky GC.
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003314 ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003315 CHECK_GE(delta, 0);
3316 target_size = bytes_allocated + delta * multiplier;
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003317 target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
3318 target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003319 native_need_to_run_finalization_ = true;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003320 next_gc_type_ = collector::kGcTypeSticky;
3321 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07003322 collector::GcType non_sticky_gc_type =
Mathieu Chartiere4cab172014-08-19 18:24:04 -07003323 HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
Mathieu Chartierafe49982014-03-27 10:55:04 -07003324 // Find what the next non sticky collector will be.
3325 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3326 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3327 // do another sticky collection next.
3328 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
3329 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3330 // if the sticky GC throughput always remained >= the full/partial throughput.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003331 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
Mathieu Chartierafe49982014-03-27 10:55:04 -07003332 non_sticky_collector->GetEstimatedMeanThroughput() &&
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003333 non_sticky_collector->NumberOfIterations() > 0 &&
Mathieu Chartierafe49982014-03-27 10:55:04 -07003334 bytes_allocated <= max_allowed_footprint_) {
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003335 next_gc_type_ = collector::kGcTypeSticky;
3336 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07003337 next_gc_type_ = non_sticky_gc_type;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003338 }
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003339 // If we have freed enough memory, shrink the heap back down.
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003340 if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
3341 target_size = bytes_allocated + adjusted_max_free;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003342 } else {
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003343 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003344 }
3345 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003346 if (!ignore_max_footprint_) {
3347 SetIdealFootprint(target_size);
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07003348 if (IsGcConcurrent()) {
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003349 const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003350 current_gc_iteration_.GetFreedLargeObjectBytes() +
3351 current_gc_iteration_.GetFreedRevokeBytes();
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003352 // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3353 // how many bytes were allocated during the GC we need to add freed_bytes back on.
3354 CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3355 const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3356 bytes_allocated_before_gc;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003357 // Calculate when to perform the next ConcurrentGC.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003358 // Calculate the estimated GC duration.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003359 const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003360 // Estimate how many remaining bytes we will have when we need to start the next GC.
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003361 size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
Mathieu Chartier74762802014-01-24 10:21:35 -08003362 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003363 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3364 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
3365 // A situation that should never happen: the estimated allocation rate says we will exceed
3366 // the application's entire footprint. Schedule
Mathieu Chartier74762802014-01-24 10:21:35 -08003367 // another GC nearly straight away.
3368 remaining_bytes = kMinConcurrentRemainingBytes;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003369 }
Mathieu Chartier74762802014-01-24 10:21:35 -08003370 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07003371 DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
Mathieu Chartier74762802014-01-24 10:21:35 -08003372 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3373 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3374 // right away.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003375 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
3376 static_cast<size_t>(bytes_allocated));
Mathieu Chartier65db8802012-11-20 12:36:46 -08003377 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08003378 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07003379}
3380
Mathieu Chartier379d09f2015-01-08 11:28:13 -08003381void Heap::ClampGrowthLimit() {
Mathieu Chartierddac4232015-04-02 10:08:03 -07003382 // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3383 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
Mathieu Chartier379d09f2015-01-08 11:28:13 -08003384 capacity_ = growth_limit_;
3385 for (const auto& space : continuous_spaces_) {
3386 if (space->IsMallocSpace()) {
3387 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3388 malloc_space->ClampGrowthLimit();
3389 }
3390 }
3391 // The main space backup isn't added for performance reasons, so clamp it explicitly here.
3392 if (main_space_backup_.get() != nullptr) {
3393 main_space_backup_->ClampGrowthLimit();
3394 }
3395}
3396
jeffhaoc1160702011-10-27 15:48:45 -07003397void Heap::ClearGrowthLimit() {
Mathieu Chartier80de7a62012-11-27 17:21:50 -08003398 growth_limit_ = capacity_;
Mathieu Chartier0310da52014-12-01 13:40:48 -08003399 for (const auto& space : continuous_spaces_) {
3400 if (space->IsMallocSpace()) {
3401 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3402 malloc_space->ClearGrowthLimit();
3403 malloc_space->SetFootprintLimit(malloc_space->Capacity());
3404 }
3405 }
3406 // The main space backup isn't added for performance reasons, so clear it explicitly here.
3407 if (main_space_backup_.get() != nullptr) {
3408 main_space_backup_->ClearGrowthLimit();
3409 main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3410 }
jeffhaoc1160702011-10-27 15:48:45 -07003411}
3412
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07003413void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003414 ScopedObjectAccess soa(self);
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07003415 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
Ian Rogers53b8b092014-03-13 23:45:53 -07003416 jvalue args[1];
3417 args[0].l = arg.get();
3418 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07003419 // Restore object in case it gets moved.
3420 *object = soa.Decode<mirror::Object*>(arg.get());
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003421}
3422
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003423void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003424 StackHandleScope<1> hs(self);
3425 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003426 RequestConcurrentGC(self, force_full);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003427}
3428
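// HeapTask that runs a concurrent GC on the heap task processor thread and then clears the
// pending concurrent GC request.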
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003429class Heap::ConcurrentGCTask : public HeapTask {
3430 public:
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003431 explicit ConcurrentGCTask(uint64_t target_time, bool force_full)
3432 : HeapTask(target_time), force_full_(force_full) { }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003433 virtual void Run(Thread* self) OVERRIDE {
3434 gc::Heap* heap = Runtime::Current()->GetHeap();
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003435 heap->ConcurrentGC(self, force_full_);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003436 heap->ClearConcurrentGCRequest();
Ian Rogers120f1c72012-09-28 17:17:10 -07003437 }
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003438
3439 private:
3440 const bool force_full_; // If true, force full (or partial) collection.
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003441};
3442
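// Heap tasks may only be added once the runtime has finished starting, is not shutting down, and
// the requesting thread is not handling a stack overflow.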
Mathieu Chartier90443472015-07-16 20:32:27 -07003443static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003444 Runtime* runtime = Runtime::Current();
3445 return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3446 !self->IsHandlingStackOverflow();
3447}
3448
3449void Heap::ClearConcurrentGCRequest() {
3450 concurrent_gc_pending_.StoreRelaxed(false);
3451}
3452
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003453void Heap::RequestConcurrentGC(Thread* self, bool force_full) {
Mathieu Chartierac195162015-02-20 18:44:28 +00003454 if (CanAddHeapTask(self) &&
3455 concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003456 task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
3457 force_full));
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003458 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07003459}
3460
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003461void Heap::ConcurrentGC(Thread* self, bool force_full) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003462 if (!Runtime::Current()->IsShuttingDown(self)) {
3463 // Wait for any GCs currently running to finish.
3464 if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
3465 // If we can't run the GC type we wanted to run, find the next appropriate one and try that
3466 // instead. E.g. can't do partial, so do full instead.
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003467 collector::GcType next_gc_type = next_gc_type_;
3468 // If forcing full and next gc type is sticky, override with a non-sticky type.
3469 if (force_full && next_gc_type == collector::kGcTypeSticky) {
3470 next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
3471 }
3472 if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003473 collector::kGcTypeNone) {
3474 for (collector::GcType gc_type : gc_plan_) {
3475 // Attempt to run the collector, if we succeed, we are done.
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003476 if (gc_type > next_gc_type &&
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003477 CollectGarbageInternal(gc_type, kGcCauseBackground, false) !=
3478 collector::kGcTypeNone) {
3479 break;
3480 }
Mathieu Chartierf9ed0d32013-11-21 16:42:47 -08003481 }
3482 }
3483 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07003484 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07003485}
3486
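// HeapTask that performs the pending collector transition at its target time and then clears the
// pending transition request.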
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003487class Heap::CollectorTransitionTask : public HeapTask {
3488 public:
3489 explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) { }
3490 virtual void Run(Thread* self) OVERRIDE {
3491 gc::Heap* heap = Runtime::Current()->GetHeap();
3492 heap->DoPendingCollectorTransition();
3493 heap->ClearPendingCollectorTransition(self);
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003494 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003495};
3496
3497void Heap::ClearPendingCollectorTransition(Thread* self) {
3498 MutexLock mu(self, *pending_task_lock_);
3499 pending_collector_transition_ = nullptr;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003500}
3501
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003502void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3503 Thread* self = Thread::Current();
3504 desired_collector_type_ = desired_collector_type;
3505 if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3506 return;
3507 }
3508 CollectorTransitionTask* added_task = nullptr;
3509 const uint64_t target_time = NanoTime() + delta_time;
3510 {
3511 MutexLock mu(self, *pending_task_lock_);
3512 // If we have an existing collector transition, update the target time to be the new target.
3513 if (pending_collector_transition_ != nullptr) {
3514 task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3515 return;
3516 }
3517 added_task = new CollectorTransitionTask(target_time);
3518 pending_collector_transition_ = added_task;
3519 }
3520 task_processor_->AddTask(self, added_task);
3521}
3522
3523class Heap::HeapTrimTask : public HeapTask {
3524 public:
3525 explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3526 virtual void Run(Thread* self) OVERRIDE {
3527 gc::Heap* heap = Runtime::Current()->GetHeap();
3528 heap->Trim(self);
3529 heap->ClearPendingTrim(self);
3530 }
3531};
3532
3533void Heap::ClearPendingTrim(Thread* self) {
3534 MutexLock mu(self, *pending_task_lock_);
3535 pending_heap_trim_ = nullptr;
3536}
3537
3538void Heap::RequestTrim(Thread* self) {
3539 if (!CanAddHeapTask(self)) {
3540 return;
3541 }
Ian Rogers48931882013-01-22 14:35:16 -08003542 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3543 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3544 // a space it will hold its lock and can become a cause of jank.
3545 // Note, the large object space self trims and the Zygote space was trimmed and unchanging since
3546 // forking.
3547
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08003548 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3549 // because that only marks object heads, so a large array looks like lots of empty space. We
3550 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3551 // to utilization (which is probably inversely proportional to how much benefit we can expect).
3552 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3553 // not how much use we're making of those pages.
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003554 HeapTrimTask* added_task = nullptr;
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07003555 {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003556 MutexLock mu(self, *pending_task_lock_);
3557 if (pending_heap_trim_ != nullptr) {
3558 // Already have a heap trim request in task processor, ignore this request.
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07003559 return;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003560 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003561 added_task = new HeapTrimTask(kHeapTrimWait);
3562 pending_heap_trim_ = added_task;
Mathieu Chartierc39e3422013-08-07 16:41:36 -07003563 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003564 task_processor_->AddTask(self, added_task);
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003565}
3566
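// Return |thread|'s thread-local allocation buffers to their owning spaces. Only the rosalloc
// space reports reclaimed bytes; the bump pointer and region spaces are expected to return 0 here.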
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003567void Heap::RevokeThreadLocalBuffers(Thread* thread) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003568 if (rosalloc_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003569 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3570 if (freed_bytes_revoke > 0U) {
3571 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3572 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3573 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003574 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003575 if (bump_pointer_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003576 CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003577 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003578 if (region_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003579 CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003580 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003581}
3582
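// Like RevokeThreadLocalBuffers, but revokes only |thread|'s rosalloc thread-local runs.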
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07003583void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3584 if (rosalloc_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003585 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3586 if (freed_bytes_revoke > 0U) {
3587 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3588 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3589 }
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07003590 }
3591}
3592
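// Revoke the thread-local buffers of every thread in one pass.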
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003593void Heap::RevokeAllThreadLocalBuffers() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003594 if (rosalloc_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003595 size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3596 if (freed_bytes_revoke > 0U) {
3597 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3598 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3599 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003600 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003601 if (bump_pointer_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003602 CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003603 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003604 if (region_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003605 CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003606 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003607}
3608
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003609bool Heap::IsGCRequestPending() const {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003610 return concurrent_gc_pending_.LoadRelaxed();
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003611}
3612
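// Call dalvik.system.VMRuntime.runFinalization(timeout) through JNI to run pending finalizers.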
Mathieu Chartierb5de3bb2015-06-05 13:21:05 -07003613void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3614 env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3615 WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3616 static_cast<jlong>(timeout));
Mathieu Chartier590fee92013-09-13 13:46:47 -07003617}
3618
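// Account for |bytes| of native memory associated with Java objects (typically reached via JNI
// from dalvik.system.VMRuntime#registerNativeAllocation). Crossing the native GC watermark
// requests a collection; crossing the growth limit additionally waits for any in-progress GC and
// runs finalizers in an attempt to release native memory. Illustrative Java-side caller:
//   VMRuntime.getRuntime().registerNativeAllocation(byteCount);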
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003619void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003620 Thread* self = ThreadForEnv(env);
3621 if (native_need_to_run_finalization_) {
Mathieu Chartierb5de3bb2015-06-05 13:21:05 -07003622 RunFinalization(env, kNativeAllocationFinalizeTimeout);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003623 UpdateMaxNativeFootprint();
3624 native_need_to_run_finalization_ = false;
3625 }
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003626 // Total number of native bytes allocated.
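  // FetchAndAddSequentiallyConsistent returns the value before the addition, so |bytes| is added
  // again below to obtain the updated total.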
Ian Rogers3e5cf302014-05-20 16:40:37 -07003627 size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
3628 new_native_bytes_allocated += bytes;
3629 if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
Mathieu Chartiere4cab172014-08-19 18:24:04 -07003630 collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08003631 collector::kGcTypeFull;
3632
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003633 // The second watermark is higher than the gc watermark. If you hit this it means you are
3634 // allocating native objects faster than the GC can keep up with.
Mathieu Chartier08487452014-09-02 16:21:01 -07003635 if (new_native_bytes_allocated > growth_limit_) {
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003636 if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003637 // Just finished a GC, attempt to run finalizers.
Mathieu Chartierb5de3bb2015-06-05 13:21:05 -07003638 RunFinalization(env, kNativeAllocationFinalizeTimeout);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003639 CHECK(!env->ExceptionCheck());
Lin Zang60e27162015-03-10 18:53:21 +08003640        // Native bytes allocated may be updated by finalization; refresh the value.
3641 new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
Mathieu Chartier590fee92013-09-13 13:46:47 -07003642 }
3643 // If we still are over the watermark, attempt a GC for alloc and run finalizers.
Mathieu Chartier08487452014-09-02 16:21:01 -07003644 if (new_native_bytes_allocated > growth_limit_) {
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08003645 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
Mathieu Chartierb5de3bb2015-06-05 13:21:05 -07003646 RunFinalization(env, kNativeAllocationFinalizeTimeout);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003647 native_need_to_run_finalization_ = false;
3648 CHECK(!env->ExceptionCheck());
3649 }
3650      // We have just run finalizers; update the native watermark since it is very likely that
3651      // finalizers released native managed allocations.
3652 UpdateMaxNativeFootprint();
3653 } else if (!IsGCRequestPending()) {
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07003654 if (IsGcConcurrent()) {
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003655 RequestConcurrentGC(self, true); // Request non-sticky type.
Mathieu Chartier590fee92013-09-13 13:46:47 -07003656 } else {
Hiroshi Yamauchid20aba12014-04-11 15:31:09 -07003657 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003658 }
3659 }
3660 }
3661}
3662
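// Subtract |bytes| from the native allocation count using a weak compare-and-swap loop, throwing
// a RuntimeException if the caller attempts to free more native bytes than are registered.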
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003663void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
3664 size_t expected_size;
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003665 do {
Ian Rogers3e5cf302014-05-20 16:40:37 -07003666 expected_size = native_bytes_allocated_.LoadRelaxed();
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003667 if (UNLIKELY(bytes > expected_size)) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003668 ScopedObjectAccess soa(env);
3669 env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003670 StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
Mathieu Chartier590fee92013-09-13 13:46:47 -07003671 "registered as allocated", bytes, expected_size).c_str());
3672 break;
3673 }
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003674 } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
3675 expected_size - bytes));
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003676}
3677
Ian Rogersef7d42f2014-01-06 12:55:46 -08003678size_t Heap::GetTotalMemory() const {
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07003679 return std::max(max_allowed_footprint_, GetBytesAllocated());
Hiroshi Yamauchi09b07a92013-07-15 13:17:06 -07003680}
3681
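// Register the mod union table for its space. Mod union tables record references from that space
// into other spaces so the space does not need to be rescanned in full during GC.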
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003682void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3683 DCHECK(mod_union_table != nullptr);
3684 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3685}
3686
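// Sanity checks for an allocation request: |c| may be null, the Class class itself (with room for
// a mirror::Class), a variable-size class, or a class whose instance size matches |byte_count|,
// and the request must be at least as large as a mirror::Object.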
Mathieu Chartierc645f1d2014-03-06 18:11:53 -08003687void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003688 CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
Ian Rogers1ff3c982014-08-12 02:30:58 -07003689 (c->IsVariableSize() || c->GetObjectSize() == byte_count));
Mathieu Chartierc645f1d2014-03-06 18:11:53 -08003690 CHECK_GE(byte_count, sizeof(mirror::Object));
3691}
3692
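// Remembered sets serve a similar purpose to mod union tables: each records references from its
// space into spaces being collected so those references can be found without a full scan.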
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003693void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3694 CHECK(remembered_set != nullptr);
3695 space::Space* space = remembered_set->GetSpace();
3696 CHECK(space != nullptr);
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07003697 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003698 remembered_sets_.Put(space, remembered_set);
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07003699 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003700}
3701
3702void Heap::RemoveRememberedSet(space::Space* space) {
3703 CHECK(space != nullptr);
3704 auto it = remembered_sets_.find(space);
3705 CHECK(it != remembered_sets_.end());
Mathieu Chartier5189e242014-07-24 11:11:05 -07003706 delete it->second;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003707 remembered_sets_.erase(it);
3708 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3709}
3710
Mathieu Chartier4aeec172014-03-27 16:09:46 -07003711void Heap::ClearMarkedObjects() {
3712 // Clear all of the spaces' mark bitmaps.
3713 for (const auto& space : GetContinuousSpaces()) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07003714 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartier4aeec172014-03-27 16:09:46 -07003715 if (space->GetLiveBitmap() != mark_bitmap) {
3716 mark_bitmap->Clear();
3717 }
3718 }
3719  // Clear the marked objects in the discontinuous space object sets.
3720 for (const auto& space : GetDiscontinuousSpaces()) {
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07003721 space->GetMarkBitmap()->Clear();
Mathieu Chartier4aeec172014-03-27 16:09:46 -07003722 }
3723}
3724
Man Cao8c2ff642015-05-27 17:25:30 -07003725void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
3726 allocation_records_.reset(records);
3727}
3728
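// In the methods below, IsAllocTrackingEnabled() is tested twice: once without the lock as a
// cheap early out, then again under alloc_tracker_lock_ in case tracking was disabled in between.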
Man Cao1ed11b92015-06-11 22:47:35 -07003729void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
3730 if (IsAllocTrackingEnabled()) {
3731 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3732 if (IsAllocTrackingEnabled()) {
3733 GetAllocationRecords()->VisitRoots(visitor);
3734 }
3735 }
3736}
3737
Mathieu Chartier97509952015-07-13 14:35:43 -07003738void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
Man Cao8c2ff642015-05-27 17:25:30 -07003739 if (IsAllocTrackingEnabled()) {
3740 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3741 if (IsAllocTrackingEnabled()) {
Mathieu Chartier97509952015-07-13 14:35:43 -07003742 GetAllocationRecords()->SweepAllocationRecords(visitor);
Man Cao8c2ff642015-05-27 17:25:30 -07003743 }
3744 }
3745}
3746
Man Cao42c3c332015-06-23 16:38:25 -07003747void Heap::AllowNewAllocationRecords() const {
3748 if (IsAllocTrackingEnabled()) {
3749 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3750 if (IsAllocTrackingEnabled()) {
3751 GetAllocationRecords()->AllowNewAllocationRecords();
3752 }
3753 }
3754}
3755
3756void Heap::DisallowNewAllocationRecords() const {
3757 if (IsAllocTrackingEnabled()) {
3758 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3759 if (IsAllocTrackingEnabled()) {
3760 GetAllocationRecords()->DisallowNewAllocationRecords();
3761 }
3762 }
3763}
3764
Mathieu Chartier31000802015-06-14 14:14:37 -07003765// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
3766class StackCrawlState {
3767 public:
3768 StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
3769 : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
3770 }
3771 size_t GetFrameCount() const {
3772 return frame_count_;
3773 }
3774 static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
3775 auto* const state = reinterpret_cast<StackCrawlState*>(arg);
3776 const uintptr_t ip = _Unwind_GetIP(context);
3777 // The first stack frame is get_backtrace itself. Skip it.
3778 if (ip != 0 && state->skip_count_ > 0) {
3779 --state->skip_count_;
3780 return _URC_NO_REASON;
3781 }
3782 // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
3783 state->frames_[state->frame_count_] = ip;
3784 state->frame_count_++;
3785 return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
3786 }
3787
3788 private:
3789 uintptr_t* const frames_;
3790 size_t frame_count_;
3791 const size_t max_depth_;
3792 size_t skip_count_;
3793};
3794
3795static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
3796 StackCrawlState state(frames, max_depth, 0u);
3797 _Unwind_Backtrace(&StackCrawlState::Callback, &state);
3798 return state.GetFrameCount();
3799}
3800
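// GC stress mode: hash the current allocation backtrace and force a collection the first time
// each distinct backtrace is seen; repeated backtraces only increment a counter.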
3801void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) {
3802 auto* const runtime = Runtime::Current();
3803 if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
3804 !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
3805 // Check if we should GC.
3806 bool new_backtrace = false;
3807 {
3808 static constexpr size_t kMaxFrames = 16u;
3809 uintptr_t backtrace[kMaxFrames];
3810 const size_t frames = get_backtrace(backtrace, kMaxFrames);
3811 uint64_t hash = 0;
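      // Mix each frame into a 64-bit hash; 2654435761 is the 32-bit golden-ratio (Knuth)
      // multiplicative hash constant.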
3812 for (size_t i = 0; i < frames; ++i) {
3813 hash = hash * 2654435761 + backtrace[i];
3814 hash += (hash >> 13) ^ (hash << 6);
3815 }
3816 MutexLock mu(self, *backtrace_lock_);
3817 new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
3818 if (new_backtrace) {
3819 seen_backtraces_.insert(hash);
3820 }
3821 }
3822 if (new_backtrace) {
3823 StackHandleScope<1> hs(self);
3824 auto h = hs.NewHandleWrapper(obj);
3825 CollectGarbage(false);
3826 unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
3827 } else {
3828 seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
3829 }
3830 }
3831}
3832
Ian Rogers1d54e732013-05-02 21:10:01 -07003833} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07003834} // namespace art