/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <limits>
#include <memory>
#include <unwind.h>  // For GC verification.
#include <vector>

#include "allocation_listener.h"
#include "art_field-inl.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/reference_processor.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc_pause_listener.h"
#include "heap-inl.h"
#include "image.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "obj_ptr-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "os.h"
#include "reflection.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.0;
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space names.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
static constexpr bool kGCALotMode = false;
// GC alot mode uses a small allocation stack to stress test a lot of GC.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
// Verify-object mode uses a small allocation stack since searching the allocation stack is slow.
static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
    sizeof(mirror::HeapReference<mirror::Object>);
// System.runFinalization can deadlock with native allocations; to deal with this, we have a
// timeout on how long we wait for finalizers to run. b/21544853
static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);

// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;

static constexpr size_t kNativeAllocationHistogramBuckets = 16;

// Extra added to the heap growth multiplier. Used to adjust the GC ergonomics for the read barrier
// config.
static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;

static inline bool CareAboutPauseTimes() {
  return Runtime::Current()->InJankPerceptibleProcessState();
}

Heap::Heap(size_t initial_size,
           size_t growth_limit,
           size_t min_free,
           size_t max_free,
           double target_utilization,
           double foreground_heap_growth_multiplier,
           size_t capacity,
           size_t non_moving_space_capacity,
           const std::string& image_file_name,
           const InstructionSet image_instruction_set,
           CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type,
           size_t large_object_threshold,
           size_t parallel_gc_threads,
           size_t conc_gc_threads,
           bool low_memory_mode,
           size_t long_pause_log_threshold,
           size_t long_gc_log_threshold,
           bool ignore_max_footprint,
           bool use_tlab,
           bool verify_pre_gc_heap,
           bool verify_pre_sweeping_heap,
           bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc,
           bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc,
           bool gc_stress_mode,
           bool measure_gc_performance,
           bool use_homogeneous_space_compaction_for_oom,
           uint64_t min_interval_homogeneous_space_compaction_by_oom)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      pending_task_lock_(nullptr),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      disable_thread_flip_count_(0),
      thread_flip_running_(false),
      collector_type_running_(kCollectorTypeNone),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_need_to_run_finalization_(false),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      native_histogram_lock_("Native allocation lock"),
      native_allocation_histogram_("Native allocation sizes",
                                   1U,
                                   kNativeAllocationHistogramBuckets),
      native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets),
      num_bytes_freed_revoke_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      gc_stress_mode_(gc_stress_mode),
      /* For GC-a-lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
          kDefaultAllocationStackSize),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      region_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(
          foreground_heap_growth_multiplier + kExtraHeapGrowthMultiplier),
      total_wait_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      semi_space_collector_(nullptr),
      mark_compact_collector_(nullptr),
      concurrent_copying_collector_(nullptr),
      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      pending_collector_transition_(nullptr),
      pending_heap_trim_(nullptr),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
      running_collection_is_blocking_(false),
      blocking_gc_count_(0U),
      blocking_gc_time_(0U),
      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
      gc_count_last_window_(0U),
      blocking_gc_count_last_window_(0U),
      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
                                        kGcCountRateMaxBucketCount),
      alloc_tracking_enabled_(false),
      backtrace_lock_(nullptr),
      seen_backtrace_count_(0u),
      unique_backtrace_count_(0u),
      gc_disabled_for_shutdown_(false) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  if (kUseReadBarrier) {
    CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
    CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
  }
  CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
  ScopedTrace trace(__FUNCTION__);
  Runtime* const runtime = Runtime::Current();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = runtime->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files.
  uint8_t* requested_alloc_space_begin = nullptr;
  if (foreground_collector_type_ == kCollectorTypeCC) {
    // Need to use a low address so that we can allocate a contiguous
    // 2 * Xmx space when there's no image (dex2oat for target).
    CHECK_GE(300 * MB, non_moving_space_capacity);
    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
  }

  // Load image space(s).
  if (!image_file_name.empty()) {
    // For code reuse, handle this like a work queue.
    std::vector<std::string> image_file_names;
    image_file_names.push_back(image_file_name);
    // The loaded spaces. Secondary images may fail to load, in which case we need to remove
    // already added spaces.
    std::vector<space::Space*> added_image_spaces;
    uint8_t* const original_requested_alloc_space_begin = requested_alloc_space_begin;
    for (size_t index = 0; index < image_file_names.size(); ++index) {
      std::string& image_name = image_file_names[index];
      std::string error_msg;
      std::unique_ptr<space::ImageSpace> boot_image_space_uptr = space::ImageSpace::CreateBootImage(
          image_name.c_str(),
          image_instruction_set,
          index > 0,
          &error_msg);
      if (boot_image_space_uptr != nullptr) {
        space::ImageSpace* boot_image_space = boot_image_space_uptr.release();
        AddSpace(boot_image_space);
        added_image_spaces.push_back(boot_image_space);
        // Oat files referenced by image files immediately follow them in memory; ensure the alloc
        // space isn't going to get in the middle.
319 uint8_t* oat_file_end_addr = boot_image_space->GetImageHeader().GetOatFileEnd();
320 CHECK_GT(oat_file_end_addr, boot_image_space->End());
321 requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
322 boot_image_spaces_.push_back(boot_image_space);
323
324 if (index == 0) {
325 // If this was the first space, check whether there are more images to load.
326 const OatFile* boot_oat_file = boot_image_space->GetOatFile();
327 if (boot_oat_file == nullptr) {
328 continue;
329 }
330
331 const OatHeader& boot_oat_header = boot_oat_file->GetOatHeader();
332 const char* boot_classpath =
Jeff Haof0192c82016-03-28 20:39:50 -0700333 boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
Jeff Haodcdc85b2015-12-04 14:06:18 -0800334 if (boot_classpath == nullptr) {
335 continue;
336 }
337
Mathieu Chartier866d8742016-09-21 15:24:18 -0700338 space::ImageSpace::ExtractMultiImageLocations(image_file_name,
339 boot_classpath,
340 &image_file_names);
Jeff Haodcdc85b2015-12-04 14:06:18 -0800341 }
342 } else {
343 LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
344 << "Attempting to fall back to imageless running. Error was: " << error_msg
345 << "\nAttempted image: " << image_name;
Andreas Gampe8994a042015-12-30 19:03:17 +0000346 // Remove already loaded spaces.
347 for (space::Space* loaded_space : added_image_spaces) {
348 RemoveSpace(loaded_space);
Mathieu Chartierb08f3052016-02-02 17:24:39 -0800349 delete loaded_space;
Andreas Gampe8994a042015-12-30 19:03:17 +0000350 }
Mathieu Chartierb08f3052016-02-02 17:24:39 -0800351 boot_image_spaces_.clear();
Mathieu Chartier582b68f2016-02-04 09:50:22 -0800352 requested_alloc_space_begin = original_requested_alloc_space_begin;
Jeff Haodcdc85b2015-12-04 14:06:18 -0800353 break;
354 }
Alex Light64ad14d2014-08-19 14:23:13 -0700355 }
Brian Carlstrom69b15fb2011-09-03 12:25:21 -0700356 }
Zuo Wangf37a88b2014-07-10 04:26:41 -0700357 /*
358 requested_alloc_space_begin -> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
Mathieu Chartier6a7824d2014-08-22 14:53:04 -0700359 +- nonmoving space (non_moving_space_capacity)+-
Zuo Wangf37a88b2014-07-10 04:26:41 -0700360 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
Mathieu Chartier8e219ae2014-08-19 14:29:46 -0700361 +-????????????????????????????????????????????+-
362 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
Mathieu Chartierb363f662014-07-16 13:28:58 -0700363 +-main alloc space / bump space 1 (capacity_) +-
Zuo Wangf37a88b2014-07-10 04:26:41 -0700364 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
Mathieu Chartierb363f662014-07-16 13:28:58 -0700365 +-????????????????????????????????????????????+-
366 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
367 +-main alloc space2 / bump space 2 (capacity_)+-
Zuo Wangf37a88b2014-07-10 04:26:41 -0700368 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
369 */
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800370 // We don't have hspace compaction enabled with GSS or CC.
371 if (foreground_collector_type_ == kCollectorTypeGSS ||
372 foreground_collector_type_ == kCollectorTypeCC) {
Hiroshi Yamauchi20ed5af2014-11-17 18:05:44 -0800373 use_homogeneous_space_compaction_for_oom_ = false;
374 }
Mathieu Chartierb363f662014-07-16 13:28:58 -0700375 bool support_homogeneous_space_compaction =
Mathieu Chartier0deeb812014-08-21 18:28:20 -0700376 background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
Hiroshi Yamauchi20ed5af2014-11-17 18:05:44 -0800377 use_homogeneous_space_compaction_for_oom_;
  // We may use the same space as the main space for the non moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);
  if (foreground_collector_type_ == kCollectorTypeGSS) {
    separate_non_moving_space = false;
  }
  std::unique_ptr<MemMap> main_mem_map_1;
  std::unique_ptr<MemMap> main_mem_map_2;

  // Gross hack to make dex2oat deterministic.
  if (foreground_collector_type_ == kCollectorTypeMS &&
      requested_alloc_space_begin == nullptr &&
      Runtime::Current()->IsAotCompiler()) {
    // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
    // b/26849108
    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
  }
  uint8_t* request_begin = requested_alloc_space_begin;
  if (request_begin != nullptr && separate_non_moving_space) {
    request_begin += non_moving_space_capacity;
  }
  std::string error_str;
  std::unique_ptr<MemMap> non_moving_space_mem_map;
  if (separate_non_moving_space) {
    ScopedTrace trace2("Create separate non moving space");
    // If we are the zygote, the non moving space becomes the zygote space when we run
    // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
    // rename the mem map later.
    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    non_moving_space_mem_map.reset(
        MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
                             non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
                             &error_str));
    CHECK(non_moving_space_mem_map != nullptr) << error_str;
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = reinterpret_cast<uint8_t*>(300 * MB);
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  if (foreground_collector_type_ != kCollectorTypeCC) {
    ScopedTrace trace2("Create main mem map");
    if (separate_non_moving_space || !is_zygote) {
      main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
                                                        request_begin,
                                                        capacity_,
                                                        &error_str));
    } else {
      // If no separate non-moving space and we are the zygote, the main space must come right
      // after the image space to avoid a gap. This is required since we want the zygote space to
      // be adjacent to the image space.
      main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
                                                PROT_READ | PROT_WRITE, true, false,
                                                &error_str));
    }
    CHECK(main_mem_map_1.get() != nullptr) << error_str;
  }
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    ScopedTrace trace2("Create main mem map 2");
    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
                                                      capacity_, &error_str));
    CHECK(main_mem_map_2.get() != nullptr) << error_str;
  }

  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    ScopedTrace trace2("Add non moving space");
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map->Size();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
        non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
        initial_size, size, size, false);
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
        << requested_alloc_space_begin;
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    region_space_ = space::RegionSpace::Create("main space (region space)", capacity_ * 2, request_begin);
    AddSpace(region_space_);
  } else if (IsMovingGc(foreground_collector_type_) &&
      foreground_collector_type_ != kCollectorTypeGSS) {
    // Create bump pointer spaces.
    // We only create the bump pointer spaces if the foreground collector is a compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    main_mem_map_1.release());
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            main_mem_map_2.release());
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (foreground_collector_type_ == kCollectorTypeGSS) {
      CHECK_EQ(foreground_collector_type_, background_collector_type_);
      // Create bump pointer spaces instead of a backup space.
      main_mem_map_2.release();
      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
                                                            kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(bump_pointer_space_ != nullptr);
      AddSpace(bump_pointer_space_);
      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
                                                    kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(temp_space_ != nullptr);
      AddSpace(temp_space_);
    } else if (main_mem_map_2.get() != nullptr) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
                                                           growth_limit_, capacity_, name, true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
                                                       capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  // TODO: Avoid needing to do this.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  // Allocate the card table.
  // We currently don't support dynamically resizing the card table.
  // Since we don't know where in the low_4gb the app image will be located, make the card table
  // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
  UNUSED(heap_capacity);
  // Start at 4 KB; we can be sure there are no spaces mapped this low since the address range is
  // reserved by the kernel.
  static constexpr size_t kMinHeapAddress = 4 * KB;
  card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
                                                  4 * GB - kMinHeapAddress));
  CHECK(card_table_.get() != nullptr) << "Failed to create card table";
  if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
    rb_table_.reset(new accounting::ReadBarrierTable());
    DCHECK(rb_table_->IsAllCleared());
  }
  if (HasBootImageSpace()) {
    // Don't add the image mod union table if we are running without an image; this can crash if
    // we use the CardCache implementation.
    for (space::ImageSpace* image_space : GetBootImageSpaces()) {
      accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
          "Image mod-union table", this, image_space);
      CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
      AddModUnionTable(mod_union_table);
    }
  }
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.StoreRelaxed(0);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  thread_flip_lock_ = new Mutex("GC thread flip lock");
  thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
                                                *thread_flip_lock_));
  task_processor_.reset(new TaskProcessor());
  reference_processor_.reset(new ReferenceProcessor());
  pending_task_lock_ = new Mutex("Pending task lock");
  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);
  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
        (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
    }
  }
  if (kMovingCollector) {
    if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
        use_homogeneous_space_compaction_for_oom_) {
      // TODO: Clean this up.
      const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
      semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                       generational ? "generational" : "");
      garbage_collectors_.push_back(semi_space_collector_);
    }
    if (MayUseCollector(kCollectorTypeCC)) {
      concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
                                                                       "",
                                                                       measure_gc_performance);
      DCHECK(region_space_ != nullptr);
      concurrent_copying_collector_->SetRegionSpace(region_space_);
      garbage_collectors_.push_back(concurrent_copying_collector_);
    }
    if (MayUseCollector(kCollectorTypeMC)) {
      mark_compact_collector_ = new collector::MarkCompact(this);
      garbage_collectors_.push_back(mark_compact_collector_);
    }
  }
  if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
    // Check that there's no gap between the image space and the non moving space so that the
    // immune region won't break (e.g. due to a large object allocated in the gap). This is only
    // required when we're the zygote or using GSS.
    // Space with smallest Begin().
    space::ImageSpace* first_space = nullptr;
    for (space::ImageSpace* space : boot_image_spaces_) {
      if (first_space == nullptr || space->Begin() < first_space->Begin()) {
        first_space = space;
      }
    }
    bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
    if (!no_gap) {
      PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
      MemMap::DumpMaps(LOG_STREAM(ERROR), true);
      LOG(FATAL) << "There's a gap between the image space and the non-moving space";
    }
  }
  instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
  if (gc_stress_mode_) {
    backtrace_lock_ = new Mutex("GC complete lock");
  }
  if (is_running_on_memory_tool_ || gc_stress_mode_) {
    instrumentation->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
                                           uint8_t* request_begin,
                                           size_t capacity,
                                           std::string* out_error_str) {
  while (true) {
    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
                                       PROT_READ | PROT_WRITE, true, false, out_error_str);
    if (map != nullptr || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
}

bool Heap::MayUseCollector(CollectorType type) const {
  return foreground_collector_type_ == type || background_collector_type_ == type;
}

space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
                                                      size_t initial_size,
                                                      size_t growth_limit,
                                                      size_t capacity,
                                                      const char* name,
                                                      bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          low_memory_mode_, can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}

void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

void Heap::DisableMovingGc() {
  CHECK(!kUseReadBarrier);
  if (IsMovingGc(foreground_collector_type_)) {
    foreground_collector_type_ = kCollectorTypeCMS;
  }
  if (IsMovingGc(background_collector_type_)) {
    background_collector_type_ = foreground_collector_type_;
  }
  TransitionCollector(foreground_collector_type_);
  Thread* const self = Thread::Current();
  ScopedThreadStateChange tsc(self, kSuspended);
  ScopedSuspendAll ssa(__FUNCTION__);
  // Something may have caused the transition to fail.
  if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
    CHECK(main_space_ != nullptr);
    // The allocation stack may have non movable objects in it. We need to flush it since the GC
    // can't only handle marking allocation stack objects of one non moving space and one main
    // space.
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      FlushAllocStack();
    }
    main_space_->DisableMovingObjects();
    non_moving_space_ = main_space_;
    CHECK(!non_moving_space_->CanMoveObjects());
  }
}

bool Heap::IsCompilingBoot() const {
  if (!Runtime::Current()->IsAotCompiler()) {
    return false;
  }
  ScopedObjectAccess soa(Thread::Current());
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GT(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}
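
// Illustrative sketch (not part of the original file): callers that hand out raw object addresses
// which a moving collector could otherwise relocate are expected to bracket the critical region,
// roughly:
//
//   Thread* self = Thread::Current();
//   heap->IncrementDisableMovingGC(self);   // waits for any in-progress moving GC to finish
//   // ... use addresses that must not move ...
//   heap->DecrementDisableMovingGC(self);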
800
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700801void Heap::IncrementDisableThreadFlip(Thread* self) {
802 // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
803 CHECK(kUseReadBarrier);
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800804 bool is_nested = self->GetDisableThreadFlipCount() > 0;
805 self->IncrementDisableThreadFlipCount();
806 if (is_nested) {
807 // If this is a nested JNI critical section enter, we don't need to wait or increment the global
808 // counter. The global counter is incremented only once for a thread for the outermost enter.
809 return;
810 }
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700811 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
812 MutexLock mu(self, *thread_flip_lock_);
813 bool has_waited = false;
814 uint64_t wait_start = NanoTime();
Hiroshi Yamauchiee235822016-08-19 17:03:27 -0700815 if (thread_flip_running_) {
Hiroshi Yamauchi6fb276b2016-08-26 10:39:29 -0700816 ATRACE_BEGIN("IncrementDisableThreadFlip");
Hiroshi Yamauchiee235822016-08-19 17:03:27 -0700817 while (thread_flip_running_) {
818 has_waited = true;
819 thread_flip_cond_->Wait(self);
820 }
Hiroshi Yamauchi6fb276b2016-08-26 10:39:29 -0700821 ATRACE_END();
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700822 }
823 ++disable_thread_flip_count_;
824 if (has_waited) {
825 uint64_t wait_time = NanoTime() - wait_start;
826 total_wait_time_ += wait_time;
827 if (wait_time > long_pause_log_threshold_) {
828 LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
829 }
830 }
831}
832
833void Heap::DecrementDisableThreadFlip(Thread* self) {
834 // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
835 // the GC waiting before doing a thread flip.
836 CHECK(kUseReadBarrier);
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800837 self->DecrementDisableThreadFlipCount();
838 bool is_outermost = self->GetDisableThreadFlipCount() == 0;
839 if (!is_outermost) {
840 // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
841 // The global counter is decremented only once for a thread for the outermost exit.
842 return;
843 }
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700844 MutexLock mu(self, *thread_flip_lock_);
845 CHECK_GT(disable_thread_flip_count_, 0U);
846 --disable_thread_flip_count_;
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800847 if (disable_thread_flip_count_ == 0) {
848 // Potentially notify the GC thread blocking to begin a thread flip.
849 thread_flip_cond_->Broadcast(self);
850 }
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700851}
852
853void Heap::ThreadFlipBegin(Thread* self) {
854 // Supposed to be called by GC. Set thread_flip_running_ to true. If disable_thread_flip_count_
855 // > 0, block. Otherwise, go ahead.
856 CHECK(kUseReadBarrier);
857 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
858 MutexLock mu(self, *thread_flip_lock_);
859 bool has_waited = false;
860 uint64_t wait_start = NanoTime();
861 CHECK(!thread_flip_running_);
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800862 // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
863 // GC. This is like a writer preference of a reader-writer lock.
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700864 thread_flip_running_ = true;
865 while (disable_thread_flip_count_ > 0) {
866 has_waited = true;
867 thread_flip_cond_->Wait(self);
868 }
869 if (has_waited) {
870 uint64_t wait_time = NanoTime() - wait_start;
871 total_wait_time_ += wait_time;
872 if (wait_time > long_pause_log_threshold_) {
873 LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
874 }
875 }
876}
877
878void Heap::ThreadFlipEnd(Thread* self) {
879 // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
880 // waiting to enter a JNI critical section.
881 CHECK(kUseReadBarrier);
882 MutexLock mu(self, *thread_flip_lock_);
883 CHECK(thread_flip_running_);
884 thread_flip_running_ = false;
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800885 // Potentially notify mutator threads blocking to enter a JNI critical section.
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700886 thread_flip_cond_->Broadcast(self);
887}
888
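// Switch between the foreground and background collector configurations when the process state
// changes. If collector transition stress testing is enabled, cycle the collector a few times
// before issuing the real transition request.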
Mathieu Chartierf8cb1782016-03-18 18:45:41 -0700889void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
890 if (old_process_state != new_process_state) {
891 const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
Mathieu Chartier91e30632014-03-25 15:58:50 -0700892 for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
893 // Start at index 1 to avoid "is always false" warning.
894 // Have iteration 1 always transition the collector.
Mathieu Chartierf8cb1782016-03-18 18:45:41 -0700895 TransitionCollector((((i & 1) == 1) == jank_perceptible)
896 ? foreground_collector_type_
897 : background_collector_type_);
Mathieu Chartier91e30632014-03-25 15:58:50 -0700898 usleep(kCollectorTransitionStressWait);
899 }
Mathieu Chartierf8cb1782016-03-18 18:45:41 -0700900 if (jank_perceptible) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800901 // Transition back to foreground right away to prevent jank.
Mathieu Chartier31f44142014-04-08 14:40:03 -0700902 RequestCollectorTransition(foreground_collector_type_, 0);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800903 } else {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800904 // Don't delay for debug builds since we may want to stress test the GC.
Zuo Wangf37a88b2014-07-10 04:26:41 -0700905 // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
906 // special handling which does a homogeneous space compaction once but then doesn't transition
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -0700907 // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
908 // transition the collector.
Zuo Wangf37a88b2014-07-10 04:26:41 -0700909 RequestCollectorTransition(background_collector_type_,
910 kIsDebugBuild ? 0 : kCollectorTransitionWait);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800911 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800912 }
Mathieu Chartierca2a24d2013-11-25 15:12:12 -0800913}
914
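// Create the GC thread pool, sized for the larger of the parallel and concurrent GC thread counts.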
Mathieu Chartier02b6a782012-10-26 13:51:26 -0700915void Heap::CreateThreadPool() {
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700916 const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
917 if (num_threads != 0) {
Mathieu Chartierbcd5e9d2013-11-13 14:33:28 -0800918 thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700919 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -0700920}
921
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800922 // Visit objects when threads aren't suspended. If using a concurrent moving
923 // GC, disable moving GC, suspend threads, and then visit objects.
Mathieu Chartier83c8ee02014-01-28 14:50:23 -0800924void Heap::VisitObjects(ObjectCallback callback, void* arg) {
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -0800925 Thread* self = Thread::Current();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800926 Locks::mutator_lock_->AssertSharedHeld(self);
927 DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
928 if (IsGcConcurrentAndMoving()) {
929 // Concurrent moving GC. Just suspending threads isn't sufficient
930 // because a collection isn't one big pause and we could suspend
931 // threads in the middle (between phases) of a concurrent moving
932 // collection where it's not easily known which objects are alive
933 // (both the region space and the non-moving space) or which
934 // copies of objects to visit, and the to-space invariant could be
935 // easily broken. Instead, visit objects while the GC isn't running, by using
936 // IncrementDisableMovingGC() and suspending threads.
937 IncrementDisableMovingGC(self);
Mathieu Chartierf1d666e2015-09-03 16:13:34 -0700938 {
939 ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
Mathieu Chartier4f55e222015-09-04 13:26:21 -0700940 ScopedSuspendAll ssa(__FUNCTION__);
Mathieu Chartierf1d666e2015-09-03 16:13:34 -0700941 VisitObjectsInternalRegionSpace(callback, arg);
942 VisitObjectsInternal(callback, arg);
Mathieu Chartierf1d666e2015-09-03 16:13:34 -0700943 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800944 DecrementDisableMovingGC(self);
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -0800945 } else {
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -0700946 // Since concurrent moving GC has thread suspension, also poison ObjPtrs in the normal case to
947 // catch bugs.
948 self->PoisonObjectPointers();
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -0800949 // GCs can move objects, so don't allow this.
Mathieu Chartier268764d2016-09-13 12:09:38 -0700950 ScopedAssertNoThreadSuspension ants("Visiting objects");
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800951 DCHECK(region_space_ == nullptr);
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -0800952 VisitObjectsInternal(callback, arg);
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -0700953 self->PoisonObjectPointers();
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -0800954 }
955}
956
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800957// Visit objects when threads are already suspended.
958void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
959 Thread* self = Thread::Current();
960 Locks::mutator_lock_->AssertExclusiveHeld(self);
961 VisitObjectsInternalRegionSpace(callback, arg);
962 VisitObjectsInternal(callback, arg);
963}
964
965// Visit objects in the region spaces.
966void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
967 Thread* self = Thread::Current();
968 Locks::mutator_lock_->AssertExclusiveHeld(self);
969 if (region_space_ != nullptr) {
970 DCHECK(IsGcConcurrentAndMoving());
971 if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
971 // Exclude the pre-zygote fork time, where the semi-space collector
972 // calls VerifyHeapReferences() as part of the zygote compaction
973 // and then ends up calling here without the moving GC disabled,
974 // which is fine.
976 DCHECK(IsMovingGCDisabled(self));
977 }
978 region_space_->Walk(callback, arg);
979 }
980}
981
982// Visit objects in the other spaces.
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -0800983void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
Mathieu Chartier590fee92013-09-13 13:46:47 -0700984 if (bump_pointer_space_ != nullptr) {
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800985 // Visit objects in bump pointer space.
986 bump_pointer_space_->Walk(callback, arg);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700987 }
988 // TODO: Switch to standard begin and end to use a range-based loop.
Mathieu Chartiercb535da2015-01-23 13:50:03 -0800989 for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
990 mirror::Object* const obj = it->AsMirrorPtr();
Mathieu Chartierebdf3f32014-02-13 10:23:27 -0800991 if (obj != nullptr && obj->GetClass() != nullptr) {
992 // Avoid the race condition caused by the object not yet being written into the allocation
Mathieu Chartiera5eae692014-12-17 17:56:03 -0800993 // stack or the class not yet being written in the object. Or, if
994 // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
Mathieu Chartierebdf3f32014-02-13 10:23:27 -0800995 callback(obj, arg);
996 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700997 }
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -0800998 {
999 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1000 GetLiveBitmap()->Walk(callback, arg);
1001 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001002}
1003
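// Mark every object currently on the allocation stack as live in the space bitmaps (and in the
// large object space bitmap, if one exists).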
1004void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
Mathieu Chartier00b59152014-07-25 10:13:51 -07001005 space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
1006 space::ContinuousSpace* space2 = non_moving_space_;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001007 // TODO: Generalize this to n bitmaps?
Mathieu Chartier00b59152014-07-25 10:13:51 -07001008 CHECK(space1 != nullptr);
1009 CHECK(space2 != nullptr);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001010 MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001011 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
1012 stack);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001013}
1014
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001015void Heap::DeleteThreadPool() {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001016 thread_pool_.reset(nullptr);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001017}
1018
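// Register a space with the heap: hook up its live and mark bitmaps and keep the continuous
// spaces sorted by increasing start address.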
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07001019void Heap::AddSpace(space::Space* space) {
Zuo Wangf37a88b2014-07-10 04:26:41 -07001020 CHECK(space != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001021 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1022 if (space->IsContinuousSpace()) {
1023 DCHECK(!space->IsDiscontinuousSpace());
1024 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1025 // Continuous spaces don't necessarily have bitmaps.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001026 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1027 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001028 if (live_bitmap != nullptr) {
Mathieu Chartier2796a162014-07-25 11:50:47 -07001029 CHECK(mark_bitmap != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001030 live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
1031 mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001032 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001033 continuous_spaces_.push_back(continuous_space);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001034 // Ensure that spaces remain sorted in increasing order of start address.
1035 std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
1036 [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
1037 return a->Begin() < b->Begin();
1038 });
Mathieu Chartier590fee92013-09-13 13:46:47 -07001039 } else {
Mathieu Chartier2796a162014-07-25 11:50:47 -07001040 CHECK(space->IsDiscontinuousSpace());
Mathieu Chartier590fee92013-09-13 13:46:47 -07001041 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001042 live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1043 mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
Mathieu Chartier590fee92013-09-13 13:46:47 -07001044 discontinuous_spaces_.push_back(discontinuous_space);
1045 }
1046 if (space->IsAllocSpace()) {
1047 alloc_spaces_.push_back(space->AsAllocSpace());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001048 }
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001049}
1050
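// Record the given continuous space as the current dlmalloc or rosalloc space.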
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07001051void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
1052 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1053 if (continuous_space->IsDlMallocSpace()) {
1054 dlmalloc_space_ = continuous_space->AsDlMallocSpace();
1055 } else if (continuous_space->IsRosAllocSpace()) {
1056 rosalloc_space_ = continuous_space->AsRosAllocSpace();
1057 }
1058}
1059
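// Unregister a space from the heap: detach its bitmaps and remove it from the space lists.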
1060void Heap::RemoveSpace(space::Space* space) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001061 DCHECK(space != nullptr);
1062 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1063 if (space->IsContinuousSpace()) {
1064 DCHECK(!space->IsDiscontinuousSpace());
1065 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1066 // Continuous spaces don't necessarily have bitmaps.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001067 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1068 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001069 if (live_bitmap != nullptr) {
1070 DCHECK(mark_bitmap != nullptr);
1071 live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
1072 mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
1073 }
1074 auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
1075 DCHECK(it != continuous_spaces_.end());
1076 continuous_spaces_.erase(it);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001077 } else {
1078 DCHECK(space->IsDiscontinuousSpace());
1079 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001080 live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1081 mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001082 auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
1083 discontinuous_space);
1084 DCHECK(it != discontinuous_spaces_.end());
1085 discontinuous_spaces_.erase(it);
1086 }
1087 if (space->IsAllocSpace()) {
1088 auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
1089 DCHECK(it != alloc_spaces_.end());
1090 alloc_spaces_.erase(it);
1091 }
1092}
1093
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001094void Heap::DumpGcPerformanceInfo(std::ostream& os) {
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001095 // Dump cumulative timings.
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001096 os << "Dumping cumulative Gc timings\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001097 uint64_t total_duration = 0;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001098 // Dump cumulative loggers for each GC type.
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001099 uint64_t total_paused_time = 0;
Mathieu Chartier5a487192014-04-08 11:14:54 -07001100 for (auto& collector : garbage_collectors_) {
Mathieu Chartier104fa0c2014-08-07 14:26:27 -07001101 total_duration += collector->GetCumulativeTimings().GetTotalNs();
1102 total_paused_time += collector->GetTotalPausedTimeNs();
1103 collector->DumpPerformanceInfo(os);
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001104 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001105 if (total_duration != 0) {
Brian Carlstrom2d888622013-07-18 17:02:00 -07001106 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001107 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1108 os << "Mean GC size throughput: "
Ian Rogers1d54e732013-05-02 21:10:01 -07001109 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001110 os << "Mean GC object throughput: "
Ian Rogers1d54e732013-05-02 21:10:01 -07001111 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001112 }
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001113 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
Mathieu Chartierc30a7252014-08-12 10:13:48 -07001114 os << "Total number of allocations " << total_objects_allocated << "\n";
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001115 os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1116 os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
Mathieu Chartierc30a7252014-08-12 10:13:48 -07001117 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001118 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1119 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
Mathieu Chartierc30a7252014-08-12 10:13:48 -07001120 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1121 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001122 if (HasZygoteSpace()) {
1123 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1124 }
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001125 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001126 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1127 os << "Total GC count: " << GetGcCount() << "\n";
1128 os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1129 os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1130 os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1131
1132 {
1133 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1134 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1135 os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1136 gc_count_rate_histogram_.DumpBins(os);
1137 os << "\n";
1138 }
1139 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1140 os << "Histogram of blocking GC count per "
1141 << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1142 blocking_gc_count_rate_histogram_.DumpBins(os);
1143 os << "\n";
1144 }
1145 }
1146
Hiroshi Yamauchib62f2e62016-03-23 15:51:24 -07001147 if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1148 rosalloc_space_->DumpStats(os);
1149 }
1150
Mathieu Chartier5d2a3f72016-05-11 11:35:39 -07001151 {
1152 MutexLock mu(Thread::Current(), native_histogram_lock_);
1153 if (native_allocation_histogram_.SampleSize() > 0u) {
1154 os << "Histogram of native allocation ";
1155 native_allocation_histogram_.DumpBins(os);
1156 os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n";
1157 }
1158 if (native_free_histogram_.SampleSize() > 0u) {
1159 os << "Histogram of native free ";
1160 native_free_histogram_.DumpBins(os);
1161 os << " bucket size " << native_free_histogram_.BucketWidth() << "\n";
1162 }
1163 }
1164
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001165 BaseMutex::DumpAll(os);
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001166}
1167
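// Clear all cumulative GC statistics, including per-collector measurements and the GC count rate
// histograms.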
Hiroshi Yamauchi37670172015-06-10 17:20:54 -07001168void Heap::ResetGcPerformanceInfo() {
1169 for (auto& collector : garbage_collectors_) {
1170 collector->ResetMeasurements();
1171 }
Hiroshi Yamauchi37670172015-06-10 17:20:54 -07001172 total_bytes_freed_ever_ = 0;
1173 total_objects_freed_ever_ = 0;
1174 total_wait_time_ = 0;
1175 blocking_gc_count_ = 0;
1176 blocking_gc_time_ = 0;
1177 gc_count_last_window_ = 0;
1178 blocking_gc_count_last_window_ = 0;
1179 last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
1180 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1181 {
1182 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1183 gc_count_rate_histogram_.Reset();
1184 blocking_gc_count_rate_histogram_.Reset();
1185 }
1186}
1187
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001188uint64_t Heap::GetGcCount() const {
1189 uint64_t gc_count = 0U;
1190 for (auto& collector : garbage_collectors_) {
1191 gc_count += collector->GetCumulativeTimings().GetIterations();
1192 }
1193 return gc_count;
1194}
1195
1196uint64_t Heap::GetGcTime() const {
1197 uint64_t gc_time = 0U;
1198 for (auto& collector : garbage_collectors_) {
1199 gc_time += collector->GetCumulativeTimings().GetTotalNs();
1200 }
1201 return gc_time;
1202}
1203
1204uint64_t Heap::GetBlockingGcCount() const {
1205 return blocking_gc_count_;
1206}
1207
1208uint64_t Heap::GetBlockingGcTime() const {
1209 return blocking_gc_time_;
1210}
1211
1212void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1213 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1214 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1215 gc_count_rate_histogram_.DumpBins(os);
1216 }
1217}
1218
1219void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1220 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1221 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1222 blocking_gc_count_rate_histogram_.DumpBins(os);
1223 }
1224}
1225
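// Atomically install a new allocation listener and return the previously installed one.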
Andreas Gampe27fa96c2016-10-07 15:05:24 -07001226ALWAYS_INLINE
1227static inline AllocationListener* GetAndOverwriteAllocationListener(
1228 Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
1229 AllocationListener* old;
1230 do {
1231 old = storage->LoadSequentiallyConsistent();
1232 } while (!storage->CompareExchangeStrongSequentiallyConsistent(old, new_value));
1233 return old;
1234}
1235
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001236Heap::~Heap() {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001237 VLOG(heap) << "Starting ~Heap()";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001238 STLDeleteElements(&garbage_collectors_);
1239 // If we don't reset then the mark stack complains in its destructor.
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001240 allocation_stack_->Reset();
Man Cao8c2ff642015-05-27 17:25:30 -07001241 allocation_records_.reset();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001242 live_stack_->Reset();
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001243 STLDeleteValues(&mod_union_tables_);
Mathieu Chartier0767c9a2014-03-26 12:53:19 -07001244 STLDeleteValues(&remembered_sets_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001245 STLDeleteElements(&continuous_spaces_);
1246 STLDeleteElements(&discontinuous_spaces_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001247 delete gc_complete_lock_;
Andreas Gampe6be4f2a2015-11-10 13:34:17 -08001248 delete thread_flip_lock_;
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001249 delete pending_task_lock_;
Mathieu Chartier31000802015-06-14 14:14:37 -07001250 delete backtrace_lock_;
1251 if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
1252 LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
1253 << " total=" << seen_backtrace_count_.LoadRelaxed() +
1254 unique_backtrace_count_.LoadRelaxed();
1255 }
Andreas Gampe27fa96c2016-10-07 15:05:24 -07001256
Mathieu Chartier590fee92013-09-13 13:46:47 -07001257 VLOG(heap) << "Finished ~Heap()";
Carl Shapiro69759ea2011-07-21 18:13:35 -07001258}
1259
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001260
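// Return the continuous space that contains the given address, or null if no space does.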
1261space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001262 for (const auto& space : continuous_spaces_) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001263 if (space->Contains(addr)) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001264 return space;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001265 }
1266 }
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001267 return nullptr;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001268}
1269
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001270space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1271 bool fail_ok) const {
1272 space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
1273 if (space != nullptr) {
1274 return space;
1275 }
1276 if (!fail_ok) {
1277 LOG(FATAL) << "object " << obj << " not inside any spaces!";
1278 }
1279 return nullptr;
1280}
1281
1282space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
Ian Rogers1d54e732013-05-02 21:10:01 -07001283 bool fail_ok) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001284 for (const auto& space : discontinuous_spaces_) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001285 if (space->Contains(obj.Ptr())) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001286 return space;
Ian Rogers1d54e732013-05-02 21:10:01 -07001287 }
1288 }
1289 if (!fail_ok) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001290 LOG(FATAL) << "object " << obj << " not inside any spaces!";
Ian Rogers1d54e732013-05-02 21:10:01 -07001291 }
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001292 return nullptr;
Ian Rogers1d54e732013-05-02 21:10:01 -07001293}
1294
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001295space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
Ian Rogers1d54e732013-05-02 21:10:01 -07001296 space::Space* result = FindContinuousSpaceFromObject(obj, true);
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001297 if (result != nullptr) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001298 return result;
1299 }
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001300 return FindDiscontinuousSpaceFromObject(obj, fail_ok);
Ian Rogers1d54e732013-05-02 21:10:01 -07001301}
1302
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001303space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
1304 for (const auto& space : continuous_spaces_) {
1305 if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1306 return space;
1307 }
1308 }
1309 for (const auto& space : discontinuous_spaces_) {
1310 if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1311 return space;
1312 }
1313 }
1314 return nullptr;
1315}
1316
1317
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001318void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
Mathieu Chartiere8f3f032016-04-04 16:49:44 -07001319 // If we're in a stack overflow, do not create a new exception. It would require running the
1320 // constructor, which will of course still be in a stack overflow.
1321 if (self->IsHandlingStackOverflow()) {
1322 self->SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1323 return;
1324 }
1325
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001326 std::ostringstream oss;
Ian Rogersef7d42f2014-01-06 12:55:46 -08001327 size_t total_bytes_free = GetFreeMemory();
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001328 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001329 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001330 // If the allocation failed due to fragmentation, print out the largest continuous allocation.
Zuo Wangf37a88b2014-07-10 04:26:41 -07001331 if (total_bytes_free >= byte_count) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001332 space::AllocSpace* space = nullptr;
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001333 if (allocator_type == kAllocatorTypeNonMoving) {
1334 space = non_moving_space_;
1335 } else if (allocator_type == kAllocatorTypeRosAlloc ||
1336 allocator_type == kAllocatorTypeDlMalloc) {
1337 space = main_space_;
Mathieu Chartierb363f662014-07-16 13:28:58 -07001338 } else if (allocator_type == kAllocatorTypeBumpPointer ||
1339 allocator_type == kAllocatorTypeTLAB) {
1340 space = bump_pointer_space_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001341 } else if (allocator_type == kAllocatorTypeRegion ||
1342 allocator_type == kAllocatorTypeRegionTLAB) {
1343 space = region_space_;
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001344 }
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001345 if (space != nullptr) {
1346 space->LogFragmentationAllocFailure(oss, byte_count);
1347 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001348 }
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001349 self->ThrowOutOfMemoryError(oss.str().c_str());
1350}
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001351
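// Carry out a previously requested collector transition. Homogeneous space compaction and the CC
// full compaction are only run when the process is not jank perceptible.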
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001352void Heap::DoPendingCollectorTransition() {
1353 CollectorType desired_collector_type = desired_collector_type_;
Mathieu Chartierb2728552014-09-08 20:08:41 +00001354 // Launch homogeneous space compaction if it is desired.
1355 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1356 if (!CareAboutPauseTimes()) {
1357 PerformHomogeneousSpaceCompact();
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001358 } else {
1359 VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
Mathieu Chartierb2728552014-09-08 20:08:41 +00001360 }
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -07001361 } else if (desired_collector_type == kCollectorTypeCCBackground) {
1362 DCHECK(kUseReadBarrier);
1363 if (!CareAboutPauseTimes()) {
1364 // Invoke CC full compaction.
1365 CollectGarbageInternal(collector::kGcTypeFull,
1366 kGcCauseCollectorTransition,
1367 /*clear_soft_references*/false);
1368 } else {
1369 VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
1370 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001371 } else {
1372 TransitionCollector(desired_collector_type);
Mathieu Chartierb2728552014-09-08 20:08:41 +00001373 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001374}
1375
1376void Heap::Trim(Thread* self) {
Mathieu Chartier8d447252015-10-26 10:21:14 -07001377 Runtime* const runtime = Runtime::Current();
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07001378 if (!CareAboutPauseTimes()) {
1379 // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
1380 // about pauses.
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001381 ScopedTrace trace("Deflating monitors");
Hiroshi Yamauchi3b1d1b72016-10-12 11:53:57 -07001382 // Avoid race conditions on the lock word for CC.
1383 ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001384 ScopedSuspendAll ssa(__FUNCTION__);
1385 uint64_t start_time = NanoTime();
1386 size_t count = runtime->GetMonitorList()->DeflateMonitors();
1387 VLOG(heap) << "Deflating " << count << " monitors took "
1388 << PrettyDuration(NanoTime() - start_time);
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07001389 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001390 TrimIndirectReferenceTables(self);
1391 TrimSpaces(self);
Mathieu Chartier8d447252015-10-26 10:21:14 -07001392 // Trim arenas that may have been used by JIT or verifier.
Mathieu Chartier8d447252015-10-26 10:21:14 -07001393 runtime->GetArenaPool()->TrimMaps();
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08001394}
1395
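// Checkpoint closure that trims each thread's local indirect reference table.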
Mathieu Chartier91c2f0c2014-11-26 11:21:15 -08001396class TrimIndirectReferenceTableClosure : public Closure {
1397 public:
1398 explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1399 }
1400 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier91c2f0c2014-11-26 11:21:15 -08001401 thread->GetJniEnv()->locals.Trim();
Lei Lidd9943d2015-02-02 14:24:44 +08001402 // If thread is a running mutator, then act on behalf of the trim thread.
1403 // See the code in ThreadList::RunCheckpoint.
Mathieu Chartier10d25082015-10-28 18:36:09 -07001404 barrier_->Pass(Thread::Current());
Mathieu Chartier91c2f0c2014-11-26 11:21:15 -08001405 }
1406
1407 private:
1408 Barrier* const barrier_;
1409};
1410
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001411void Heap::TrimIndirectReferenceTables(Thread* self) {
1412 ScopedObjectAccess soa(self);
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001413 ScopedTrace trace(__PRETTY_FUNCTION__);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001414 JavaVMExt* vm = soa.Vm();
1415 // Trim globals indirect reference table.
1416 vm->TrimGlobals();
1417 // Trim locals indirect reference tables.
1418 Barrier barrier(0);
1419 TrimIndirectReferenceTableClosure closure(&barrier);
1420 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1421 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
Lei Lidd9943d2015-02-02 14:24:44 +08001422 if (barrier_count != 0) {
1423 barrier.Increment(self, barrier_count);
1424 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001425}
Mathieu Chartier91c2f0c2014-11-26 11:21:15 -08001426
Mathieu Chartieraa516822015-10-02 15:53:37 -07001427void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1428 MutexLock mu(self, *gc_complete_lock_);
1429 // Ensure there is only one GC at a time.
1430 WaitForGcToCompleteLocked(cause, self);
1431 collector_type_running_ = collector_type;
1432}
1433
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001434void Heap::TrimSpaces(Thread* self) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08001435 {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001436 // Need to do this before acquiring the locks since we don't want to get suspended while
1437 // holding any locks.
1438 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001439 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1440 // trimming.
Mathieu Chartieraa516822015-10-02 15:53:37 -07001441 StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001442 }
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001443 ScopedTrace trace(__PRETTY_FUNCTION__);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001444 const uint64_t start_ns = NanoTime();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001445 // Trim the managed spaces.
1446 uint64_t total_alloc_space_allocated = 0;
1447 uint64_t total_alloc_space_size = 0;
1448 uint64_t managed_reclaimed = 0;
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08001449 {
1450 ScopedObjectAccess soa(self);
1451 for (const auto& space : continuous_spaces_) {
1452 if (space->IsMallocSpace()) {
1453 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1454 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1455 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1456 // for a long period of time.
1457 managed_reclaimed += malloc_space->Trim();
1458 }
1459 total_alloc_space_size += malloc_space->Size();
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001460 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001461 }
1462 }
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001463 total_alloc_space_allocated = GetBytesAllocated();
1464 if (large_object_space_ != nullptr) {
1465 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1466 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07001467 if (bump_pointer_space_ != nullptr) {
1468 total_alloc_space_allocated -= bump_pointer_space_->Size();
1469 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001470 if (region_space_ != nullptr) {
1471 total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1472 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001473 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1474 static_cast<float>(total_alloc_space_size);
1475 uint64_t gc_heap_end_ns = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001476 // We never move things in the native heap, so we can finish the GC at this point.
1477 FinishGC(self, collector::kGcTypeNone);
Ian Rogers872dd822014-10-30 11:19:14 -07001478
Mathieu Chartier590fee92013-09-13 13:46:47 -07001479 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
Dimitry Ivanove6465bc2015-12-14 18:55:02 -08001480 << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1481 << static_cast<int>(100 * managed_utilization) << "%.";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001482}
1483
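// A null address is considered valid; otherwise the address must be aligned and lie inside some
// known space.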
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001484bool Heap::IsValidObjectAddress(const void* addr) const {
1485 if (addr == nullptr) {
Elliott Hughes88c5c352012-03-15 18:49:48 -07001486 return true;
1487 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001488 return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001489}
1490
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001491bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
1492 return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
Mathieu Chartierd68ac702014-02-11 14:50:51 -08001493}
1494
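// Conservatively determine whether an object is live by consulting the moving spaces, the space
// bitmaps, and optionally the allocation and live stacks.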
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001495bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
1496 bool search_allocation_stack,
1497 bool search_live_stack,
1498 bool sorted) {
1499 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
Mathieu Chartier15d34022014-02-26 17:16:38 -08001500 return false;
1501 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001502 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001503 mirror::Class* klass = obj->GetClass<kVerifyNone>();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001504 if (obj == klass) {
Mathieu Chartier9be9a7a2014-01-24 14:07:33 -08001505 // This case happens for java.lang.Class.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001506 return true;
1507 }
1508 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001509 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001510 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1511 // a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001512 return temp_space_->Contains(obj.Ptr());
Ian Rogers1d54e732013-05-02 21:10:01 -07001513 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001514 if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001515 return true;
1516 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001517 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001518 space::DiscontinuousSpace* d_space = nullptr;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001519 if (c_space != nullptr) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001520 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001521 return true;
1522 }
1523 } else {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001524 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001525 if (d_space != nullptr) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001526 if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001527 return true;
1528 }
1529 }
1530 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001531 // This is covering the allocation/live stack swapping that is done without mutators suspended.
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001532 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1533 if (i > 0) {
1534 NanoSleep(MsToNs(10));
Ian Rogers1d54e732013-05-02 21:10:01 -07001535 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001536 if (search_allocation_stack) {
1537 if (sorted) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001538 if (allocation_stack_->ContainsSorted(obj.Ptr())) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001539 return true;
1540 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001541 } else if (allocation_stack_->Contains(obj.Ptr())) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001542 return true;
1543 }
1544 }
1545
1546 if (search_live_stack) {
1547 if (sorted) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001548 if (live_stack_->ContainsSorted(obj.Ptr())) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001549 return true;
1550 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001551 } else if (live_stack_->Contains(obj.Ptr())) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001552 return true;
1553 }
1554 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001555 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001556 // We need to check the bitmaps again since there is a race where we mark something as live and
1557 // then clear the stack containing it.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001558 if (c_space != nullptr) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001559 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001560 return true;
1561 }
1562 } else {
1563 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001564 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001565 return true;
1566 }
1567 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001568 return false;
Elliott Hughes6a5bd492011-10-28 14:33:57 -07001569}
1570
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07001571std::string Heap::DumpSpaces() const {
1572 std::ostringstream oss;
1573 DumpSpaces(oss);
1574 return oss.str();
1575}
1576
1577void Heap::DumpSpaces(std::ostream& stream) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001578 for (const auto& space : continuous_spaces_) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001579 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1580 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001581 stream << space << " " << *space << "\n";
1582 if (live_bitmap != nullptr) {
1583 stream << live_bitmap << " " << *live_bitmap << "\n";
1584 }
1585 if (mark_bitmap != nullptr) {
1586 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1587 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001588 }
Mathieu Chartier02e25112013-08-14 16:14:24 -07001589 for (const auto& space : discontinuous_spaces_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001590 stream << space << " " << *space << "\n";
Mathieu Chartier128c52c2012-10-16 14:12:41 -07001591 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001592}
1593
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001594void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
Stephen Hines22c6a812014-07-16 11:03:43 -07001595 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1596 return;
1597 }
1598
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001599 // Ignore early dawn of the universe verifications.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001600 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
Ian Rogers62d6c772013-02-27 08:32:07 -08001601 return;
1602 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001603 CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001604 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
Mathieu Chartier4e305412014-02-19 10:54:44 -08001605 CHECK(c != nullptr) << "Null class in object " << obj;
Roland Levillain14d90572015-07-16 10:52:26 +01001606 CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001607 CHECK(VerifyClassClass(c));
Mathieu Chartier0325e622012-09-05 14:22:51 -07001608
Mathieu Chartier4e305412014-02-19 10:54:44 -08001609 if (verify_object_mode_ > kVerifyObjectModeFast) {
1610 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07001611 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
Mathieu Chartierdcf8d722012-08-02 14:55:54 -07001612 }
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001613}
1614
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001615void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001616 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001617}
1618
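// Verify every object in the heap by walking the live bitmap.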
1619void Heap::VerifyHeap() {
Ian Rogers50b35e22012-10-04 10:09:15 -07001620 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001621 GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001622}
1623
Mathieu Chartiere76e70f2014-05-02 16:35:37 -07001624void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
Mathieu Chartier601276a2014-03-20 15:12:30 -07001625 // Use signed comparison since freed bytes can be negative when background compaction to foreground
1626 // transitions occur. This is caused by moving objects from a bump pointer space to a
1627 // free list backed space, which typically increases memory footprint due to padding and binning.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001628 DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
Mathieu Chartiere76e70f2014-05-02 16:35:37 -07001629 // Note: This relies on 2s complement for handling negative freed_bytes.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001630 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001631 if (Runtime::Current()->HasStatsEnabled()) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001632 RuntimeStats* thread_stats = Thread::Current()->GetStats();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001633 thread_stats->freed_objects += freed_objects;
Elliott Hughes307f75d2011-10-12 18:04:40 -07001634 thread_stats->freed_bytes += freed_bytes;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001635 // TODO: Do this concurrently.
1636 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1637 global_stats->freed_objects += freed_objects;
1638 global_stats->freed_bytes += freed_bytes;
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001639 }
Carl Shapiro58551df2011-07-24 03:09:51 -07001640}
1641
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001642void Heap::RecordFreeRevoke() {
1643 // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1644 // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1645 // If there's a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_
1646 // all the way to zero, as the remainder will be subtracted at the next GC.
1647 size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
1648 CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
1649 bytes_freed) << "num_bytes_freed_revoke_ underflow";
1650 CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
1651 bytes_freed) << "num_bytes_allocated_ underflow";
1652 GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1653}
1654
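// Find the RosAlloc space whose allocator is the given rosalloc instance, or null if none matches.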
Zuo Wangf37a88b2014-07-10 04:26:41 -07001655space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08001656 if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1657 return rosalloc_space_;
1658 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001659 for (const auto& space : continuous_spaces_) {
1660 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1661 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1662 return space->AsContinuousSpace()->AsRosAllocSpace();
1663 }
1664 }
1665 }
1666 return nullptr;
1667}
1668
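// Returns true if the runtime's allocation entrypoints are currently instrumented.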
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001669static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001670 instrumentation::Instrumentation* const instrumentation =
1671 Runtime::Current()->GetInstrumentation();
1672 return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1673}
1674
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001675mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1676 AllocatorType allocator,
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001677 bool instrumented,
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001678 size_t alloc_size,
1679 size_t* bytes_allocated,
Ian Rogers6fac4472014-02-25 17:01:10 -08001680 size_t* usable_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001681 size_t* bytes_tl_bulk_allocated,
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001682 ObjPtr<mirror::Class>* klass) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001683 bool was_default_allocator = allocator == GetCurrentAllocator();
Mathieu Chartierf4f38432014-09-03 11:21:08 -07001684 // Make sure there is no pending exception since we may need to throw an OOME.
1685 self->AssertNoPendingException();
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001686 DCHECK(klass != nullptr);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001687 StackHandleScope<1> hs(self);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001688 HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001689 // The allocation failed. If the GC is running, block until it completes, and then retry the
1690 // allocation.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001691 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001692 // If we were the default allocator but the allocator changed while we were suspended,
1693 // abort the allocation.
1694 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1695 (!instrumented && EntrypointsInstrumented())) {
1696 return nullptr;
1697 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001698 if (last_gc != collector::kGcTypeNone) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001699 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001700 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001701 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001702 if (ptr != nullptr) {
1703 return ptr;
1704 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001705 }
1706
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001707 collector::GcType tried_type = next_gc_type_;
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001708 const bool gc_ran =
1709 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001710 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1711 (!instrumented && EntrypointsInstrumented())) {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001712 return nullptr;
1713 }
1714 if (gc_ran) {
1715 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001716 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001717 if (ptr != nullptr) {
1718 return ptr;
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001719 }
1720 }
1721
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001722 // Loop through our different Gc types and try to Gc until we get enough free memory.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001723 for (collector::GcType gc_type : gc_plan_) {
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001724 if (gc_type == tried_type) {
1725 continue;
1726 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001727 // Attempt to run the collector, if we succeed, re-try the allocation.
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001728 const bool plan_gc_ran =
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001729 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001730 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1731 (!instrumented && EntrypointsInstrumented())) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001732 return nullptr;
1733 }
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001734 if (plan_gc_ran) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001735 // Did we free sufficient memory for the allocation to succeed?
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001736 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001737 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001738 if (ptr != nullptr) {
1739 return ptr;
1740 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001741 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001742 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001743 // Allocations have failed after GCs; this is an exceptional state.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001744 // Try harder, growing the heap if necessary.
1745 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001746 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001747 if (ptr != nullptr) {
1748 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001749 }
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001750 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1751 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1752 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1753 // OOME.
1754 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1755 << " allocation";
1756 // TODO: Run finalization, but this may cause more allocations to occur.
1757 // We don't need a WaitForGcToComplete here either.
1758 DCHECK(!gc_plan_.empty());
1759 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001760 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1761 (!instrumented && EntrypointsInstrumented())) {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001762 return nullptr;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001763 }
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001764 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1765 bytes_tl_bulk_allocated);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001766 if (ptr == nullptr) {
Zuo Wangf37a88b2014-07-10 04:26:41 -07001767 const uint64_t current_time = NanoTime();
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001768 switch (allocator) {
1769 case kAllocatorTypeRosAlloc:
1770 // Fall-through.
1771 case kAllocatorTypeDlMalloc: {
1772 if (use_homogeneous_space_compaction_for_oom_ &&
1773 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1774 min_interval_homogeneous_space_compaction_by_oom_) {
1775 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1776 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001777 // Thread suspension could have occurred.
1778 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1779 (!instrumented && EntrypointsInstrumented())) {
1780 return nullptr;
1781 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001782 switch (result) {
1783 case HomogeneousSpaceCompactResult::kSuccess:
1784 // If the allocation succeeded, we delayed an oom.
1785 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001786 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001787 if (ptr != nullptr) {
1788 count_delayed_oom_++;
1789 }
1790 break;
1791 case HomogeneousSpaceCompactResult::kErrorReject:
1792 // Reject due to disabled moving GC.
1793 break;
1794 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1795 // Throw OOM by default.
1796 break;
1797 default: {
Ian Rogers2c4257b2014-10-24 14:20:06 -07001798 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1799 << static_cast<size_t>(result);
1800 UNREACHABLE();
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001801 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001802 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001803          // Always print that we ran homogeneous space compaction since this can cause jank.
1804 VLOG(heap) << "Ran heap homogeneous space compaction, "
1805 << " requested defragmentation "
1806 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1807 << " performed defragmentation "
1808 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1809 << " ignored homogeneous space compaction "
1810 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1811 << " delayed count = "
1812 << count_delayed_oom_.LoadSequentiallyConsistent();
Zuo Wangf37a88b2014-07-10 04:26:41 -07001813 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001814 break;
Zuo Wangf37a88b2014-07-10 04:26:41 -07001815 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001816 case kAllocatorTypeNonMoving: {
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -07001817 if (kUseReadBarrier) {
1818 // DisableMovingGc() isn't compatible with CC.
1819 break;
1820 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001821 // Try to transition the heap if the allocation failure was due to the space being full.
Mathieu Chartier5ace2012016-11-30 10:15:41 -08001822 if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001823 // If we aren't out of memory then the OOM was probably from the non moving space being
1824 // full. Attempt to disable compaction and turn the main space into a non moving space.
1825 DisableMovingGc();
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001826 // Thread suspension could have occurred.
1827 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1828 (!instrumented && EntrypointsInstrumented())) {
1829 return nullptr;
1830 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001831          // If we are still using a moving GC then something must have caused the transition to fail.
1832 if (IsMovingGc(collector_type_)) {
1833 MutexLock mu(self, *gc_complete_lock_);
1834 // If we couldn't disable moving GC, just throw OOME and return null.
1835 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1836 << disable_moving_gc_count_;
1837 } else {
1838 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1839 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001840 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001841 }
1842 }
1843 break;
1844 }
1845 default: {
1846          // Do nothing for other allocators.
1847 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001848 }
1849 }
1850 // If the allocation hasn't succeeded by this point, throw an OOM error.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001851 if (ptr == nullptr) {
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001852 ThrowOutOfMemoryError(self, alloc_size, allocator);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001853 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001854 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001855}
1856
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001857void Heap::SetTargetHeapUtilization(float target) {
1858 DCHECK_GT(target, 0.0f); // asserted in Java code
1859 DCHECK_LT(target, 1.0f);
1860 target_utilization_ = target;
1861}
1862
Ian Rogers1d54e732013-05-02 21:10:01 -07001863size_t Heap::GetObjectsAllocated() const {
Mathieu Chartier4f55e222015-09-04 13:26:21 -07001864 Thread* const self = Thread::Current();
Mathieu Chartierb43390c2015-05-12 10:47:11 -07001865 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
Mathieu Chartierb43390c2015-05-12 10:47:11 -07001866 // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
Mathieu Chartier4f55e222015-09-04 13:26:21 -07001867 ScopedSuspendAll ssa(__FUNCTION__);
1868 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001869 size_t total = 0;
Mathieu Chartier4f55e222015-09-04 13:26:21 -07001870 for (space::AllocSpace* space : alloc_spaces_) {
1871 total += space->GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001872 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001873 return total;
1874}
1875
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001876uint64_t Heap::GetObjectsAllocatedEver() const {
Mathieu Chartier4edd8472015-06-01 10:47:36 -07001877 uint64_t total = GetObjectsFreedEver();
1878 // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1879 if (Thread::Current() != nullptr) {
1880 total += GetObjectsAllocated();
1881 }
1882 return total;
Ian Rogers1d54e732013-05-02 21:10:01 -07001883}
1884
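// Note: total bytes ever allocated is reconstructed as the bytes freed over the heap's lifetime
// plus the bytes that are currently allocated.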
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001885uint64_t Heap::GetBytesAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001886 return GetBytesFreedEver() + GetBytesAllocated();
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001887}
1888
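// Visitor that counts, for each class in |classes_|, how many heap objects are instances of that
// class, either by exact match or (when |use_is_assignable_from_| is set) via IsAssignableFrom.
// Used by Heap::CountInstances below.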
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001889class InstanceCounter {
1890 public:
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001891 InstanceCounter(const std::vector<Handle<mirror::Class>>& classes,
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001892 bool use_is_assignable_from,
1893 uint64_t* counts)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001894 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001895 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {}
1896
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001897 static void Callback(mirror::Object* obj, void* arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001898 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001899 InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1900 mirror::Class* instance_class = obj->GetClass();
1901 CHECK(instance_class != nullptr);
1902 for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001903 ObjPtr<mirror::Class> klass = instance_counter->classes_[i].Get();
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001904 if (instance_counter->use_is_assignable_from_) {
Mathieu Chartierf1820852015-07-10 13:19:51 -07001905 if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001906 ++instance_counter->counts_[i];
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001907 }
Mathieu Chartierf1820852015-07-10 13:19:51 -07001908 } else if (instance_class == klass) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001909 ++instance_counter->counts_[i];
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001910 }
1911 }
1912 }
1913
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001914 private:
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001915 const std::vector<Handle<mirror::Class>>& classes_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001916 bool use_is_assignable_from_;
1917 uint64_t* const counts_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001918 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001919};
1920
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001921void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
1922 bool use_is_assignable_from,
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001923 uint64_t* counts) {
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001924 InstanceCounter counter(classes, use_is_assignable_from, counts);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001925 VisitObjects(InstanceCounter::Callback, &counter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001926}
1927
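// Visitor that collects handles to instances of a single class into |instances_|, stopping once
// |max_count_| entries have been gathered (a max count of zero means unlimited).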
Elliott Hughes3b78c942013-01-15 17:35:41 -08001928class InstanceCollector {
1929 public:
Mathieu Chartier2d855952016-10-12 19:37:59 -07001930 InstanceCollector(VariableSizedHandleScope& scope,
1931 Handle<mirror::Class> c,
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001932 int32_t max_count,
Mathieu Chartier2d855952016-10-12 19:37:59 -07001933 std::vector<Handle<mirror::Object>>& instances)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001934 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartier2d855952016-10-12 19:37:59 -07001935 : scope_(scope),
1936 class_(c),
1937 max_count_(max_count),
1938 instances_(instances) {}
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001939
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001940 static void Callback(mirror::Object* obj, void* arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001941 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001942 DCHECK(arg != nullptr);
1943 InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001944 if (obj->GetClass() == instance_collector->class_.Get()) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001945 if (instance_collector->max_count_ == 0 ||
1946 instance_collector->instances_.size() < instance_collector->max_count_) {
Mathieu Chartier2d855952016-10-12 19:37:59 -07001947 instance_collector->instances_.push_back(instance_collector->scope_.NewHandle(obj));
Elliott Hughes3b78c942013-01-15 17:35:41 -08001948 }
1949 }
1950 }
1951
1952 private:
Mathieu Chartier2d855952016-10-12 19:37:59 -07001953 VariableSizedHandleScope& scope_;
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001954 Handle<mirror::Class> const class_;
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001955 const uint32_t max_count_;
Mathieu Chartier2d855952016-10-12 19:37:59 -07001956 std::vector<Handle<mirror::Object>>& instances_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001957 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1958};
1959
Mathieu Chartier2d855952016-10-12 19:37:59 -07001960void Heap::GetInstances(VariableSizedHandleScope& scope,
1961 Handle<mirror::Class> c,
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001962 int32_t max_count,
Mathieu Chartier2d855952016-10-12 19:37:59 -07001963 std::vector<Handle<mirror::Object>>& instances) {
1964 InstanceCollector collector(scope, c, max_count, instances);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001965 VisitObjects(&InstanceCollector::Callback, &collector);
Elliott Hughes3b78c942013-01-15 17:35:41 -08001966}
1967
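// Visitor that records objects holding a field reference to |object_| into |referring_objects_|,
// up to |max_count_| entries (zero means unlimited). Used by Heap::GetReferringObjects below.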
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001968class ReferringObjectsFinder {
1969 public:
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07001970 ReferringObjectsFinder(VariableSizedHandleScope& scope,
1971 Handle<mirror::Object> object,
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001972 int32_t max_count,
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07001973 std::vector<Handle<mirror::Object>>& referring_objects)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001974 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07001975 : scope_(scope),
1976 object_(object),
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001977 max_count_(max_count),
1978 referring_objects_(referring_objects) {}
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001979
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001980 static void Callback(mirror::Object* obj, void* arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001981 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001982 reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1983 }
1984
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001985 // For bitmap Visit.
1986 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1987 // annotalysis on visitors.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001988 void operator()(ObjPtr<mirror::Object> o) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier059ef3d2015-08-18 13:54:21 -07001989 o->VisitReferences(*this, VoidFunctor());
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001990 }
1991
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -07001992 // For Object::VisitReferences.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001993 void operator()(ObjPtr<mirror::Object> obj,
1994 MemberOffset offset,
1995 bool is_static ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001996 REQUIRES_SHARED(Locks::mutator_lock_) {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001997 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07001998 if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1999 referring_objects_.push_back(scope_.NewHandle(obj));
Elliott Hughes0cbaff52013-01-16 15:28:01 -08002000 }
2001 }
2002
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002003 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
2004 const {}
2005 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
2006
Elliott Hughes0cbaff52013-01-16 15:28:01 -08002007 private:
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07002008 VariableSizedHandleScope& scope_;
2009 Handle<mirror::Object> const object_;
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07002010 const uint32_t max_count_;
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07002011 std::vector<Handle<mirror::Object>>& referring_objects_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08002012 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
2013};
2014
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07002015void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
2016 Handle<mirror::Object> o,
Mathieu Chartier9d156d52016-10-06 17:44:26 -07002017 int32_t max_count,
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07002018 std::vector<Handle<mirror::Object>>& referring_objects) {
2019 ReferringObjectsFinder finder(scope, o, max_count, referring_objects);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08002020 VisitObjects(&ReferringObjectsFinder::Callback, &finder);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08002021}
2022
Ian Rogers30fab402012-01-23 15:43:46 -08002023void Heap::CollectGarbage(bool clear_soft_references) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07002024  // Even if we waited for a GC we still need to do another GC since weak references allocated
2025  // during the last GC will not necessarily have been cleared.
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002026 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
Carl Shapiro69759ea2011-07-21 18:13:35 -07002027}
2028
Mathieu Chartierdb00eaf2015-08-31 17:10:05 -07002029bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
2030 return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
2031 foreground_collector_type_ == kCollectorTypeCMS;
2032}
2033
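// Defragments the heap by copying every live object from the current main (malloc) space into
// the backup space and then swapping the two, with all threads suspended for the duration of the
// copy. Returns whether the compaction succeeded, was rejected (e.g. moving GC disabled), is
// unsupported by the current configuration, or was skipped because the VM is shutting down.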
Zuo Wangf37a88b2014-07-10 04:26:41 -07002034HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
2035 Thread* self = Thread::Current();
2036 // Inc requested homogeneous space compaction.
2037 count_requested_homogeneous_space_compaction_++;
2038 // Store performed homogeneous space compaction at a new request arrival.
Zuo Wangf37a88b2014-07-10 04:26:41 -07002039 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2040 Locks::mutator_lock_->AssertNotHeld(self);
2041 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08002042 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002043 MutexLock mu(self, *gc_complete_lock_);
2044 // Ensure there is only one GC at a time.
2045 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
2046    // Homogeneous space compaction is a copying transition; we can't run it if the moving GC
2047    // disable count is non-zero.
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07002048    // If the collector type has changed to something which doesn't benefit from homogeneous space
Zuo Wangf37a88b2014-07-10 04:26:41 -07002049    // compaction, exit.
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07002050 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
2051 !main_space_->CanMoveObjects()) {
Mathieu Chartierdb00eaf2015-08-31 17:10:05 -07002052 return kErrorReject;
2053 }
2054 if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
2055 return kErrorUnsupported;
Zuo Wangf37a88b2014-07-10 04:26:41 -07002056 }
2057 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
2058 }
2059 if (Runtime::Current()->IsShuttingDown(self)) {
2060 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2061 // cause objects to get finalized.
2062 FinishGC(self, collector::kGcTypeNone);
2063 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
2064 }
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002065 collector::GarbageCollector* collector;
2066 {
2067 ScopedSuspendAll ssa(__FUNCTION__);
2068 uint64_t start_time = NanoTime();
2069 // Launch compaction.
2070 space::MallocSpace* to_space = main_space_backup_.release();
2071 space::MallocSpace* from_space = main_space_;
2072 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2073 const uint64_t space_size_before_compaction = from_space->Size();
2074 AddSpace(to_space);
2075 // Make sure that we will have enough room to copy.
2076 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
2077 collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
2078 const uint64_t space_size_after_compaction = to_space->Size();
2079 main_space_ = to_space;
2080 main_space_backup_.reset(from_space);
2081 RemoveSpace(from_space);
2082 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
2083 // Update performed homogeneous space compaction count.
2084 count_performed_homogeneous_space_compaction_++;
2085    // Print the statistics log and resume all threads.
2086 uint64_t duration = NanoTime() - start_time;
2087 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
2088 << PrettySize(space_size_before_compaction) << " -> "
2089 << PrettySize(space_size_after_compaction) << " compact-ratio: "
2090 << std::fixed << static_cast<double>(space_size_after_compaction) /
2091 static_cast<double>(space_size_before_compaction);
2092 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07002093 // Finish GC.
Mathieu Chartier3cf22532015-07-09 15:15:09 -07002094 reference_processor_->EnqueueClearedReferences(self);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002095 GrowForUtilization(semi_space_collector_);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002096 LogGC(kGcCauseHomogeneousSpaceCompact, collector);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002097 FinishGC(self, collector::kGcTypeFull);
Mathieu Chartier598302a2015-09-23 14:52:39 -07002098 {
2099 ScopedObjectAccess soa(self);
2100 soa.Vm()->UnloadNativeLibraries();
2101 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07002102 return HomogeneousSpaceCompactResult::kSuccess;
2103}
2104
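// Switches the heap to a different collector configuration. When the transition crosses the
// moving/non-moving boundary, live objects are copied between the main malloc space and the bump
// pointer space and the backing mem maps are rebuilt; otherwise essentially only the collector
// and allocator are changed.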
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002105void Heap::TransitionCollector(CollectorType collector_type) {
2106 if (collector_type == collector_type_) {
2107 return;
2108 }
Hiroshi Yamauchia01d0662016-08-30 17:44:41 -07002109  // Collector transition must not happen with CC.
2110 CHECK(!kUseReadBarrier);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002111 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
2112 << " -> " << static_cast<int>(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002113 uint64_t start_time = NanoTime();
Ian Rogers3e5cf302014-05-20 16:40:37 -07002114 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002115 Runtime* const runtime = Runtime::Current();
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002116 Thread* const self = Thread::Current();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002117 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2118 Locks::mutator_lock_->AssertNotHeld(self);
Mathieu Chartier1d27b342014-01-28 12:51:09 -08002119 // Busy wait until we can GC (StartGC can fail if we have a non-zero
2120  // compacting_gc_disable_count_; this should rarely occur).
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002121 for (;;) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002122 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08002123 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002124 MutexLock mu(self, *gc_complete_lock_);
2125 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002126 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
Mathieu Chartiere4927f62014-08-23 13:56:03 -07002127 // Currently we only need a heap transition if we switch from a moving collector to a
2128      // non-moving one, or vice versa.
2129 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
Mathieu Chartierb38d4832014-04-10 10:56:55 -07002130 // If someone else beat us to it and changed the collector before we could, exit.
2131 // This is safe to do before the suspend all since we set the collector_type_running_ before
2132 // we exit the loop. If another thread attempts to do the heap transition before we exit,
2133 // then it would get blocked on WaitForGcToCompleteLocked.
2134 if (collector_type == collector_type_) {
2135 return;
2136 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002137      // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
2138 if (!copying_transition || disable_moving_gc_count_ == 0) {
2139 // TODO: Not hard code in semi-space collector?
2140 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
2141 break;
2142 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002143 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002144 usleep(1000);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002145 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002146 if (runtime->IsShuttingDown(self)) {
Hiroshi Yamauchia6a8d142014-05-12 16:57:33 -07002147 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2148 // cause objects to get finalized.
2149 FinishGC(self, collector::kGcTypeNone);
2150 return;
2151 }
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002152 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002153 {
2154 ScopedSuspendAll ssa(__FUNCTION__);
2155 switch (collector_type) {
2156 case kCollectorTypeSS: {
2157 if (!IsMovingGc(collector_type_)) {
2158 // Create the bump pointer space from the backup space.
2159 CHECK(main_space_backup_ != nullptr);
2160 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
2161          // We are transitioning from a non moving GC to a moving GC; since we copied from the bump
2162          // pointer space during the last transition, it will be protected.
2163 CHECK(mem_map != nullptr);
Hiroshi Yamauchic1276c82014-08-07 10:27:17 -07002164 mem_map->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002165 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
2166 mem_map.release());
2167 AddSpace(bump_pointer_space_);
2168 collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
2169 // Use the now empty main space mem map for the bump pointer temp space.
2170 mem_map.reset(main_space_->ReleaseMemMap());
2171 // Unset the pointers just in case.
2172 if (dlmalloc_space_ == main_space_) {
2173 dlmalloc_space_ = nullptr;
2174 } else if (rosalloc_space_ == main_space_) {
2175 rosalloc_space_ = nullptr;
2176 }
2177        // Remove the main space so that we don't try to trim it; this doesn't work for debug
2178 // builds since RosAlloc attempts to read the magic number from a protected page.
2179 RemoveSpace(main_space_);
2180 RemoveRememberedSet(main_space_);
2181 delete main_space_; // Delete the space since it has been removed.
2182 main_space_ = nullptr;
2183 RemoveRememberedSet(main_space_backup_.get());
2184 main_space_backup_.reset(nullptr); // Deletes the space.
2185 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
2186 mem_map.release());
2187 AddSpace(temp_space_);
Hiroshi Yamauchic1276c82014-08-07 10:27:17 -07002188 }
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002189 break;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002190 }
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002191 case kCollectorTypeMS:
2192 // Fall through.
2193 case kCollectorTypeCMS: {
2194 if (IsMovingGc(collector_type_)) {
2195 CHECK(temp_space_ != nullptr);
2196 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
2197 RemoveSpace(temp_space_);
2198 temp_space_ = nullptr;
2199 mem_map->Protect(PROT_READ | PROT_WRITE);
2200 CreateMainMallocSpace(mem_map.get(),
2201 kDefaultInitialSize,
2202 std::min(mem_map->Size(), growth_limit_),
2203 mem_map->Size());
2204 mem_map.release();
2205 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
2206 AddSpace(main_space_);
2207 collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
2208 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
2209 RemoveSpace(bump_pointer_space_);
2210 bump_pointer_space_ = nullptr;
2211 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
2212 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
2213 if (kIsDebugBuild && kUseRosAlloc) {
2214 mem_map->Protect(PROT_READ | PROT_WRITE);
2215 }
2216 main_space_backup_.reset(CreateMallocSpaceFromMemMap(
2217 mem_map.get(),
2218 kDefaultInitialSize,
2219 std::min(mem_map->Size(), growth_limit_),
2220 mem_map->Size(),
2221 name,
2222 true));
2223 if (kIsDebugBuild && kUseRosAlloc) {
2224 mem_map->Protect(PROT_NONE);
2225 }
2226 mem_map.release();
2227 }
2228 break;
2229 }
2230 default: {
2231 LOG(FATAL) << "Attempted to transition to invalid collector type "
2232 << static_cast<size_t>(collector_type);
2233 break;
2234 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002235 }
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002236 ChangeCollector(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002237 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002238 // Can't call into java code with all threads suspended.
Mathieu Chartier3cf22532015-07-09 15:15:09 -07002239 reference_processor_->EnqueueClearedReferences(self);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002240 uint64_t duration = NanoTime() - start_time;
Mathieu Chartierafe49982014-03-27 10:55:04 -07002241 GrowForUtilization(semi_space_collector_);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002242 DCHECK(collector != nullptr);
2243 LogGC(kGcCauseCollectorTransition, collector);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002244 FinishGC(self, collector::kGcTypeFull);
Mathieu Chartier598302a2015-09-23 14:52:39 -07002245 {
2246 ScopedObjectAccess soa(self);
2247 soa.Vm()->UnloadNativeLibraries();
2248 }
Ian Rogers3e5cf302014-05-20 16:40:37 -07002249 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002250 int32_t delta_allocated = before_allocated - after_allocated;
Mathieu Chartier19d46b42014-06-17 15:04:40 -07002251 std::string saved_str;
2252 if (delta_allocated >= 0) {
2253 saved_str = " saved at least " + PrettySize(delta_allocated);
2254 } else {
2255 saved_str = " expanded " + PrettySize(-delta_allocated);
2256 }
Mathieu Chartierf8cb1782016-03-18 18:45:41 -07002257 VLOG(heap) << "Collector transition to " << collector_type << " took "
2258 << PrettyDuration(duration) << saved_str;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002259}
2260
Mathieu Chartier0de9f732013-11-22 17:58:48 -08002261void Heap::ChangeCollector(CollectorType collector_type) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002262 // TODO: Only do this with all mutators suspended to avoid races.
2263 if (collector_type != collector_type_) {
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002264 if (collector_type == kCollectorTypeMC) {
2265 // Don't allow mark compact unless support is compiled in.
2266 CHECK(kMarkCompactSupport);
2267 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002268 collector_type_ = collector_type;
2269 gc_plan_.clear();
2270 switch (collector_type_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002271 case kCollectorTypeCC: {
2272 gc_plan_.push_back(collector::kGcTypeFull);
2273 if (use_tlab_) {
2274 ChangeAllocator(kAllocatorTypeRegionTLAB);
2275 } else {
2276 ChangeAllocator(kAllocatorTypeRegion);
2277 }
2278 break;
2279 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002280 case kCollectorTypeMC: // Fall-through.
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002281 case kCollectorTypeSS: // Fall-through.
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08002282 case kCollectorTypeGSS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002283 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08002284 if (use_tlab_) {
2285 ChangeAllocator(kAllocatorTypeTLAB);
2286 } else {
2287 ChangeAllocator(kAllocatorTypeBumpPointer);
2288 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002289 break;
2290 }
2291 case kCollectorTypeMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002292 gc_plan_.push_back(collector::kGcTypeSticky);
2293 gc_plan_.push_back(collector::kGcTypePartial);
2294 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002295 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002296 break;
2297 }
2298 case kCollectorTypeCMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002299 gc_plan_.push_back(collector::kGcTypeSticky);
2300 gc_plan_.push_back(collector::kGcTypePartial);
2301 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002302 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002303 break;
2304 }
2305 default: {
Ian Rogers2c4257b2014-10-24 14:20:06 -07002306 UNIMPLEMENTED(FATAL);
2307 UNREACHABLE();
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002308 }
Mathieu Chartier0de9f732013-11-22 17:58:48 -08002309 }
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002310 if (IsGcConcurrent()) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002311 concurrent_start_bytes_ =
2312 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
2313 } else {
2314 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Mathieu Chartier0de9f732013-11-22 17:58:48 -08002315 }
2316 }
2317}
2318
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002319// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
Ian Rogers6fac4472014-02-25 17:01:10 -08002320class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002321 public:
Roland Levillain3887c462015-08-12 18:15:42 +01002322 ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002323 : SemiSpace(heap, false, "zygote collector"),
2324 bin_live_bitmap_(nullptr),
2325 bin_mark_bitmap_(nullptr),
2326 is_running_on_memory_tool_(is_running_on_memory_tool) {}
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002327
2328 void BuildBins(space::ContinuousSpace* space) {
2329 bin_live_bitmap_ = space->GetLiveBitmap();
2330 bin_mark_bitmap_ = space->GetMarkBitmap();
2331 BinContext context;
2332 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
2333 context.collector_ = this;
2334 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2335 // Note: This requires traversing the space in increasing order of object addresses.
2336 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
2337 // Add the last bin which spans after the last object to the end of the space.
2338 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
2339 }
2340
2341 private:
2342 struct BinContext {
2343 uintptr_t prev_; // The end of the previous object.
2344 ZygoteCompactingCollector* collector_;
2345 };
2346 // Maps from bin sizes to locations.
2347 std::multimap<size_t, uintptr_t> bins_;
2348 // Live bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002349 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002350 // Mark bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002351 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002352 const bool is_running_on_memory_tool_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002353
2354 static void Callback(mirror::Object* obj, void* arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002355 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002356 DCHECK(arg != nullptr);
2357 BinContext* context = reinterpret_cast<BinContext*>(arg);
2358 ZygoteCompactingCollector* collector = context->collector_;
2359 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2360 size_t bin_size = object_addr - context->prev_;
2361 // Add the bin consisting of the end of the previous object to the start of the current object.
2362    // Add the bin spanning from the end of the previous object to the start of the current object.
2363 context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
2364 }
2365
2366 void AddBin(size_t size, uintptr_t position) {
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002367 if (is_running_on_memory_tool_) {
2368 MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2369 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002370 if (size != 0) {
2371 bins_.insert(std::make_pair(size, position));
2372 }
2373 }
2374
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07002375 virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002376 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2377 // allocator.
2378 return false;
2379 }
2380
2381 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
Mathieu Chartier90443472015-07-16 20:32:27 -07002382 REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002383 size_t obj_size = obj->SizeOf();
2384 size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08002385 mirror::Object* forward_address;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002386 // Find the smallest bin which we can move obj in.
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002387 auto it = bins_.lower_bound(alloc_size);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002388 if (it == bins_.end()) {
2389      // No available space in the bins; place it in the target space instead (grows the zygote
2390 // space).
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002391 size_t bytes_allocated, dummy;
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002392 forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002393 if (to_space_live_bitmap_ != nullptr) {
2394 to_space_live_bitmap_->Set(forward_address);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002395 } else {
2396 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2397 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002398 }
2399 } else {
2400 size_t size = it->first;
2401 uintptr_t pos = it->second;
2402 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
2403 forward_address = reinterpret_cast<mirror::Object*>(pos);
2404 // Set the live and mark bits so that sweeping system weaks works properly.
2405 bin_live_bitmap_->Set(forward_address);
2406 bin_mark_bitmap_->Set(forward_address);
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002407 DCHECK_GE(size, alloc_size);
2408 // Add a new bin with the remaining space.
2409 AddBin(size - alloc_size, pos + alloc_size);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002410 }
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002411 // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
2412 memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002413 if (kUseBakerReadBarrier) {
2414 obj->AssertReadBarrierState();
2415 forward_address->AssertReadBarrierState();
Hiroshi Yamauchi9d04a202014-01-31 13:35:49 -08002416 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002417 return forward_address;
2418 }
2419};
2420
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002421void Heap::UnBindBitmaps() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002422 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002423 for (const auto& space : GetContinuousSpaces()) {
2424 if (space->IsContinuousMemMapAllocSpace()) {
2425 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2426 if (alloc_space->HasBoundBitmaps()) {
2427 alloc_space->UnBindBitmaps();
2428 }
2429 }
2430 }
2431}
2432
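// Called in the zygote process before forking: compacts the existing allocations into a dense,
// immutable zygote space (presumably so its pages can be shared copy-on-write by forked app
// processes), then sets up fresh allocation spaces, a mod-union table and remembered sets for
// post-fork allocations.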
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002433void Heap::PreZygoteFork() {
Mathieu Chartierfaed9952015-03-31 16:28:53 -07002434 if (!HasZygoteSpace()) {
2435 // We still want to GC in case there is some unreachable non moving objects that could cause a
2436 // suboptimal bin packing when we compact the zygote space.
2437 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
Mathieu Chartier76ce9172016-01-27 10:44:20 -08002438 // Trim the pages at the end of the non moving space. Trim while not holding zygote lock since
2439 // the trim process may require locking the mutator lock.
2440 non_moving_space_->Trim();
Mathieu Chartierfaed9952015-03-31 16:28:53 -07002441 }
Ian Rogers81d425b2012-09-27 16:03:43 -07002442 Thread* self = Thread::Current();
2443 MutexLock mu(self, zygote_creation_lock_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002444 // Try to see if we have any Zygote spaces.
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002445 if (HasZygoteSpace()) {
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002446 return;
2447 }
Mathieu Chartierea0831f2015-12-29 13:17:37 -08002448 Runtime::Current()->GetInternTable()->AddNewTable();
Mathieu Chartierc2e20622014-11-03 11:41:47 -08002449 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002450 VLOG(heap) << "Starting PreZygoteFork";
Mathieu Chartier31f44142014-04-08 14:40:03 -07002451  // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote
2452 // there.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002453 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07002454 const bool same_space = non_moving_space_ == main_space_;
Mathieu Chartier31f44142014-04-08 14:40:03 -07002455 if (kCompactZygote) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002456 // Temporarily disable rosalloc verification because the zygote
2457 // compaction will mess up the rosalloc internal metadata.
2458 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002459 ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002460 zygote_collector.BuildBins(non_moving_space_);
Mathieu Chartier50482232013-11-21 11:48:14 -08002461 // Create a new bump pointer space which we will compact into.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002462 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2463 non_moving_space_->Limit());
2464 // Compact the bump pointer space to a new zygote bump pointer space.
Mathieu Chartier31f44142014-04-08 14:40:03 -07002465 bool reset_main_space = false;
2466 if (IsMovingGc(collector_type_)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002467 if (collector_type_ == kCollectorTypeCC) {
2468 zygote_collector.SetFromSpace(region_space_);
2469 } else {
2470 zygote_collector.SetFromSpace(bump_pointer_space_);
2471 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07002472 } else {
2473 CHECK(main_space_ != nullptr);
Hiroshi Yamauchid04495e2015-03-11 19:09:07 -07002474 CHECK_NE(main_space_, non_moving_space_)
2475 << "Does not make sense to compact within the same space";
Mathieu Chartier31f44142014-04-08 14:40:03 -07002476 // Copy from the main space.
2477 zygote_collector.SetFromSpace(main_space_);
2478 reset_main_space = true;
2479 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002480 zygote_collector.SetToSpace(&target_space);
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07002481 zygote_collector.SetSwapSemiSpaces(false);
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08002482 zygote_collector.Run(kGcCauseCollectorTransition, false);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002483 if (reset_main_space) {
2484 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2485 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2486 MemMap* mem_map = main_space_->ReleaseMemMap();
2487 RemoveSpace(main_space_);
Mathieu Chartier96bcd452014-06-17 09:50:02 -07002488 space::Space* old_main_space = main_space_;
Mathieu Chartier0310da52014-12-01 13:40:48 -08002489 CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
2490 mem_map->Size());
Mathieu Chartier96bcd452014-06-17 09:50:02 -07002491 delete old_main_space;
Mathieu Chartier31f44142014-04-08 14:40:03 -07002492 AddSpace(main_space_);
2493 } else {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002494 if (collector_type_ == kCollectorTypeCC) {
2495 region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier7ec38dc2016-10-07 15:24:46 -07002496 // Evacuated everything out of the region space, clear the mark bitmap.
2497 region_space_->GetMarkBitmap()->Clear();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002498 } else {
2499 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2500 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07002501 }
2502 if (temp_space_ != nullptr) {
2503 CHECK(temp_space_->IsEmpty());
2504 }
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002505 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2506 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002507 // Update the end and write out image.
2508 non_moving_space_->SetEnd(target_space.End());
2509 non_moving_space_->SetLimit(target_space.Limit());
Mathieu Chartierfaed9952015-03-31 16:28:53 -07002510 VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002511 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07002512 // Change the collector to the post zygote one.
Mathieu Chartier31f44142014-04-08 14:40:03 -07002513 ChangeCollector(foreground_collector_type_);
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002514 // Save the old space so that we can remove it after we complete creating the zygote space.
2515 space::MallocSpace* old_alloc_space = non_moving_space_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002516 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002517 // the remaining available space.
2518 // Remove the old space before creating the zygote space since creating the zygote space sets
Mathieu Chartier2cebb242015-04-21 16:50:40 -07002519 // the old alloc space's bitmaps to null.
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002520 RemoveSpace(old_alloc_space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002521 if (collector::SemiSpace::kUseRememberedSet) {
2522 // Sanity bound check.
2523 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2524 // Remove the remembered set for the now zygote space (the old
2525 // non-moving space). Note now that we have compacted objects into
2526 // the zygote space, the data in the remembered set is no longer
2527 // needed. The zygote space will instead have a mod-union table
2528 // from this point on.
2529 RemoveRememberedSet(old_alloc_space);
2530 }
Mathieu Chartier7247af52014-11-19 10:51:42 -08002531 // Remaining space becomes the new non moving space.
2532 zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002533 &non_moving_space_);
Mathieu Chartierb363f662014-07-16 13:28:58 -07002534 CHECK(!non_moving_space_->CanMoveObjects());
2535 if (same_space) {
2536 main_space_ = non_moving_space_;
2537 SetSpaceAsDefault(main_space_);
2538 }
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002539 delete old_alloc_space;
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002540 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2541 AddSpace(zygote_space_);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002542 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2543 AddSpace(non_moving_space_);
Mathieu Chartier36a270a2016-07-28 18:08:51 -07002544 if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
2545 // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
2546 // safe since we mark all of the objects that may reference non immune objects as gray.
2547 zygote_space_->GetLiveBitmap()->VisitMarkedRange(
2548 reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
2549 reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002550 [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier36a270a2016-07-28 18:08:51 -07002551 CHECK(obj->AtomicSetMarkBit(0, 1));
2552 });
2553 }
2554
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002555 // Create the zygote space mod union table.
2556 accounting::ModUnionTable* mod_union_table =
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07002557 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002558 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07002559
2560 if (collector_type_ != kCollectorTypeCC) {
2561 // Set all the cards in the mod-union table since we don't know which objects contain references
2562 // to large objects.
2563 mod_union_table->SetCards();
2564 } else {
2565 // For CC we never collect zygote large objects. This means we do not need to set the cards for
2566 // the zygote mod-union table and we can also clear all of the existing image mod-union tables.
2567 // The existing mod-union tables are only for image spaces and may only reference zygote and
2568 // image objects.
2569 for (auto& pair : mod_union_tables_) {
2570 CHECK(pair.first->IsImageSpace());
2571 CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage());
2572 accounting::ModUnionTable* table = pair.second;
2573 table->ClearTable();
2574 }
2575 }
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002576 AddModUnionTable(mod_union_table);
Mathieu Chartierf6c2a272015-06-03 17:32:42 -07002577 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002578 if (collector::SemiSpace::kUseRememberedSet) {
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002579 // Add a new remembered set for the post-zygote non-moving space.
2580 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2581 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2582 non_moving_space_);
2583 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2584 << "Failed to create post-zygote non-moving space remembered set";
2585 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2586 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002587}
2588
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002589void Heap::FlushAllocStack() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002590 MarkAllocStackAsLive(allocation_stack_.get());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002591 allocation_stack_->Reset();
2592}
2593
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002594void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2595 accounting::ContinuousSpaceBitmap* bitmap2,
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07002596 accounting::LargeObjectBitmap* large_objects,
Ian Rogers1d54e732013-05-02 21:10:01 -07002597 accounting::ObjectStack* stack) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002598 DCHECK(bitmap1 != nullptr);
2599 DCHECK(bitmap2 != nullptr);
Mathieu Chartiercb535da2015-01-23 13:50:03 -08002600 const auto* limit = stack->End();
2601 for (auto* it = stack->Begin(); it != limit; ++it) {
2602 const mirror::Object* obj = it->AsMirrorPtr();
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002603 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2604 if (bitmap1->HasAddress(obj)) {
2605 bitmap1->Set(obj);
2606 } else if (bitmap2->HasAddress(obj)) {
2607 bitmap2->Set(obj);
2608 } else {
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07002609 DCHECK(large_objects != nullptr);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002610 large_objects->Set(obj);
2611 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07002612 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002613 }
2614}
2615
Mathieu Chartier590fee92013-09-13 13:46:47 -07002616void Heap::SwapSemiSpaces() {
Mathieu Chartier31f44142014-04-08 14:40:03 -07002617 CHECK(bump_pointer_space_ != nullptr);
2618 CHECK(temp_space_ != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002619 std::swap(bump_pointer_space_, temp_space_);
2620}
2621
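// Copies live objects from |source_space| into |target_space| using the semi-space collector when
// the two spaces differ; otherwise falls back to the in-place mark-compact collector, which only
// supports bump pointer spaces.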
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002622collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2623 space::ContinuousMemMapAllocSpace* source_space,
2624 GcCause gc_cause) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002625 CHECK(kMovingCollector);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002626 if (target_space != source_space) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002627 // Don't swap spaces since this isn't a typical semi space collection.
2628 semi_space_collector_->SetSwapSemiSpaces(false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002629 semi_space_collector_->SetFromSpace(source_space);
2630 semi_space_collector_->SetToSpace(target_space);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002631 semi_space_collector_->Run(gc_cause, false);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002632 return semi_space_collector_;
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002633 } else {
2634 CHECK(target_space->IsBumpPointerSpace())
2635 << "In-place compaction is only supported for bump pointer spaces";
2636 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2637 mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002638 return mark_compact_collector_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002639 }
2640}
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002641
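// Central entry point for running a collection: waits for any in-progress GC, picks the collector
// matching the current collector/allocator configuration, runs it, then requests a heap trim,
// enqueues cleared references, and updates the heap growth targets.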
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07002642collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2643 GcCause gc_cause,
Ian Rogers1d54e732013-05-02 21:10:01 -07002644 bool clear_soft_references) {
Ian Rogers81d425b2012-09-27 16:03:43 -07002645 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002646 Runtime* runtime = Runtime::Current();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002647 // If the heap can't run the GC, silently fail and return that no GC was run.
2648 switch (gc_type) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002649 case collector::kGcTypePartial: {
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002650 if (!HasZygoteSpace()) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002651 return collector::kGcTypeNone;
2652 }
2653 break;
2654 }
2655 default: {
2656      // Other GC types don't have any special cases which would make them not runnable. The main
2657      // case here is full GC.
2658 }
2659 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002660 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Ian Rogers81d425b2012-09-27 16:03:43 -07002661 Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers120f1c72012-09-28 17:17:10 -07002662 if (self->IsHandlingStackOverflow()) {
Mathieu Chartier50c138f2015-01-07 16:00:03 -08002663 // If we are throwing a stack overflow error we probably don't have enough remaining stack
2664 // space to run the GC.
2665 return collector::kGcTypeNone;
Ian Rogers120f1c72012-09-28 17:17:10 -07002666 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002667 bool compacting_gc;
2668 {
2669 gc_complete_lock_->AssertNotHeld(self);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08002670 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002671 MutexLock mu(self, *gc_complete_lock_);
2672 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002673 WaitForGcToCompleteLocked(gc_cause, self);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002674 compacting_gc = IsMovingGc(collector_type_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002675    // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2676 if (compacting_gc && disable_moving_gc_count_ != 0) {
2677 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2678 return collector::kGcTypeNone;
2679 }
Mathieu Chartier51168372015-08-12 16:40:32 -07002680 if (gc_disabled_for_shutdown_) {
2681 return collector::kGcTypeNone;
2682 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002683 collector_type_running_ = collector_type_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002684 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07002685 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2686 ++runtime->GetStats()->gc_for_alloc_count;
2687 ++self->GetStats()->gc_for_alloc_count;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002688 }
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08002689 const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
2690 // Approximate heap size.
2691 ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
Mathieu Chartier65db8802012-11-20 12:36:46 -08002692
Ian Rogers1d54e732013-05-02 21:10:01 -07002693 DCHECK_LT(gc_type, collector::kGcTypeMax);
2694 DCHECK_NE(gc_type, collector::kGcTypeNone);
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002695
Mathieu Chartier590fee92013-09-13 13:46:47 -07002696 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier50482232013-11-21 11:48:14 -08002697 // TODO: Clean this up.
Mathieu Chartier1d27b342014-01-28 12:51:09 -08002698 if (compacting_gc) {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08002699 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002700 current_allocator_ == kAllocatorTypeTLAB ||
2701 current_allocator_ == kAllocatorTypeRegion ||
2702 current_allocator_ == kAllocatorTypeRegionTLAB);
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002703 switch (collector_type_) {
2704 case kCollectorTypeSS:
2705 // Fall-through.
2706 case kCollectorTypeGSS:
2707 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2708 semi_space_collector_->SetToSpace(temp_space_);
2709 semi_space_collector_->SetSwapSemiSpaces(true);
2710 collector = semi_space_collector_;
2711 break;
2712 case kCollectorTypeCC:
2713 collector = concurrent_copying_collector_;
2714 break;
2715 case kCollectorTypeMC:
2716 mark_compact_collector_->SetSpace(bump_pointer_space_);
2717 collector = mark_compact_collector_;
2718 break;
2719 default:
2720 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07002721 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002722 if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002723 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Hiroshi Yamauchi6edb9ae2016-02-08 14:18:21 -08002724 if (kIsDebugBuild) {
2725 // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
2726 temp_space_->GetMemMap()->TryReadable();
2727 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002728 CHECK(temp_space_->IsEmpty());
2729 }
2730    gc_type = collector::kGcTypeFull;  // TODO: Don't hard code this in.
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002731 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2732 current_allocator_ == kAllocatorTypeDlMalloc) {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002733 collector = FindCollectorByGcType(gc_type);
Mathieu Chartier50482232013-11-21 11:48:14 -08002734 } else {
2735 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002736 }
Mathieu Chartier08cef222014-10-22 17:18:34 -07002737 if (IsGcConcurrent()) {
2738 // Disable concurrent GC check so that we don't have spammy JNI requests.
2739 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2740 // calculated in the same thread so that there aren't any races that can cause it to become
2741    // permanently disabled. b/17942071
2742 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2743 }
Nicolas Geoffrayb6e20ae2016-03-07 14:29:04 +00002744
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002745 CHECK(collector != nullptr)
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002746 << "Could not find garbage collector with collector_type="
2747 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002748 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002749 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2750 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartiera5eae692014-12-17 17:56:03 -08002751 RequestTrim(self);
Mathieu Chartier39e32612013-11-12 16:28:05 -08002752 // Enqueue cleared references.
Mathieu Chartier3cf22532015-07-09 15:15:09 -07002753 reference_processor_->EnqueueClearedReferences(self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002754 // Grow the heap so that we know when to perform the next GC.
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08002755 GrowForUtilization(collector, bytes_allocated_before_gc);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002756 LogGC(gc_cause, collector);
2757 FinishGC(self, gc_type);
2758 // Inform DDMS that a GC completed.
2759 Dbg::GcDidFinish();
Mathieu Chartier598302a2015-09-23 14:52:39 -07002760 // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2761 // deadlocks in case the JNI_OnUnload function does allocations.
2762 {
2763 ScopedObjectAccess soa(self);
2764 soa.Vm()->UnloadNativeLibraries();
2765 }
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002766 return gc_type;
2767}
2768
2769void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002770 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2771 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002772 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002773 // (mutator time blocked >= long_pause_log_threshold_).
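  // A logged line looks roughly like the following (values are illustrative, not from a real run):
  //   Explicit concurrent mark sweep GC freed 10412(521KB) AllocSpace objects, 6(96KB) LOS objects,
  //   33% free, 25MB/38MB, paused 1.2ms total 67ms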
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002774 bool log_gc = gc_cause == kGcCauseExplicit;
2775 if (!log_gc && CareAboutPauseTimes()) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002776 // GC for alloc pauses the allocating thread, so consider it as a pause.
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002777 log_gc = duration > long_gc_log_threshold_ ||
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002778 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002779 for (uint64_t pause : pause_times) {
2780 log_gc = log_gc || pause >= long_pause_log_threshold_;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002781 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002782 }
2783 if (log_gc) {
2784 const size_t percent_free = GetPercentFree();
2785 const size_t current_heap_size = GetBytesAllocated();
2786 const size_t total_memory = GetTotalMemory();
2787 std::ostringstream pause_string;
2788 for (size_t i = 0; i < pause_times.size(); ++i) {
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002789 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2790 << ((i != pause_times.size() - 1) ? "," : "");
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002791 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002792 LOG(INFO) << gc_cause << " " << collector->GetName()
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002793 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2794 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2795 << current_gc_iteration_.GetFreedLargeObjects() << "("
2796 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002797 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2798 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2799 << " total " << PrettyDuration((duration / 1000) * 1000);
Ian Rogersc7dd2952014-10-21 23:31:19 -07002800 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002801 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002802}
Mathieu Chartiera6399032012-06-11 18:49:50 -07002803
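// Clears the "collector running" state, records blocking-GC statistics for the current histogram
// window, and wakes any threads blocked in WaitForGcToComplete.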
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002804void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2805 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002806 collector_type_running_ = kCollectorTypeNone;
2807 if (gc_type != collector::kGcTypeNone) {
2808 last_gc_type_ = gc_type;
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07002809
2810 // Update stats.
2811 ++gc_count_last_window_;
2812 if (running_collection_is_blocking_) {
2813 // If the currently running collection was a blocking one,
2814 // increment the counters and reset the flag.
2815 ++blocking_gc_count_;
2816 blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2817 ++blocking_gc_count_last_window_;
2818 }
2819 // Update the gc count rate histograms if due.
2820 UpdateGcCountRateHistograms();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002821 }
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07002822 // Reset.
2823 running_collection_is_blocking_ = false;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002824 // Wake anyone who may have been waiting for the GC to complete.
2825 gc_complete_cond_->Broadcast(self);
2826}
2827
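// Folds the per-window GC counts into the count rate histograms, emitting zero entries for any
// elapsed windows that saw no GC, then resets the window counters.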
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07002828void Heap::UpdateGcCountRateHistograms() {
2829 // Invariant: if the time since the last update includes more than
2830  // one window, all the GC runs (if > 0) must have happened in the first
2831 // window because otherwise the update must have already taken place
2832 // at an earlier GC run. So, we report the non-first windows with
2833 // zero counts to the histograms.
2834 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2835 uint64_t now = NanoTime();
2836 DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2837 uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2838 uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2839 if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2840 // Record the first window.
2841 gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
2842 blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2843 blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2844 // Record the other windows (with zero counts).
2845 for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2846 gc_count_rate_histogram_.AddValue(0);
2847 blocking_gc_count_rate_histogram_.AddValue(0);
2848 }
2849 // Update the last update time and reset the counters.
2850 last_update_time_gc_count_rate_histograms_ =
2851 (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2852 gc_count_last_window_ = 1; // Include the current run.
2853 blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2854 }
2855 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2856}
2857
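// Logs every root that points at the given object; used by the heap verification code below to
// explain where a suspicious reference is held.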
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002858class RootMatchesObjectVisitor : public SingleRootVisitor {
2859 public:
2860 explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2861
2862 void VisitRoot(mirror::Object* root, const RootInfo& info)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002863 OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002864 if (root == obj_) {
2865 LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2866 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002867 }
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002868
2869 private:
2870 const mirror::Object* const obj_;
2871};
2872
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002873
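// Diagnostic visitor for card re-scans: it only logs the objects that would have been rescanned.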
2874class ScanVisitor {
2875 public:
Brian Carlstromdf629502013-07-17 22:39:56 -07002876 void operator()(const mirror::Object* obj) const {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002877 LOG(ERROR) << "Would have rescanned object " << obj;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002878 }
2879};
2880
Ian Rogers1d54e732013-05-02 21:10:01 -07002881// Verify a reference from an object.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002882class VerifyReferenceVisitor : public SingleRootVisitor {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002883 public:
Roland Levillain3887c462015-08-12 18:15:42 +01002884 VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002885 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002886 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
Ian Rogers1d54e732013-05-02 21:10:01 -07002887
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002888 size_t GetFailureCount() const {
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002889 return fail_count_->LoadSequentiallyConsistent();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002890 }
2891
Mathieu Chartier31e88222016-10-14 18:43:19 -07002892 void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002893 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002894 if (verify_referent_) {
Mathieu Chartier31e88222016-10-14 18:43:19 -07002895 VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002896 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08002897 }
2898
Mathieu Chartier31e88222016-10-14 18:43:19 -07002899 void operator()(ObjPtr<mirror::Object> obj,
2900 MemberOffset offset,
2901 bool is_static ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002902 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier31e88222016-10-14 18:43:19 -07002903 VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
Mathieu Chartier407f7022014-02-18 14:37:05 -08002904 }
2905
Mathieu Chartier31e88222016-10-14 18:43:19 -07002906 bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002907 return heap_->IsLiveObjectLocked(obj, true, false, true);
2908 }
2909
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002910 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002911 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002912 if (!root->IsNull()) {
2913 VisitRoot(root);
2914 }
2915 }
2916 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002917 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002918 const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2919 root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2920 }
2921
2922 virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002923 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002924 if (root == nullptr) {
2925 LOG(ERROR) << "Root is null with info " << root_info.GetType();
2926 } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
David Sehr709b0702016-10-13 09:12:37 -07002927 LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
Mathieu Chartiere34fa1d2015-01-14 14:55:47 -08002928 << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002929 }
2930 }
2931
2932 private:
Mathieu Chartier407f7022014-02-18 14:37:05 -08002933 // TODO: Fix the no thread safety analysis.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002934 // Returns false on failure.
2935 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002936 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002937 if (ref == nullptr || IsLive(ref)) {
2938 // Verify that the reference is live.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002939 return true;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002940 }
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002941 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002942      // Print message only on the first failure to prevent spam.
2943 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002944 }
2945 if (obj != nullptr) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002946 // Only do this part for non roots.
Ian Rogers1d54e732013-05-02 21:10:01 -07002947 accounting::CardTable* card_table = heap_->GetCardTable();
2948 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2949 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Ian Rogers13735952014-10-08 12:43:28 -07002950 uint8_t* card_addr = card_table->CardFromAddr(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002951 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2952 << offset << "\n card value = " << static_cast<int>(*card_addr);
2953 if (heap_->IsValidObjectAddress(obj->GetClass())) {
David Sehr709b0702016-10-13 09:12:37 -07002954 LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002955 } else {
2956 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002957 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002958
Mathieu Chartierb363f662014-07-16 13:28:58 -07002959 // Attempt to find the class inside of the recently freed objects.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002960 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2961 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2962 space::MallocSpace* space = ref_space->AsMallocSpace();
2963 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2964 if (ref_class != nullptr) {
2965 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
David Sehr709b0702016-10-13 09:12:37 -07002966 << ref_class->PrettyClass();
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002967 } else {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002968 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002969 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002970 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002971
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002972 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2973 ref->GetClass()->IsClass()) {
David Sehr709b0702016-10-13 09:12:37 -07002974 LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002975 } else {
2976 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2977 << ") is not a valid heap address";
2978 }
2979
Ian Rogers13735952014-10-08 12:43:28 -07002980 card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002981 void* cover_begin = card_table->AddrFromCard(card_addr);
2982 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2983 accounting::CardTable::kCardSize);
2984 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2985 << "-" << cover_end;
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002986 accounting::ContinuousSpaceBitmap* bitmap =
2987 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002988
2989 if (bitmap == nullptr) {
2990 LOG(ERROR) << "Object " << obj << " has no bitmap";
Mathieu Chartier4e305412014-02-19 10:54:44 -08002991 if (!VerifyClassClass(obj->GetClass())) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002992 LOG(ERROR) << "Object " << obj << " failed class verification!";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002993 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002994 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07002995 // Print out how the object is live.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002996 if (bitmap->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002997 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2998 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002999 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003000 LOG(ERROR) << "Object " << obj << " found in allocation stack";
3001 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003002 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003003 LOG(ERROR) << "Object " << obj << " found in live stack";
3004 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003005 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
3006 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
3007 }
3008 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
3009 LOG(ERROR) << "Ref " << ref << " found in live stack";
3010 }
Ian Rogers1d54e732013-05-02 21:10:01 -07003011 // Attempt to see if the card table missed the reference.
3012 ScanVisitor scan_visitor;
Ian Rogers13735952014-10-08 12:43:28 -07003013 uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
Lei Li727b2942015-01-15 11:26:34 +08003014 card_table->Scan<false>(bitmap, byte_cover_begin,
3015 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003016 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003017
3018 // Search to see if any of the roots reference our object.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07003019 RootMatchesObjectVisitor visitor1(obj);
3020 Runtime::Current()->VisitRoots(&visitor1);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003021 // Search to see if any of the roots reference our reference.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07003022 RootMatchesObjectVisitor visitor2(ref);
3023 Runtime::Current()->VisitRoots(&visitor2);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003024 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003025 return false;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003026 }
3027
Ian Rogers1d54e732013-05-02 21:10:01 -07003028 Heap* const heap_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003029 Atomic<size_t>* const fail_count_;
3030 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003031};
3032
Ian Rogers1d54e732013-05-02 21:10:01 -07003033// Verify all references within an object, for use with HeapBitmap::Visit.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003034class VerifyObjectVisitor {
3035 public:
Roland Levillain3887c462015-08-12 18:15:42 +01003036 VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003037 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003038
Mathieu Chartierda7c6502015-07-23 16:01:26 -07003039 void operator()(mirror::Object* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07003040 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003041    // Note: we are verifying the references in obj but not obj itself, since obj must
3042    // be live or else we would not have found it in the live bitmap.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003043 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003044    // The class doesn't count as a reference but we should verify it anyway.
Mathieu Chartier059ef3d2015-08-18 13:54:21 -07003045 obj->VisitReferences(visitor, visitor);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003046 }
3047
Mathieu Chartier590fee92013-09-13 13:46:47 -07003048 static void VisitCallback(mirror::Object* obj, void* arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07003049 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003050 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
3051 visitor->operator()(obj);
3052 }
3053
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07003054 void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07003055 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
3056 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
3057 Runtime::Current()->VisitRoots(&visitor);
3058 }
3059
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003060 size_t GetFailureCount() const {
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07003061 return fail_count_->LoadSequentiallyConsistent();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003062 }
3063
3064 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07003065 Heap* const heap_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003066 Atomic<size_t>* const fail_count_;
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07003067 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003068};
3069
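// Slow path taken when the allocation stack is full: push the object into the reserve region and
// run sticky GCs until a normal push succeeds.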
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003070void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
Mathieu Chartierc1790162014-05-23 10:54:50 -07003071 // Slow path, the allocation stack push back must have already failed.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003072 DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003073 do {
3074 // TODO: Add handle VerifyObject.
3075 StackHandleScope<1> hs(self);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003076 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003077    // Push our object into the reserve region of the allocation stack. This is only required due
3078 // to heap verification requiring that roots are live (either in the live bitmap or in the
3079 // allocation stack).
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003080 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003081 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003082 } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003083}
3084
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003085void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
3086 ObjPtr<mirror::Object>* obj) {
Mathieu Chartierc1790162014-05-23 10:54:50 -07003087 // Slow path, the allocation stack push back must have already failed.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003088 DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
Mathieu Chartiercb535da2015-01-23 13:50:03 -08003089 StackReference<mirror::Object>* start_address;
3090 StackReference<mirror::Object>* end_address;
Mathieu Chartierc1790162014-05-23 10:54:50 -07003091 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
3092 &end_address)) {
3093 // TODO: Add handle VerifyObject.
3094 StackHandleScope<1> hs(self);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003095 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003096    // Push our object into the reserve region of the allocation stack. This is only required due
3097 // to heap verification requiring that roots are live (either in the live bitmap or in the
3098 // allocation stack).
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003099 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003100 // Push into the reserve allocation stack.
3101 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3102 }
3103 self->SetThreadLocalAllocationStack(start_address, end_address);
3104 // Retry on the new thread-local allocation stack.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003105 CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr())); // Must succeed.
Mathieu Chartierc1790162014-05-23 10:54:50 -07003106}
3107
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003108// Must do this with mutators suspended since we are directly accessing the allocation stacks.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003109size_t Heap::VerifyHeapReferences(bool verify_referents) {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08003110 Thread* self = Thread::Current();
3111 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003112  // Let's sort our allocation stacks so that we can efficiently binary search them.
Ian Rogers1d54e732013-05-02 21:10:01 -07003113 allocation_stack_->Sort();
3114 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08003115  // Since we sorted the allocation stack content, we need to revoke all
3116 // thread-local allocation stacks.
3117 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003118 Atomic<size_t> fail_count_(0);
3119 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003120 // Verify objects in the allocation stack since these will be objects which were:
3121 // 1. Allocated prior to the GC (pre GC verification).
3122 // 2. Allocated during the GC (pre sweep GC verification).
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003123 // We don't want to verify the objects in the live stack since they themselves may be
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003124 // pointing to dead objects if they are not reachable.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003125 VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003126 // Verify the roots:
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07003127 visitor.VerifyRoots();
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003128 if (visitor.GetFailureCount() > 0) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003129 // Dump mod-union tables.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003130 for (const auto& table_pair : mod_union_tables_) {
3131 accounting::ModUnionTable* mod_union_table = table_pair.second;
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07003132 mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003133 }
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003134 // Dump remembered sets.
3135 for (const auto& table_pair : remembered_sets_) {
3136 accounting::RememberedSet* remembered_set = table_pair.second;
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07003137 remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003138 }
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07003139 DumpSpaces(LOG_STREAM(ERROR));
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003140 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003141 return visitor.GetFailureCount();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003142}
3143
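// Checks that any object referencing something in the live stack (other than its class) sits on a
// dirty card, i.e. looks for missing card marks.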
3144class VerifyReferenceCardVisitor {
3145 public:
3146 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07003147 REQUIRES_SHARED(Locks::mutator_lock_,
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003148 Locks::heap_bitmap_lock_)
Ian Rogers1d54e732013-05-02 21:10:01 -07003149 : heap_(heap), failed_(failed) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003150 }
3151
Mathieu Chartierda7c6502015-07-23 16:01:26 -07003152  // There are no card marks for native roots on a class.
3153 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3154 const {}
3155 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3156
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003157 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3158 // annotalysis on visitors.
Mathieu Chartier407f7022014-02-18 14:37:05 -08003159 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3160 NO_THREAD_SAFETY_ANALYSIS {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07003161 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003162 // Filter out class references since changing an object's class does not mark the card as dirty.
3163 // Also handles large objects, since the only reference they hold is a class reference.
Mathieu Chartier407f7022014-02-18 14:37:05 -08003164 if (ref != nullptr && !ref->IsClass()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003165 accounting::CardTable* card_table = heap_->GetCardTable();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003166      // If the object is not dirty and it is referencing something in the live stack other than
3167      // its class, then it must be on a dirty card.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07003168 if (!card_table->AddrIsInCardTable(obj)) {
3169 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3170 *failed_ = true;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003171 } else if (!card_table->IsDirty(obj)) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003172 // TODO: Check mod-union tables.
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003173 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3174        // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers1d54e732013-05-02 21:10:01 -07003175 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier407f7022014-02-18 14:37:05 -08003176 if (live_stack->ContainsSorted(ref)) {
3177 if (live_stack->ContainsSorted(obj)) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003178 LOG(ERROR) << "Object " << obj << " found in live stack";
3179 }
3180 if (heap_->GetLiveBitmap()->Test(obj)) {
3181 LOG(ERROR) << "Object " << obj << " found in live bitmap";
3182 }
David Sehr709b0702016-10-13 09:12:37 -07003183 LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
3184 << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
3185 << " in live stack";
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003186
3187 // Print which field of the object is dead.
3188 if (!obj->IsObjectArray()) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08003189 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
Mathieu Chartierc7853442015-03-27 14:35:38 -07003190 CHECK(klass != nullptr);
Mathieu Chartierc0fe56a2015-08-11 13:01:23 -07003191 for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
Mathieu Chartier54d220e2015-07-30 16:20:06 -07003192 if (field.GetOffset().Int32Value() == offset.Int32Value()) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003193 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
David Sehr709b0702016-10-13 09:12:37 -07003194 << field.PrettyField();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003195 break;
3196 }
3197 }
3198 } else {
Ian Rogersef7d42f2014-01-06 12:55:46 -08003199 mirror::ObjectArray<mirror::Object>* object_array =
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08003200 obj->AsObjectArray<mirror::Object>();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003201 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3202 if (object_array->Get(i) == ref) {
3203 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3204 }
3205 }
3206 }
3207
3208 *failed_ = true;
3209 }
3210 }
3211 }
3212 }
3213
3214 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07003215 Heap* const heap_;
3216 bool* const failed_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003217};
3218
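// Applies VerifyReferenceCardVisitor to every reference of each visited object; used by
// VerifyMissingCardMarks below.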
3219class VerifyLiveStackReferences {
3220 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07003221 explicit VerifyLiveStackReferences(Heap* heap)
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003222 : heap_(heap),
Brian Carlstrom93ba8932013-07-17 21:31:49 -07003223 failed_(false) {}
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003224
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003225 void operator()(mirror::Object* obj) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07003226 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003227 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
Mathieu Chartier059ef3d2015-08-18 13:54:21 -07003228 obj->VisitReferences(visitor, VoidFunctor());
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003229 }
3230
3231 bool Failed() const {
3232 return failed_;
3233 }
3234
3235 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07003236 Heap* const heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003237 bool failed_;
3238};
3239
3240bool Heap::VerifyMissingCardMarks() {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08003241 Thread* self = Thread::Current();
3242 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003243 // We need to sort the live stack since we binary search it.
Ian Rogers1d54e732013-05-02 21:10:01 -07003244 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08003245  // Since we sorted the allocation stack content, we need to revoke all
3246 // thread-local allocation stacks.
3247 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003248 VerifyLiveStackReferences visitor(this);
3249 GetLiveBitmap()->Visit(visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003250 // We can verify objects in the live stack since none of these should reference dead objects.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08003251 for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3252 if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3253 visitor(it->AsMirrorPtr());
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08003254 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003255 }
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07003256 return !visitor.Failed();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003257}
3258
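// Swaps the allocation and live stacks, so that objects allocated since the previous swap are
// treated as the live stack by the verification and collection code.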
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003259void Heap::SwapStacks() {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08003260 if (kUseThreadLocalAllocationStack) {
3261 live_stack_->AssertAllZero();
3262 }
Mathieu Chartierd22d5482012-11-06 17:14:12 -08003263 allocation_stack_.swap(live_stack_);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003264}
3265
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08003266void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08003267 // This must be called only during the pause.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003268 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08003269 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3270 MutexLock mu2(self, *Locks::thread_list_lock_);
3271 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3272 for (Thread* t : thread_list) {
3273 t->RevokeThreadLocalAllocationStack();
3274 }
3275}
3276
Ian Rogers68d8b422014-07-17 11:09:10 -07003277void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3278 if (kIsDebugBuild) {
3279 if (rosalloc_space_ != nullptr) {
3280 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3281 }
3282 if (bump_pointer_space_ != nullptr) {
3283 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3284 }
3285 }
3286}
3287
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07003288void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3289 if (kIsDebugBuild) {
3290 if (bump_pointer_space_ != nullptr) {
3291 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3292 }
3293 }
3294}
3295
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003296accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3297 auto it = mod_union_tables_.find(space);
3298 if (it == mod_union_tables_.end()) {
3299 return nullptr;
3300 }
3301 return it->second;
3302}
3303
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003304accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3305 auto it = remembered_sets_.find(space);
3306 if (it == remembered_sets_.end()) {
3307 return nullptr;
3308 }
3309 return it->second;
3310}
3311
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003312void Heap::ProcessCards(TimingLogger* timings,
3313 bool use_rem_sets,
3314 bool process_alloc_space_cards,
Lei Li4add3b42015-01-15 11:55:26 +08003315 bool clear_alloc_space_cards) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003316 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07003317 // Clear cards and keep track of cards cleared in the mod-union table.
Mathieu Chartier02e25112013-08-14 16:14:24 -07003318 for (const auto& space : continuous_spaces_) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003319 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003320 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003321 if (table != nullptr) {
3322 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3323 "ImageModUnionClearCards";
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003324 TimingLogger::ScopedTiming t2(name, timings);
Mathieu Chartier6e6078a2016-10-24 15:45:41 -07003325 table->ProcessCards();
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003326 } else if (use_rem_sets && rem_set != nullptr) {
3327 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3328 << static_cast<int>(collector_type_);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003329 TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003330 rem_set->ClearCards();
Lei Li4add3b42015-01-15 11:55:26 +08003331 } else if (process_alloc_space_cards) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003332 TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
Lei Li4add3b42015-01-15 11:55:26 +08003333 if (clear_alloc_space_cards) {
Mathieu Chartierfbc31082016-01-24 11:59:56 -08003334 uint8_t* end = space->End();
3335 if (space->IsImageSpace()) {
3336          // Image space end is the end of the mirror objects; it is not necessarily page or card
3337 // aligned. Align up so that the check in ClearCardRange does not fail.
3338 end = AlignUp(end, accounting::CardTable::kCardSize);
3339 }
3340 card_table_->ClearCardRange(space->Begin(), end);
Lei Li4add3b42015-01-15 11:55:26 +08003341 } else {
3342 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3343 // cards were dirty before the GC started.
3344 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3345 // -> clean(cleaning thread).
3346        // The races mean we either end up with an aged card or an unaged card. Since we have the
3347        // checkpoint roots and then scan / update mod union tables after, we will always
3348        // scan either card. If we end up with the non-aged card, we scan it in the pause.
3349 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3350 VoidFunctor());
3351 }
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07003352 }
3353 }
3354}
3355
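// A mark visitor that leaves every object in place; used below to update and verify mod-union
// tables without actually marking or moving anything.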
Mathieu Chartier97509952015-07-13 14:35:43 -07003356struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3357 virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
3358 return obj;
3359 }
3360 virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {
3361 }
3362};
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003363
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003364void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3365 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003366 TimingLogger* const timings = current_gc_iteration_.GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003367 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003368 if (verify_pre_gc_heap_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003369 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003370 size_t failures = VerifyHeapReferences();
3371 if (failures > 0) {
3372 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3373 << " failures";
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003374 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003375 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003376 // Check that all objects which reference things in the live stack are on dirty cards.
3377 if (verify_missing_card_marks_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003378 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003379 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003380 SwapStacks();
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003381 // Sort the live stack so that we can quickly binary search it later.
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07003382 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3383 << " missing card mark verification failed\n" << DumpSpaces();
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003384 SwapStacks();
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003385 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003386 if (verify_mod_union_table_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003387 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003388 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003389 for (const auto& table_pair : mod_union_tables_) {
3390 accounting::ModUnionTable* mod_union_table = table_pair.second;
Mathieu Chartier97509952015-07-13 14:35:43 -07003391 IdentityMarkHeapReferenceVisitor visitor;
3392 mod_union_table->UpdateAndMarkReferences(&visitor);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003393 mod_union_table->Verify();
3394 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003395 }
3396}
3397
3398void Heap::PreGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07003399 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003400 collector::GarbageCollector::ScopedPause pause(gc);
3401 PreGcVerificationPaused(gc);
3402 }
3403}
3404
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003405void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003406 // TODO: Add a new runtime option for this?
3407 if (verify_pre_gc_rosalloc_) {
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003408 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003409 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003410}
3411
Ian Rogers1d54e732013-05-02 21:10:01 -07003412void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003413 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003414 TimingLogger* const timings = current_gc_iteration_.GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003415 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003416  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3417 // reachable objects.
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003418 if (verify_pre_sweeping_heap_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003419 TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07003420 CHECK_NE(self->GetState(), kRunnable);
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -08003421 {
3422 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3423 // Swapping bound bitmaps does nothing.
3424 gc->SwapBitmaps();
3425 }
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07003426 // Pass in false since concurrent reference processing can mean that the reference referents
3427    // may point to dead objects at the point at which PreSweepingGcVerification is called.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003428 size_t failures = VerifyHeapReferences(false);
3429 if (failures > 0) {
3430 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3431 << " failures";
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003432 }
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -08003433 {
3434 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3435 gc->SwapBitmaps();
3436 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003437 }
3438 if (verify_pre_sweeping_rosalloc_) {
3439 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3440 }
3441}
3442
3443void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3444 // Only pause if we have to do some verification.
3445 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003446 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003447 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003448 if (verify_system_weaks_) {
3449 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3450 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3451 mark_sweep->VerifySystemWeaks();
3452 }
3453 if (verify_post_gc_rosalloc_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003454 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003455 }
3456 if (verify_post_gc_heap_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003457 TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003458 size_t failures = VerifyHeapReferences();
3459 if (failures > 0) {
3460      LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3461 << " failures";
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003462 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003463 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003464}
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003465
Ian Rogers1d54e732013-05-02 21:10:01 -07003466void Heap::PostGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003467 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3468 collector::GarbageCollector::ScopedPause pause(gc);
Mathieu Chartierd35326f2014-08-18 15:02:59 -07003469 PostGcVerificationPaused(gc);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003470 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07003471}
3472
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003473void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003474 TimingLogger::ScopedTiming t(name, timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003475 for (const auto& space : continuous_spaces_) {
3476 if (space->IsRosAllocSpace()) {
3477 VLOG(heap) << name << " : " << space->GetName();
3478 space->AsRosAllocSpace()->Verify();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08003479 }
3480 }
3481}
3482
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003483collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08003484 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003485 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003486 return WaitForGcToCompleteLocked(cause, self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003487}
3488
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003489collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003490 collector::GcType last_gc_type = collector::kGcTypeNone;
Mathieu Chartier590fee92013-09-13 13:46:47 -07003491 uint64_t wait_start = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08003492 while (collector_type_running_ != kCollectorTypeNone) {
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07003493 if (self != task_processor_->GetRunningThread()) {
3494 // The current thread is about to wait for a currently running
3495 // collection to finish. If the waiting thread is not the heap
3496 // task daemon thread, the currently running collection is
3497 // considered as a blocking GC.
3498 running_collection_is_blocking_ = true;
3499 VLOG(gc) << "Waiting for a blocking GC " << cause;
3500 }
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08003501 ScopedTrace trace("GC: Wait For Completion");
Mathieu Chartier590fee92013-09-13 13:46:47 -07003502 // We must wait, change thread state then sleep on gc_complete_cond_;
3503 gc_complete_cond_->Wait(self);
3504 last_gc_type = last_gc_type_;
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07003505 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07003506 uint64_t wait_time = NanoTime() - wait_start;
3507 total_wait_time_ += wait_time;
3508 if (wait_time > long_pause_log_threshold_) {
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003509 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
3510 << " for cause " << cause;
Mathieu Chartier590fee92013-09-13 13:46:47 -07003511 }
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07003512 if (self != task_processor_->GetRunningThread()) {
3513 // The current thread is about to run a collection. If the thread
3514 // is not the heap task daemon thread, it's considered as a
3515 // blocking GC (i.e., blocking itself).
3516 running_collection_is_blocking_ = true;
3517 VLOG(gc) << "Starting a blocking GC " << cause;
3518 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07003519 return last_gc_type;
Carl Shapiro69759ea2011-07-21 18:13:35 -07003520}
3521
Elliott Hughesc967f782012-04-16 10:23:15 -07003522void Heap::DumpForSigQuit(std::ostream& os) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003523 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003524 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -07003525 DumpGcPerformanceInfo(os);
Elliott Hughesc967f782012-04-16 10:23:15 -07003526}
3527
3528size_t Heap::GetPercentFree() {
Mathieu Chartierd30e1d62014-06-09 13:25:22 -07003529 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
Elliott Hughesc967f782012-04-16 10:23:15 -07003530}
3531
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003532void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003533 if (max_allowed_footprint > GetMaxMemory()) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003534 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003535 << PrettySize(GetMaxMemory());
3536 max_allowed_footprint = GetMaxMemory();
3537 }
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07003538 max_allowed_footprint_ = max_allowed_footprint;
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07003539}
3540
Mathieu Chartier0795f232016-09-27 18:43:30 -07003541bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003542 if (kMovingCollector) {
Mathieu Chartier1cc62e42016-10-03 18:01:28 -07003543 space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
Mathieu Chartier31f44142014-04-08 14:40:03 -07003544 if (space != nullptr) {
3545 // TODO: Check large object?
3546 return space->CanMoveObjects();
Mathieu Chartier590fee92013-09-13 13:46:47 -07003547 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07003548 }
3549 return false;
3550}
3551
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003552void Heap::UpdateMaxNativeFootprint() {
Ian Rogers3e5cf302014-05-20 16:40:37 -07003553 size_t native_size = native_bytes_allocated_.LoadRelaxed();
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003554 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
3555 size_t target_size = native_size / GetTargetHeapUtilization();
3556 if (target_size > native_size + max_free_) {
3557 target_size = native_size + max_free_;
3558 } else if (target_size < native_size + min_free_) {
3559 target_size = native_size + min_free_;
3560 }
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003561 native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003562}
3563
Mathieu Chartierafe49982014-03-27 10:55:04 -07003564collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3565 for (const auto& collector : garbage_collectors_) {
3566 if (collector->GetCollectorType() == collector_type_ &&
3567 collector->GetGcType() == gc_type) {
3568 return collector;
3569 }
3570 }
3571 return nullptr;
3572}
3573
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003574double Heap::HeapGrowthMultiplier() const {
3575  // If we don't care about pause times we are in the background, so return 1.0.
3576 if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
3577 return 1.0;
3578 }
3579 return foreground_heap_growth_multiplier_;
3580}
3581
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003582void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3583 uint64_t bytes_allocated_before_gc) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003584 // We know what our utilization is at this moment.
3585 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003586 const uint64_t bytes_allocated = GetBytesAllocated();
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003587 uint64_t target_size;
Mathieu Chartierafe49982014-03-27 10:55:04 -07003588 collector::GcType gc_type = collector_ran->GetGcType();
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003589 const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
3590 // foreground.
3591 const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
3592 const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003593 if (gc_type != collector::kGcTypeSticky) {
3594 // Grow the heap for non sticky GC.
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003595 ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003596 CHECK_GE(delta, 0);
3597 target_size = bytes_allocated + delta * multiplier;
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003598 target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
3599 target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
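    // Worked example with hypothetical numbers: 40 MB allocated and a target utilization of 0.5
    // give delta = 40 MB; with a multiplier of 1.0 the raw target is 80 MB, which is then clamped
    // into [40 MB + adjusted_min_free, 40 MB + adjusted_max_free].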
Mathieu Chartier590fee92013-09-13 13:46:47 -07003600 native_need_to_run_finalization_ = true;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003601 next_gc_type_ = collector::kGcTypeSticky;
3602 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07003603 collector::GcType non_sticky_gc_type =
Mathieu Chartiere4cab172014-08-19 18:24:04 -07003604 HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
Mathieu Chartierafe49982014-03-27 10:55:04 -07003605 // Find what the next non sticky collector will be.
3606 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3607 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3608 // do another sticky collection next.
3609 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
3610    // pathological case where dead objects that aren't reclaimed by sticky GCs could accumulate
3611 // if the sticky GC throughput always remained >= the full/partial throughput.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003612 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
Mathieu Chartierafe49982014-03-27 10:55:04 -07003613 non_sticky_collector->GetEstimatedMeanThroughput() &&
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003614 non_sticky_collector->NumberOfIterations() > 0 &&
Mathieu Chartierafe49982014-03-27 10:55:04 -07003615 bytes_allocated <= max_allowed_footprint_) {
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003616 next_gc_type_ = collector::kGcTypeSticky;
3617 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07003618 next_gc_type_ = non_sticky_gc_type;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003619 }
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003620 // If we have freed enough memory, shrink the heap back down.
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003621 if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
3622 target_size = bytes_allocated + adjusted_max_free;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003623 } else {
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003624 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003625 }
3626 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003627 if (!ignore_max_footprint_) {
3628 SetIdealFootprint(target_size);
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07003629 if (IsGcConcurrent()) {
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003630 const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003631 current_gc_iteration_.GetFreedLargeObjectBytes() +
3632 current_gc_iteration_.GetFreedRevokeBytes();
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003633 // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3634 // how many bytes were allocated during the GC we need to add freed_bytes back on.
3635 CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3636 const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3637 bytes_allocated_before_gc;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003638 // Calculate when to perform the next ConcurrentGC.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003639 // Calculate the estimated GC duration.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003640 const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003641 // Estimate how many remaining bytes we will have when we need to start the next GC.
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003642 size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
Mathieu Chartier74762802014-01-24 10:21:35 -08003643 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003644 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3645 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
3646      // This should never happen: at the estimated allocation rate, the application would exceed
3647      // its entire footprint within a single GC's duration. Schedule
Mathieu Chartier74762802014-01-24 10:21:35 -08003648      // another GC nearly straight away.
3649 remaining_bytes = kMinConcurrentRemainingBytes;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003650 }
Mathieu Chartier74762802014-01-24 10:21:35 -08003651 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07003652 DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
Mathieu Chartier74762802014-01-24 10:21:35 -08003653 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3654 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3655 // right away.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003656 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
3657 static_cast<size_t>(bytes_allocated));
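      // concurrent_start_bytes_ is the allocation level at which the allocation path requests the
      // next background concurrent GC.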
Mathieu Chartier65db8802012-11-20 12:36:46 -08003658 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08003659 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07003660}
3661
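// Permanently cap the heap at the current growth limit: shrink capacity_ and clamp each malloc
// space (and the backup main space) so the heap can never grow past the limit again.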
Mathieu Chartier379d09f2015-01-08 11:28:13 -08003662void Heap::ClampGrowthLimit() {
Mathieu Chartierddac4232015-04-02 10:08:03 -07003663 // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08003664 ScopedObjectAccess soa(Thread::Current());
3665 WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
Mathieu Chartier379d09f2015-01-08 11:28:13 -08003666 capacity_ = growth_limit_;
3667 for (const auto& space : continuous_spaces_) {
3668 if (space->IsMallocSpace()) {
3669 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3670 malloc_space->ClampGrowthLimit();
3671 }
3672 }
3673  // The main space backup is not in continuous_spaces_ (for performance reasons), so clamp it explicitly.
3674 if (main_space_backup_.get() != nullptr) {
3675 main_space_backup_->ClampGrowthLimit();
3676 }
3677}
3678
jeffhaoc1160702011-10-27 15:48:45 -07003679void Heap::ClearGrowthLimit() {
Mathieu Chartier80de7a62012-11-27 17:21:50 -08003680 growth_limit_ = capacity_;
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08003681 ScopedObjectAccess soa(Thread::Current());
Mathieu Chartier0310da52014-12-01 13:40:48 -08003682 for (const auto& space : continuous_spaces_) {
3683 if (space->IsMallocSpace()) {
3684 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3685 malloc_space->ClearGrowthLimit();
3686 malloc_space->SetFootprintLimit(malloc_space->Capacity());
3687 }
3688 }
3689  // The main space backup is not in continuous_spaces_ (for performance reasons), so clear its limit explicitly.
3690 if (main_space_backup_.get() != nullptr) {
3691 main_space_backup_->ClearGrowthLimit();
3692 main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3693 }
jeffhaoc1160702011-10-27 15:48:45 -07003694}
3695
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003696void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003697 ScopedObjectAccess soa(self);
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07003698 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
Ian Rogers53b8b092014-03-13 23:45:53 -07003699 jvalue args[1];
3700 args[0].l = arg.get();
3701 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07003702 // Restore object in case it gets moved.
Mathieu Chartier28bd2e42016-10-04 13:54:57 -07003703 *object = soa.Decode<mirror::Object>(arg.get());
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003704}
3705
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003706void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
3707 bool force_full,
3708 ObjPtr<mirror::Object>* obj) {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003709 StackHandleScope<1> hs(self);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003710 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003711 RequestConcurrentGC(self, force_full);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003712}
3713
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003714class Heap::ConcurrentGCTask : public HeapTask {
3715 public:
Roland Levillain3887c462015-08-12 18:15:42 +01003716 ConcurrentGCTask(uint64_t target_time, bool force_full)
3717 : HeapTask(target_time), force_full_(force_full) { }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003718 virtual void Run(Thread* self) OVERRIDE {
3719 gc::Heap* heap = Runtime::Current()->GetHeap();
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003720 heap->ConcurrentGC(self, force_full_);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003721 heap->ClearConcurrentGCRequest();
Ian Rogers120f1c72012-09-28 17:17:10 -07003722 }
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003723
3724 private:
3725 const bool force_full_; // If true, force full (or partial) collection.
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003726};
3727
Mathieu Chartier90443472015-07-16 20:32:27 -07003728static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003729 Runtime* runtime = Runtime::Current();
3730 return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3731 !self->IsHandlingStackOverflow();
3732}
3733
3734void Heap::ClearConcurrentGCRequest() {
3735 concurrent_gc_pending_.StoreRelaxed(false);
3736}
3737
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003738void Heap::RequestConcurrentGC(Thread* self, bool force_full) {
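  // The compare-exchange ensures at most one concurrent GC task is queued at a time; the flag is
  // reset by ClearConcurrentGCRequest() once the task has run.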
Mathieu Chartierac195162015-02-20 18:44:28 +00003739 if (CanAddHeapTask(self) &&
3740 concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003741 task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
3742 force_full));
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003743 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07003744}
3745
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003746void Heap::ConcurrentGC(Thread* self, bool force_full) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003747 if (!Runtime::Current()->IsShuttingDown(self)) {
3748 // Wait for any GCs currently running to finish.
3749 if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
3750      // If we can't run the GC type we wanted to run, find the next appropriate one and try that
3751 // instead. E.g. can't do partial, so do full instead.
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003752 collector::GcType next_gc_type = next_gc_type_;
3753 // If forcing full and next gc type is sticky, override with a non-sticky type.
3754 if (force_full && next_gc_type == collector::kGcTypeSticky) {
3755 next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
3756 }
3757 if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003758 collector::kGcTypeNone) {
3759 for (collector::GcType gc_type : gc_plan_) {
3760        // Attempt to run the collector; if we succeed, we are done.
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003761 if (gc_type > next_gc_type &&
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003762 CollectGarbageInternal(gc_type, kGcCauseBackground, false) !=
3763 collector::kGcTypeNone) {
3764 break;
3765 }
Mathieu Chartierf9ed0d32013-11-21 16:42:47 -08003766 }
3767 }
3768 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07003769 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07003770}
3771
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003772class Heap::CollectorTransitionTask : public HeapTask {
3773 public:
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003774 explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3775
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003776 virtual void Run(Thread* self) OVERRIDE {
3777 gc::Heap* heap = Runtime::Current()->GetHeap();
3778 heap->DoPendingCollectorTransition();
3779 heap->ClearPendingCollectorTransition(self);
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003780 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003781};
3782
3783void Heap::ClearPendingCollectorTransition(Thread* self) {
3784 MutexLock mu(self, *pending_task_lock_);
3785 pending_collector_transition_ = nullptr;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003786}
3787
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003788void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3789 Thread* self = Thread::Current();
3790 desired_collector_type_ = desired_collector_type;
3791 if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3792 return;
3793 }
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -07003794 if (collector_type_ == kCollectorTypeCC) {
3795 // For CC, we invoke a full compaction when going to the background, but the collector type
3796 // doesn't change.
3797 DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
3798 }
3799 DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003800 CollectorTransitionTask* added_task = nullptr;
3801 const uint64_t target_time = NanoTime() + delta_time;
3802 {
3803 MutexLock mu(self, *pending_task_lock_);
3804    // If we have an existing collector transition, update the target time to be the new target.
3805 if (pending_collector_transition_ != nullptr) {
3806 task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3807 return;
3808 }
3809 added_task = new CollectorTransitionTask(target_time);
3810 pending_collector_transition_ = added_task;
3811 }
3812 task_processor_->AddTask(self, added_task);
3813}
3814
3815class Heap::HeapTrimTask : public HeapTask {
3816 public:
3817 explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3818 virtual void Run(Thread* self) OVERRIDE {
3819 gc::Heap* heap = Runtime::Current()->GetHeap();
3820 heap->Trim(self);
3821 heap->ClearPendingTrim(self);
3822 }
3823};
3824
3825void Heap::ClearPendingTrim(Thread* self) {
3826 MutexLock mu(self, *pending_task_lock_);
3827 pending_heap_trim_ = nullptr;
3828}
3829
3830void Heap::RequestTrim(Thread* self) {
3831 if (!CanAddHeapTask(self)) {
3832 return;
3833 }
Ian Rogers48931882013-01-22 14:35:16 -08003834 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3835 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3836 // a space it will hold its lock and can become a cause of jank.
3837  // Note that the large object space self-trims, and the Zygote space was trimmed at fork
3838  // time and has been unchanging since.
3839
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08003840 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3841 // because that only marks object heads, so a large array looks like lots of empty space. We
3842 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3843 // to utilization (which is probably inversely proportional to how much benefit we can expect).
3844 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3845 // not how much use we're making of those pages.
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003846 HeapTrimTask* added_task = nullptr;
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07003847 {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003848 MutexLock mu(self, *pending_task_lock_);
3849 if (pending_heap_trim_ != nullptr) {
3850      // We already have a heap trim request in the task processor; ignore this one.
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07003851 return;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003852 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003853 added_task = new HeapTrimTask(kHeapTrimWait);
3854 pending_heap_trim_ = added_task;
Mathieu Chartierc39e3422013-08-07 16:41:36 -07003855 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003856 task_processor_->AddTask(self, added_task);
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003857}
3858
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003859void Heap::RevokeThreadLocalBuffers(Thread* thread) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003860 if (rosalloc_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003861 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3862 if (freed_bytes_revoke > 0U) {
3863 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3864 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3865 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003866 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003867 if (bump_pointer_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003868 CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003869 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003870 if (region_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003871 CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003872 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003873}
3874
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07003875void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3876 if (rosalloc_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003877 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3878 if (freed_bytes_revoke > 0U) {
3879 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3880 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3881 }
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07003882 }
3883}
3884
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003885void Heap::RevokeAllThreadLocalBuffers() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003886 if (rosalloc_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003887 size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3888 if (freed_bytes_revoke > 0U) {
3889 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3890 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3891 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003892 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003893 if (bump_pointer_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003894 CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003895 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003896 if (region_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003897 CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003898 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003899}
3900
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003901bool Heap::IsGCRequestPending() const {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003902 return concurrent_gc_pending_.LoadRelaxed();
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003903}
3904
Mathieu Chartierb5de3bb2015-06-05 13:21:05 -07003905void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3906 env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3907 WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3908 static_cast<jlong>(timeout));
Mathieu Chartier590fee92013-09-13 13:46:47 -07003909}
3910
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003911void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003912 Thread* self = ThreadForEnv(env);
Mathieu Chartier5d2a3f72016-05-11 11:35:39 -07003913 {
3914 MutexLock mu(self, native_histogram_lock_);
3915 native_allocation_histogram_.AddValue(bytes);
3916 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07003917 if (native_need_to_run_finalization_) {
Mathieu Chartierb5de3bb2015-06-05 13:21:05 -07003918 RunFinalization(env, kNativeAllocationFinalizeTimeout);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003919 UpdateMaxNativeFootprint();
3920 native_need_to_run_finalization_ = false;
3921 }
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003922  // Total number of native bytes allocated (FetchAndAdd returns the old value, so add bytes to get the new total).
Ian Rogers3e5cf302014-05-20 16:40:37 -07003923 size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
3924 new_native_bytes_allocated += bytes;
3925 if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
Mathieu Chartiere4cab172014-08-19 18:24:04 -07003926 collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08003927 collector::kGcTypeFull;
3928
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003929    // The second watermark is higher than the GC watermark. If you hit this, it means you are
3930 // allocating native objects faster than the GC can keep up with.
Mathieu Chartier08487452014-09-02 16:21:01 -07003931 if (new_native_bytes_allocated > growth_limit_) {
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003932 if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003933 // Just finished a GC, attempt to run finalizers.
Mathieu Chartierb5de3bb2015-06-05 13:21:05 -07003934 RunFinalization(env, kNativeAllocationFinalizeTimeout);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003935 CHECK(!env->ExceptionCheck());
Lin Zang60e27162015-03-10 18:53:21 +08003936 // Native bytes allocated may be updated by finalization, refresh it.
3937 new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
Mathieu Chartier590fee92013-09-13 13:46:47 -07003938 }
3939 // If we still are over the watermark, attempt a GC for alloc and run finalizers.
Mathieu Chartier08487452014-09-02 16:21:01 -07003940 if (new_native_bytes_allocated > growth_limit_) {
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08003941 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
Mathieu Chartierb5de3bb2015-06-05 13:21:05 -07003942 RunFinalization(env, kNativeAllocationFinalizeTimeout);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003943 native_need_to_run_finalization_ = false;
3944 CHECK(!env->ExceptionCheck());
3945 }
3946 // We have just run finalizers, update the native watermark since it is very likely that
3947 // finalizers released native managed allocations.
3948 UpdateMaxNativeFootprint();
3949 } else if (!IsGCRequestPending()) {
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07003950 if (IsGcConcurrent()) {
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003951 RequestConcurrentGC(self, true); // Request non-sticky type.
Mathieu Chartier590fee92013-09-13 13:46:47 -07003952 } else {
Hiroshi Yamauchid20aba12014-04-11 15:31:09 -07003953 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003954 }
3955 }
3956 }
3957}
3958
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003959void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
3960 size_t expected_size;
Mathieu Chartier5d2a3f72016-05-11 11:35:39 -07003961 {
3962 MutexLock mu(Thread::Current(), native_histogram_lock_);
3963 native_free_histogram_.AddValue(bytes);
3964 }
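  // Subtract from native_bytes_allocated_ with a CAS retry loop so concurrent updates are not
  // lost; throw if the caller attempts to free more native bytes than are currently registered.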
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003965 do {
Ian Rogers3e5cf302014-05-20 16:40:37 -07003966 expected_size = native_bytes_allocated_.LoadRelaxed();
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003967 if (UNLIKELY(bytes > expected_size)) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003968 ScopedObjectAccess soa(env);
3969 env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003970 StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
Mathieu Chartier590fee92013-09-13 13:46:47 -07003971 "registered as allocated", bytes, expected_size).c_str());
3972 break;
3973 }
Mathieu Chartier8ec31f92014-09-03 10:30:11 -07003974 } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
3975 expected_size - bytes));
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003976}
3977
Ian Rogersef7d42f2014-01-06 12:55:46 -08003978size_t Heap::GetTotalMemory() const {
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07003979 return std::max(max_allowed_footprint_, GetBytesAllocated());
Hiroshi Yamauchi09b07a92013-07-15 13:17:06 -07003980}
3981
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003982void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3983 DCHECK(mod_union_table != nullptr);
3984 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3985}
3986
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003987void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003988 CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
Mathieu Chartier52a7f5c2015-08-18 18:35:52 -07003989 (c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
Mathieu Chartierc645f1d2014-03-06 18:11:53 -08003990 CHECK_GE(byte_count, sizeof(mirror::Object));
3991}
3992
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003993void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3994 CHECK(remembered_set != nullptr);
3995 space::Space* space = remembered_set->GetSpace();
3996 CHECK(space != nullptr);
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07003997 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003998 remembered_sets_.Put(space, remembered_set);
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07003999 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08004000}
4001
4002void Heap::RemoveRememberedSet(space::Space* space) {
4003 CHECK(space != nullptr);
4004 auto it = remembered_sets_.find(space);
4005 CHECK(it != remembered_sets_.end());
Mathieu Chartier5189e242014-07-24 11:11:05 -07004006 delete it->second;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08004007 remembered_sets_.erase(it);
4008 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
4009}
4010
Mathieu Chartier4aeec172014-03-27 16:09:46 -07004011void Heap::ClearMarkedObjects() {
4012 // Clear all of the spaces' mark bitmaps.
4013 for (const auto& space : GetContinuousSpaces()) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07004014 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartier4aeec172014-03-27 16:09:46 -07004015 if (space->GetLiveBitmap() != mark_bitmap) {
4016 mark_bitmap->Clear();
4017 }
4018 }
4019  // Clear the marked objects in the discontinuous spaces' mark bitmaps.
4020 for (const auto& space : GetDiscontinuousSpaces()) {
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07004021 space->GetMarkBitmap()->Clear();
Mathieu Chartier4aeec172014-03-27 16:09:46 -07004022 }
4023}
4024
Man Cao8c2ff642015-05-27 17:25:30 -07004025void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
4026 allocation_records_.reset(records);
4027}
4028
Man Cao1ed11b92015-06-11 22:47:35 -07004029void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
4030 if (IsAllocTrackingEnabled()) {
4031 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4032 if (IsAllocTrackingEnabled()) {
4033 GetAllocationRecords()->VisitRoots(visitor);
4034 }
4035 }
4036}
4037
Mathieu Chartier97509952015-07-13 14:35:43 -07004038void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
Man Cao8c2ff642015-05-27 17:25:30 -07004039 if (IsAllocTrackingEnabled()) {
4040 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4041 if (IsAllocTrackingEnabled()) {
Mathieu Chartier97509952015-07-13 14:35:43 -07004042 GetAllocationRecords()->SweepAllocationRecords(visitor);
Man Cao8c2ff642015-05-27 17:25:30 -07004043 }
4044 }
4045}
4046
Man Cao42c3c332015-06-23 16:38:25 -07004047void Heap::AllowNewAllocationRecords() const {
Hiroshi Yamauchifdbd13c2015-09-02 16:16:58 -07004048 CHECK(!kUseReadBarrier);
Hiroshi Yamauchi6f0c6cd2016-03-18 17:17:52 -07004049 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4050 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4051 if (allocation_records != nullptr) {
4052 allocation_records->AllowNewAllocationRecords();
Man Cao42c3c332015-06-23 16:38:25 -07004053 }
4054}
4055
4056void Heap::DisallowNewAllocationRecords() const {
Hiroshi Yamauchifdbd13c2015-09-02 16:16:58 -07004057 CHECK(!kUseReadBarrier);
Hiroshi Yamauchi6f0c6cd2016-03-18 17:17:52 -07004058 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4059 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4060 if (allocation_records != nullptr) {
4061 allocation_records->DisallowNewAllocationRecords();
Man Cao42c3c332015-06-23 16:38:25 -07004062 }
4063}
4064
Hiroshi Yamauchifdbd13c2015-09-02 16:16:58 -07004065void Heap::BroadcastForNewAllocationRecords() const {
Hiroshi Yamauchi6f0c6cd2016-03-18 17:17:52 -07004066 // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
4067 // be set to false while some threads are waiting for system weak access in
4068 // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4069 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4070 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4071 if (allocation_records != nullptr) {
4072 allocation_records->BroadcastForNewAllocationRecords();
Hiroshi Yamauchifdbd13c2015-09-02 16:16:58 -07004073 }
4074}
4075
Mathieu Chartier31000802015-06-14 14:14:37 -07004076// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
4077class StackCrawlState {
4078 public:
4079 StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
4080 : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
4081 }
4082 size_t GetFrameCount() const {
4083 return frame_count_;
4084 }
4085 static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
4086 auto* const state = reinterpret_cast<StackCrawlState*>(arg);
4087 const uintptr_t ip = _Unwind_GetIP(context);
4088 // The first stack frame is get_backtrace itself. Skip it.
4089 if (ip != 0 && state->skip_count_ > 0) {
4090 --state->skip_count_;
4091 return _URC_NO_REASON;
4092 }
4093 // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
4094 state->frames_[state->frame_count_] = ip;
4095 state->frame_count_++;
4096 return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
4097 }
4098
4099 private:
4100 uintptr_t* const frames_;
4101 size_t frame_count_;
4102 const size_t max_depth_;
4103 size_t skip_count_;
4104};
4105
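// Capture up to max_depth return addresses from the current call stack via _Unwind_Backtrace and
// return the number of frames collected.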
4106static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
4107 StackCrawlState state(frames, max_depth, 0u);
4108 _Unwind_Backtrace(&StackCrawlState::Callback, &state);
4109 return state.GetFrameCount();
4110}
4111
Mathieu Chartier9d156d52016-10-06 17:44:26 -07004112void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
Mathieu Chartier31000802015-06-14 14:14:37 -07004113 auto* const runtime = Runtime::Current();
4114 if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
4115 !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
4116 // Check if we should GC.
4117 bool new_backtrace = false;
4118 {
4119 static constexpr size_t kMaxFrames = 16u;
4120 uintptr_t backtrace[kMaxFrames];
4121 const size_t frames = get_backtrace(backtrace, kMaxFrames);
4122 uint64_t hash = 0;
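      // Fold the frame addresses into one hash value. 2654435761 (0x9E3779B1) is a prime close to
      // 2^32 / golden ratio, commonly used for multiplicative hashing; the shift/xor step mixes
      // the bits further.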
4123 for (size_t i = 0; i < frames; ++i) {
4124 hash = hash * 2654435761 + backtrace[i];
4125 hash += (hash >> 13) ^ (hash << 6);
4126 }
4127 MutexLock mu(self, *backtrace_lock_);
4128 new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
4129 if (new_backtrace) {
4130 seen_backtraces_.insert(hash);
4131 }
4132 }
4133 if (new_backtrace) {
4134 StackHandleScope<1> hs(self);
4135 auto h = hs.NewHandleWrapper(obj);
4136 CollectGarbage(false);
4137 unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
4138 } else {
4139 seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
4140 }
4141 }
4142}
4143
Mathieu Chartier51168372015-08-12 16:40:32 -07004144void Heap::DisableGCForShutdown() {
4145 Thread* const self = Thread::Current();
4146 CHECK(Runtime::Current()->IsShuttingDown(self));
4147 MutexLock mu(self, *gc_complete_lock_);
4148 gc_disabled_for_shutdown_ = true;
4149}
4150
Mathieu Chartier9d156d52016-10-06 17:44:26 -07004151bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
Mathieu Chartierfbc31082016-01-24 11:59:56 -08004152 for (gc::space::ImageSpace* space : boot_image_spaces_) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07004153 if (space->HasAddress(obj.Ptr())) {
Mathieu Chartierfbc31082016-01-24 11:59:56 -08004154 return true;
4155 }
4156 }
4157 return false;
4158}
4159
Mingyao Yang6ea1a0e2016-01-29 12:12:49 -08004160bool Heap::IsInBootImageOatFile(const void* p) const {
4161 for (gc::space::ImageSpace* space : boot_image_spaces_) {
4162 if (space->GetOatFile()->Contains(p)) {
4163 return true;
4164 }
4165 }
4166 return false;
4167}
4168
Mathieu Chartierfbc31082016-01-24 11:59:56 -08004169void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
4170 uint32_t* boot_image_end,
4171 uint32_t* boot_oat_begin,
4172 uint32_t* boot_oat_end) {
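  // Compute the lowest begin and highest end addresses (as 32-bit values) across all boot image
  // spaces and across their corresponding oat files.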
4173 DCHECK(boot_image_begin != nullptr);
4174 DCHECK(boot_image_end != nullptr);
4175 DCHECK(boot_oat_begin != nullptr);
4176 DCHECK(boot_oat_end != nullptr);
4177 *boot_image_begin = 0u;
4178 *boot_image_end = 0u;
4179 *boot_oat_begin = 0u;
4180 *boot_oat_end = 0u;
4181 for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
4182 const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
4183 const uint32_t image_size = space_->GetImageHeader().GetImageSize();
4184 if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
4185 *boot_image_begin = image_begin;
4186 }
4187 *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
4188 const OatFile* boot_oat_file = space_->GetOatFile();
4189 const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
4190 const uint32_t oat_size = boot_oat_file->Size();
4191 if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
4192 *boot_oat_begin = oat_begin;
4193 }
4194 *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
4195 }
4196}
4197
Andreas Gampe27fa96c2016-10-07 15:05:24 -07004198void Heap::SetAllocationListener(AllocationListener* l) {
4199 AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);
4200
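  // Only instrument the quick allocation entry points when going from no listener to a listener;
  // RemoveAllocationListener performs the matching uninstrumentation.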
4201 if (old == nullptr) {
4202 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4203 }
4204}
4205
4206void Heap::RemoveAllocationListener() {
4207 AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
4208
4209 if (old != nullptr) {
Andreas Gampe172ec8e2016-10-12 13:50:20 -07004210 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
Andreas Gampe27fa96c2016-10-07 15:05:24 -07004211 }
4212}
4213
Andreas Gampe9b8c5882016-10-21 15:27:46 -07004214void Heap::SetGcPauseListener(GcPauseListener* l) {
4215 gc_pause_listener_.StoreRelaxed(l);
4216}
4217
4218void Heap::RemoveGcPauseListener() {
4219 gc_pause_listener_.StoreRelaxed(nullptr);
4220}
Andreas Gampe27fa96c2016-10-07 15:05:24 -07004221
Mathieu Chartier5ace2012016-11-30 10:15:41 -08004222mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
4223 size_t alloc_size,
4224 bool grow,
4225 size_t* bytes_allocated,
4226 size_t* usable_size,
4227 size_t* bytes_tl_bulk_allocated) {
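  // Slow path taken when the current thread-local allocation buffer cannot satisfy the request:
  // acquire a new TLAB from the bump pointer or region space (or fall back to a direct region
  // allocation) and then allocate from it.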
4228 const AllocatorType allocator_type = GetCurrentAllocator();
4229 if (allocator_type == kAllocatorTypeTLAB) {
4230 DCHECK(bump_pointer_space_ != nullptr);
4231 const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
4232 if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
4233 return nullptr;
4234 }
4235    // Try allocating a new thread local buffer; if the allocation fails, the space must be
4236    // full, so return null.
4237 if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
4238 return nullptr;
4239 }
4240 *bytes_tl_bulk_allocated = new_tlab_size;
4241 } else {
4242 DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
4243 DCHECK(region_space_ != nullptr);
4244 if (space::RegionSpace::kRegionSize >= alloc_size) {
4245 // Non-large. Check OOME for a tlab.
4246 if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
4247 space::RegionSpace::kRegionSize,
4248 grow))) {
4249 // Try to allocate a tlab.
4250 if (!region_space_->AllocNewTlab(self)) {
4251 // Failed to allocate a tlab. Try non-tlab.
4252 return region_space_->AllocNonvirtual<false>(alloc_size,
4253 bytes_allocated,
4254 usable_size,
4255 bytes_tl_bulk_allocated);
4256 }
4257 *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
4258 // Fall-through to using the TLAB below.
4259 } else {
4260 // Check OOME for a non-tlab allocation.
4261 if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
4262 return region_space_->AllocNonvirtual<false>(alloc_size,
4263 bytes_allocated,
4264 usable_size,
4265 bytes_tl_bulk_allocated);
4266 }
4267        // Neither tlab nor non-tlab allocation works. Give up.
4268 return nullptr;
4269 }
4270 } else {
4271 // Large. Check OOME.
4272 if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
4273 return region_space_->AllocNonvirtual<false>(alloc_size,
4274 bytes_allocated,
4275 usable_size,
4276 bytes_tl_bulk_allocated);
4277 }
4278 return nullptr;
4279 }
4280 }
4281 // Refilled TLAB, return.
4282 mirror::Object* ret = self->AllocTlab(alloc_size);
4283 DCHECK(ret != nullptr);
4284 *bytes_allocated = alloc_size;
4285 *usable_size = alloc_size;
4286 return ret;
4287}
4288
Ian Rogers1d54e732013-05-02 21:10:01 -07004289} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07004290} // namespace art