/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <limits>
#include "android-base/thread_annotations.h"
#if defined(__BIONIC__) || defined(__GLIBC__)
#include <malloc.h>  // For mallinfo()
#endif
#include <memory>
#include <vector>

#include "android-base/stringprintf.h"

#include "allocation_listener.h"
#include "art_field-inl.h"
#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/file_utils.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/os.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "class_root.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex/dex_file-inl.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/racing_check.h"
#include "gc/reference_processor.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "gc/verification.h"
#include "gc_pause_listener.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "heap-inl.h"
#include "heap-visit-objects-inl.h"
#include "image.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/executable-inl.h"
#include "mirror/field.h"
#include "mirror/method_handle_impl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "mirror/var_handle.h"
#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "verify_object-inl.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);

// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// often relative to partial/full GC. This may be desirable since sticky GCs interfere less with
// mutator threads (lower pauses, use less memory bandwidth).
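// With generational CC the adjustment below is 0.5 rather than 1.0, so a sticky (young)
// collection's measured throughput counts for only half as much when choosing the next GC type,
// making non-sticky collections the more likely choice than in the non-generational configuration.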
static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
  return use_generational_cc ? 0.5 : 1.0;
}
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space names.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr bool kGCALotMode = false;
// GC alot mode uses a small allocation stack to stress test a lot of GC.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
// Verify object mode uses a small allocation stack size since searching the allocation stack is
// slow.
static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
    sizeof(mirror::HeapReference<mirror::Object>);
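// Assuming 4-byte compressed heap references (sizeof(mirror::HeapReference<mirror::Object>) == 4),
// these sizes correspond to roughly 1K entries in GC-alot mode, 4K entries with object
// verification enabled, and 2M entries by default.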

// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;

static const char* kRegionSpaceName = "main space (region space)";

// If true, we log all GCs in both the foreground and background. Used for debugging.
static constexpr bool kLogAllGCs = false;

// Use max heap for 2 seconds; this is smaller than the usual 5s window since we don't want to
// allocate with relaxed ergonomics for that long.
static constexpr size_t kPostForkMaxHeapDurationMS = 2000;

#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
// 300 MB (0x12c00000) - (default non-moving space capacity).
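// Assuming kDefaultNonMovingSpaceCapacity is 64 MB, this evaluates to 236 MB (0xec00000).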
uint8_t* const Heap::kPreferredAllocSpaceBegin =
    reinterpret_cast<uint8_t*>(300 * MB - kDefaultNonMovingSpaceCapacity);
#else
#ifdef __ANDROID__
// For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
#else
// For 32-bit host, use 0x40000000 because asan uses most of the space below this.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
#endif
#endif

static inline bool CareAboutPauseTimes() {
  return Runtime::Current()->InJankPerceptibleProcessState();
}

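// Verify (in debug builds) that the boot image spaces and their oat files form one contiguous
// block: within each reservation the image spaces come first, packed back to back, immediately
// followed by their oat files, and successive reservations start exactly where the previous one
// ended.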
static void VerifyBootImagesContiguity(const std::vector<gc::space::ImageSpace*>& image_spaces) {
  uint32_t boot_image_size = 0u;
  for (size_t i = 0u, num_spaces = image_spaces.size(); i != num_spaces; ) {
    const ImageHeader& image_header = image_spaces[i]->GetImageHeader();
    uint32_t reservation_size = image_header.GetImageReservationSize();
    uint32_t image_count = image_header.GetImageSpaceCount();

    CHECK_NE(image_count, 0u);
    CHECK_LE(image_count, num_spaces - i);
    CHECK_NE(reservation_size, 0u);
    for (size_t j = 1u; j != image_count; ++j) {
      CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetComponentCount(), 0u);
      CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetImageReservationSize(), 0u);
    }

    // Check the start of the heap.
    CHECK_EQ(image_spaces[0]->Begin() + boot_image_size, image_spaces[i]->Begin());
    // Check contiguous layout of images and oat files.
    const uint8_t* current_heap = image_spaces[i]->Begin();
    const uint8_t* current_oat = image_spaces[i]->GetImageHeader().GetOatFileBegin();
    for (size_t j = 0u; j != image_count; ++j) {
      const ImageHeader& current_header = image_spaces[i + j]->GetImageHeader();
      CHECK_EQ(current_heap, image_spaces[i + j]->Begin());
      CHECK_EQ(current_oat, current_header.GetOatFileBegin());
      current_heap += RoundUp(current_header.GetImageSize(), kPageSize);
      CHECK_GT(current_header.GetOatFileEnd(), current_header.GetOatFileBegin());
      current_oat = current_header.GetOatFileEnd();
    }
    // Check that oat files start at the end of images.
    CHECK_EQ(current_heap, image_spaces[i]->GetImageHeader().GetOatFileBegin());
    // Check that the reservation size equals the size of images and oat files.
    CHECK_EQ(reservation_size, static_cast<size_t>(current_oat - image_spaces[i]->Begin()));

    boot_image_size += reservation_size;
    i += image_count;
  }
}

Heap::Heap(size_t initial_size,
           size_t growth_limit,
           size_t min_free,
           size_t max_free,
           double target_utilization,
           double foreground_heap_growth_multiplier,
           size_t stop_for_native_allocs,
           size_t capacity,
           size_t non_moving_space_capacity,
           const std::vector<std::string>& boot_class_path,
           const std::vector<std::string>& boot_class_path_locations,
           const std::string& image_file_name,
           const InstructionSet image_instruction_set,
           CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type,
           size_t large_object_threshold,
           size_t parallel_gc_threads,
           size_t conc_gc_threads,
           bool low_memory_mode,
           size_t long_pause_log_threshold,
           size_t long_gc_log_threshold,
           bool ignore_target_footprint,
           bool use_tlab,
           bool verify_pre_gc_heap,
           bool verify_pre_sweeping_heap,
           bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc,
           bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc,
           bool gc_stress_mode,
           bool measure_gc_performance,
           bool use_homogeneous_space_compaction_for_oom,
           bool use_generational_cc,
           uint64_t min_interval_homogeneous_space_compaction_by_oom,
           bool dump_region_info_before_gc,
           bool dump_region_info_after_gc,
           space::ImageSpaceLoadingOrder image_space_loading_order)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      pending_task_lock_(nullptr),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      process_cpu_start_time_ns_(ProcessCpuNanoTime()),
      pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
      post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
      pre_gc_weighted_allocated_bytes_(0.0),
      post_gc_weighted_allocated_bytes_(0.0),
      ignore_target_footprint_(ignore_target_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      disable_thread_flip_count_(0),
      thread_flip_running_(false),
      collector_type_running_(kCollectorTypeNone),
      last_gc_cause_(kGcCauseNone),
      thread_running_gc_(nullptr),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      target_footprint_(initial_size),
      // Using kPostMonitorLock as a lock at kDefaultMutexLevel is acquired after
      // this one.
      process_state_update_lock_("process state update lock", kPostMonitorLock),
      min_foreground_target_footprint_(0),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_registered_(0),
      old_native_bytes_allocated_(0),
      native_objects_notified_(0),
      num_bytes_freed_revoke_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      gc_stress_mode_(gc_stress_mode),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
          kDefaultAllocationStackSize),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      region_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      stop_for_native_allocs_(stop_for_native_allocs),
      total_wait_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      semi_space_collector_(nullptr),
      active_concurrent_copying_collector_(nullptr),
      young_concurrent_copying_collector_(nullptr),
      concurrent_copying_collector_(nullptr),
      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      pending_collector_transition_(nullptr),
      pending_heap_trim_(nullptr),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
      use_generational_cc_(use_generational_cc),
      running_collection_is_blocking_(false),
      blocking_gc_count_(0U),
      blocking_gc_time_(0U),
      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
      gc_count_last_window_(0U),
      blocking_gc_count_last_window_(0U),
      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
                                        kGcCountRateMaxBucketCount),
      alloc_tracking_enabled_(false),
      alloc_record_depth_(AllocRecordObjectMap::kDefaultAllocStackDepth),
      backtrace_lock_(nullptr),
      seen_backtrace_count_(0u),
      unique_backtrace_count_(0u),
      gc_disabled_for_shutdown_(false),
      dump_region_info_before_gc_(dump_region_info_before_gc),
      dump_region_info_after_gc_(dump_region_info_after_gc),
      boot_image_spaces_(),
      boot_images_start_address_(0u),
      boot_images_size_(0u) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  if (kUseReadBarrier) {
    CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
    CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
  } else if (background_collector_type_ != gc::kCollectorTypeHomogeneousSpaceCompact) {
    CHECK_EQ(IsMovingGc(foreground_collector_type_), IsMovingGc(background_collector_type_))
        << "Changing from " << foreground_collector_type_ << " to "
        << background_collector_type_ << " (or vice versa) is not supported.";
  }
  verification_.reset(new Verification(this));
  CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
  ScopedTrace trace(__FUNCTION__);
  Runtime* const runtime = Runtime::Current();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = runtime->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));

  // We don't have hspace compaction enabled with CC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    use_homogeneous_space_compaction_for_oom_ = false;
  }
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom_;
  // We may use the same space as the main space for the non moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
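  // In other words, a dedicated non-moving space is needed whenever objects may later be moved
  // out of (or within) the main space: in the zygote, with homogeneous space compaction, or with
  // a moving foreground or background collector.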
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);

  // Requested begin for the alloc space, to follow the mapped image and oat files.
  uint8_t* request_begin = nullptr;
  // Calculate the extra space required after the boot image, see allocations below.
  size_t heap_reservation_size = 0u;
  if (separate_non_moving_space) {
    heap_reservation_size = non_moving_space_capacity;
  } else if (foreground_collector_type_ != kCollectorTypeCC && is_zygote) {
    heap_reservation_size = capacity_;
  }
  heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
  // Load image space(s).
  std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
  MemMap heap_reservation;
  if (space::ImageSpace::LoadBootImage(boot_class_path,
                                       boot_class_path_locations,
                                       image_file_name,
                                       image_instruction_set,
                                       image_space_loading_order,
                                       runtime->ShouldRelocate(),
                                       /*executable=*/ !runtime->IsAotCompiler(),
                                       is_zygote,
                                       heap_reservation_size,
                                       &boot_image_spaces,
                                       &heap_reservation)) {
    DCHECK_EQ(heap_reservation_size, heap_reservation.IsValid() ? heap_reservation.Size() : 0u);
    DCHECK(!boot_image_spaces.empty());
    request_begin = boot_image_spaces.back()->GetImageHeader().GetOatFileEnd();
    DCHECK(!heap_reservation.IsValid() || request_begin == heap_reservation.Begin())
        << "request_begin=" << static_cast<const void*>(request_begin)
        << " heap_reservation.Begin()=" << static_cast<const void*>(heap_reservation.Begin());
    for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
      boot_image_spaces_.push_back(space.get());
      AddSpace(space.release());
    }
    boot_images_start_address_ = PointerToLowMemUInt32(boot_image_spaces_.front()->Begin());
    uint32_t boot_images_end =
        PointerToLowMemUInt32(boot_image_spaces_.back()->GetImageHeader().GetOatFileEnd());
    boot_images_size_ = boot_images_end - boot_images_start_address_;
    if (kIsDebugBuild) {
      VerifyBootImagesContiguity(boot_image_spaces_);
    }
  } else {
    if (foreground_collector_type_ == kCollectorTypeCC) {
      // Need to use a low address so that we can allocate a contiguous 2 * Xmx space
      // when there's no image (dex2oat for target).
      request_begin = kPreferredAllocSpaceBegin;
    }
    // Gross hack to make dex2oat deterministic.
    if (foreground_collector_type_ == kCollectorTypeMS && Runtime::Current()->IsAotCompiler()) {
      // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
      // b/26849108
      request_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
    }
  }

  /*
  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-  nonmoving space (non_moving_space_capacity)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space / bump space 1 (capacity_) +-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */

  MemMap main_mem_map_1;
  MemMap main_mem_map_2;

  std::string error_str;
  MemMap non_moving_space_mem_map;
  if (separate_non_moving_space) {
    ScopedTrace trace2("Create separate non moving space");
    // If we are the zygote, the non moving space becomes the zygote space when we run
    // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
    // rename the mem map later.
    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
    if (heap_reservation.IsValid()) {
      non_moving_space_mem_map = heap_reservation.RemapAtEnd(
          heap_reservation.Begin(), space_name, PROT_READ | PROT_WRITE, &error_str);
    } else {
      non_moving_space_mem_map = MapAnonymousPreferredAddress(
          space_name, request_begin, non_moving_space_capacity, &error_str);
    }
    CHECK(non_moving_space_mem_map.IsValid()) << error_str;
    DCHECK(!heap_reservation.IsValid());
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  if (foreground_collector_type_ != kCollectorTypeCC) {
    ScopedTrace trace2("Create main mem map");
    if (separate_non_moving_space || !is_zygote) {
      main_mem_map_1 = MapAnonymousPreferredAddress(
          kMemMapSpaceName[0], request_begin, capacity_, &error_str);
    } else {
      // If no separate non-moving space and we are the zygote, the main space must come right
      // after the image space to avoid a gap. This is required since we want the zygote space to
      // be adjacent to the image space.
      DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
      main_mem_map_1 = MemMap::MapAnonymous(
          kMemMapSpaceName[0],
          request_begin,
          capacity_,
          PROT_READ | PROT_WRITE,
          /* low_4gb= */ true,
          /* reuse= */ false,
          heap_reservation.IsValid() ? &heap_reservation : nullptr,
          &error_str);
    }
    CHECK(main_mem_map_1.IsValid()) << error_str;
    DCHECK(!heap_reservation.IsValid());
  }
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    ScopedTrace trace2("Create main mem map 2");
    main_mem_map_2 = MapAnonymousPreferredAddress(
        kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
    CHECK(main_mem_map_2.IsValid()) << error_str;
  }

  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    ScopedTrace trace2("Add non moving space");
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map.Size();
    const void* non_moving_space_mem_map_begin = non_moving_space_mem_map.Begin();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
                                                               "zygote / non moving space",
                                                               kDefaultStartingSize,
                                                               initial_size,
                                                               size,
                                                               size,
                                                               /* can_move_objects= */ false);
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
                                        << non_moving_space_mem_map_begin;
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    CHECK(separate_non_moving_space);
    // Reserve twice the capacity, to allow evacuating every region for explicit GCs.
    MemMap region_space_mem_map =
        space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
    CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
    region_space_ = space::RegionSpace::Create(
        kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
    AddSpace(region_space_);
  } else if (IsMovingGc(foreground_collector_type_)) {
    // Create bump pointer spaces.
    // We only need to create the bump pointer spaces if the foreground collector is a
    // compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    std::move(main_mem_map_1));
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            std::move(main_mem_map_2));
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (main_mem_map_2.IsValid()) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
                                                           initial_size,
                                                           growth_limit_,
                                                           capacity_,
                                                           name,
                                                           /* can_move_objects= */ true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  // TODO: Avoid needing to do this.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
Elliott Hughes | 6c9c06d | 2011-11-07 16:43:47 -0800 | [diff] [blame] | 622 | // Allocate the card table. |
Mathieu Chartier | fbc3108 | 2016-01-24 11:59:56 -0800 | [diff] [blame] | 623 | // We currently don't support dynamically resizing the card table. |
| 624 | // Since we don't know where in the low_4gb the app image will be located, make the card table |
| 625 | // cover the whole low_4gb. TODO: Extend the card table in AddSpace. |
| 626 | UNUSED(heap_capacity); |
Roland Levillain | 8f7ea9a | 2018-01-26 17:27:59 +0000 | [diff] [blame] | 627 | // Start at 4 KB, we can be sure there are no spaces mapped this low since the address range is |
Mathieu Chartier | fbc3108 | 2016-01-24 11:59:56 -0800 | [diff] [blame] | 628 | // reserved by the kernel. |
| 629 | static constexpr size_t kMinHeapAddress = 4 * KB; |
| 630 | card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress), |
| 631 | 4 * GB - kMinHeapAddress)); |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 632 | CHECK(card_table_.get() != nullptr) << "Failed to create card table"; |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 633 | if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) { |
| 634 | rb_table_.reset(new accounting::ReadBarrierTable()); |
| 635 | DCHECK(rb_table_->IsAllCleared()); |
| 636 | } |
Jeff Hao | dcdc85b | 2015-12-04 14:06:18 -0800 | [diff] [blame] | 637 | if (HasBootImageSpace()) { |
Mathieu Chartier | 4858a93 | 2015-01-23 13:18:53 -0800 | [diff] [blame] | 638 | // Don't add the image mod-union table if we are running without an image; this can crash if
| 639 | // we use the CardCache implementation. |
Jeff Hao | dcdc85b | 2015-12-04 14:06:18 -0800 | [diff] [blame] | 640 | for (space::ImageSpace* image_space : GetBootImageSpaces()) { |
| 641 | accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace( |
| 642 | "Image mod-union table", this, image_space); |
| 643 | CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table"; |
| 644 | AddModUnionTable(mod_union_table); |
| 645 | } |
Mathieu Chartier | 4858a93 | 2015-01-23 13:18:53 -0800 | [diff] [blame] | 646 | } |
Mathieu Chartier | 96bcd45 | 2014-06-17 09:50:02 -0700 | [diff] [blame] | 647 | if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) { |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 648 | accounting::RememberedSet* non_moving_space_rem_set = |
| 649 | new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_); |
| 650 | CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set"; |
| 651 | AddRememberedSet(non_moving_space_rem_set); |
| 652 | } |
Mathieu Chartier | b363f66 | 2014-07-16 13:28:58 -0700 | [diff] [blame] | 653 | // TODO: Count objects in the image space here? |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 654 | num_bytes_allocated_.store(0, std::memory_order_relaxed); |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 655 | mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize, |
| 656 | kDefaultMarkStackSize)); |
| 657 | const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize; |
| 658 | allocation_stack_.reset(accounting::ObjectStack::Create( |
| 659 | "allocation stack", max_allocation_stack_size_, alloc_stack_capacity)); |
| 660 | live_stack_.reset(accounting::ObjectStack::Create( |
| 661 | "live stack", max_allocation_stack_size_, alloc_stack_capacity)); |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 662 | // It's still too early to take a lock because there are no threads yet, but we can create locks |
| 663 | // now. We don't create them earlier to make it clear that you can't use locks during heap
| 664 | // initialization. |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 665 | gc_complete_lock_ = new Mutex("GC complete lock"); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 666 | gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable", |
| 667 | *gc_complete_lock_)); |
Richard Uhler | caaa2b0 | 2017-02-01 09:54:17 +0000 | [diff] [blame] | 668 | |
Hiroshi Yamauchi | 76f55b0 | 2015-08-21 16:10:39 -0700 | [diff] [blame] | 669 | thread_flip_lock_ = new Mutex("GC thread flip lock"); |
| 670 | thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable", |
| 671 | *thread_flip_lock_)); |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 672 | task_processor_.reset(new TaskProcessor()); |
Mathieu Chartier | 3cf2253 | 2015-07-09 15:15:09 -0700 | [diff] [blame] | 673 | reference_processor_.reset(new ReferenceProcessor()); |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 674 | pending_task_lock_ = new Mutex("Pending task lock"); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 675 | if (ignore_target_footprint_) { |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 676 | SetIdealFootprint(std::numeric_limits<size_t>::max()); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 677 | concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 678 | } |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 679 | CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 680 | // Create our garbage collectors. |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 681 | for (size_t i = 0; i < 2; ++i) { |
| 682 | const bool concurrent = i != 0; |
Mathieu Chartier | dfe3083 | 2015-03-06 15:28:34 -0800 | [diff] [blame] | 683 | if ((MayUseCollector(kCollectorTypeCMS) && concurrent) || |
| 684 | (MayUseCollector(kCollectorTypeMS) && !concurrent)) { |
| 685 | garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent)); |
| 686 | garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent)); |
| 687 | garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent)); |
| 688 | } |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 689 | } |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 690 | if (kMovingCollector) { |
Mathieu Chartier | f75dce4 | 2019-04-08 09:36:23 -0700 | [diff] [blame] | 691 | if (MayUseCollector(kCollectorTypeSS) || |
Mathieu Chartier | dfe3083 | 2015-03-06 15:28:34 -0800 | [diff] [blame] | 692 | MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) || |
| 693 | use_homogeneous_space_compaction_for_oom_) { |
Mathieu Chartier | f75dce4 | 2019-04-08 09:36:23 -0700 | [diff] [blame] | 694 | semi_space_collector_ = new collector::SemiSpace(this); |
Mathieu Chartier | dfe3083 | 2015-03-06 15:28:34 -0800 | [diff] [blame] | 695 | garbage_collectors_.push_back(semi_space_collector_); |
| 696 | } |
| 697 | if (MayUseCollector(kCollectorTypeCC)) { |
Mathieu Chartier | 56fe258 | 2016-07-14 13:30:03 -0700 | [diff] [blame] | 698 | concurrent_copying_collector_ = new collector::ConcurrentCopying(this, |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 699 | /*young_gen=*/false, |
Albert Mingkun Yang | 0b4d146 | 2018-11-29 13:25:35 +0000 | [diff] [blame] | 700 | use_generational_cc_, |
Mathieu Chartier | 56fe258 | 2016-07-14 13:30:03 -0700 | [diff] [blame] | 701 | "", |
| 702 | measure_gc_performance); |
Albert Mingkun Yang | 0b4d146 | 2018-11-29 13:25:35 +0000 | [diff] [blame] | 703 | if (use_generational_cc_) { |
Mathieu Chartier | 8d1a996 | 2016-08-17 16:39:45 -0700 | [diff] [blame] | 704 | young_concurrent_copying_collector_ = new collector::ConcurrentCopying( |
| 705 | this, |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 706 | /*young_gen=*/true, |
Albert Mingkun Yang | 0b4d146 | 2018-11-29 13:25:35 +0000 | [diff] [blame] | 707 | use_generational_cc_, |
Mathieu Chartier | 8d1a996 | 2016-08-17 16:39:45 -0700 | [diff] [blame] | 708 | "young", |
| 709 | measure_gc_performance); |
| 710 | } |
| 711 | active_concurrent_copying_collector_ = concurrent_copying_collector_; |
Hiroshi Yamauchi | 4af1417 | 2016-10-25 11:55:10 -0700 | [diff] [blame] | 712 | DCHECK(region_space_ != nullptr); |
| 713 | concurrent_copying_collector_->SetRegionSpace(region_space_); |
Albert Mingkun Yang | 0b4d146 | 2018-11-29 13:25:35 +0000 | [diff] [blame] | 714 | if (use_generational_cc_) { |
Mathieu Chartier | 8d1a996 | 2016-08-17 16:39:45 -0700 | [diff] [blame] | 715 | young_concurrent_copying_collector_->SetRegionSpace(region_space_); |
Lokesh Gidra | 1c34b71 | 2018-12-18 13:41:58 -0800 | [diff] [blame] | 716 | // At this point, non-moving space should be created. |
| 717 | DCHECK(non_moving_space_ != nullptr); |
| 718 | concurrent_copying_collector_->CreateInterRegionRefBitmaps(); |
Mathieu Chartier | 8d1a996 | 2016-08-17 16:39:45 -0700 | [diff] [blame] | 719 | } |
Mathieu Chartier | dfe3083 | 2015-03-06 15:28:34 -0800 | [diff] [blame] | 720 | garbage_collectors_.push_back(concurrent_copying_collector_); |
Albert Mingkun Yang | 0b4d146 | 2018-11-29 13:25:35 +0000 | [diff] [blame] | 721 | if (use_generational_cc_) { |
Mathieu Chartier | 8d1a996 | 2016-08-17 16:39:45 -0700 | [diff] [blame] | 722 | garbage_collectors_.push_back(young_concurrent_copying_collector_); |
| 723 | } |
Mathieu Chartier | dfe3083 | 2015-03-06 15:28:34 -0800 | [diff] [blame] | 724 | } |
Mathieu Chartier | 0325e62 | 2012-09-05 14:22:51 -0700 | [diff] [blame] | 725 | } |
Jeff Hao | dcdc85b | 2015-12-04 14:06:18 -0800 | [diff] [blame] | 726 | if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr && |
Mathieu Chartier | f75dce4 | 2019-04-08 09:36:23 -0700 | [diff] [blame] | 727 | (is_zygote || separate_non_moving_space)) { |
Mathieu Chartier | b363f66 | 2014-07-16 13:28:58 -0700 | [diff] [blame] | 728 | // Check that there's no gap between the image space and the non-moving space so that the
Andreas Gampe | e1cb298 | 2014-08-27 11:01:09 -0700 | [diff] [blame] | 729 | // immune region won't break (e.g. due to a large object allocated in the gap). This is only
Mathieu Chartier | f75dce4 | 2019-04-08 09:36:23 -0700 | [diff] [blame] | 730 | // required when we're the zygote. |
Mathieu Chartier | a06ba05 | 2016-01-06 13:51:52 -0800 | [diff] [blame] | 731 | // Space with smallest Begin(). |
| 732 | space::ImageSpace* first_space = nullptr; |
| 733 | for (space::ImageSpace* space : boot_image_spaces_) { |
| 734 | if (first_space == nullptr || space->Begin() < first_space->Begin()) { |
| 735 | first_space = space; |
| 736 | } |
| 737 | } |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 738 | bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap()); |
Hiroshi Yamauchi | 3eed93d | 2014-06-04 11:43:59 -0700 | [diff] [blame] | 739 | if (!no_gap) { |
David Srbecky | 5dedb80 | 2015-06-17 00:08:02 +0100 | [diff] [blame] | 740 | PrintFileToLog("/proc/self/maps", LogSeverity::ERROR); |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 741 | MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true); |
Mathieu Chartier | c785344 | 2015-03-27 14:35:38 -0700 | [diff] [blame] | 742 | LOG(FATAL) << "There's a gap between the image space and the non-moving space"; |
Hiroshi Yamauchi | 3eed93d | 2014-06-04 11:43:59 -0700 | [diff] [blame] | 743 | } |
| 744 | } |
Mathieu Chartier | 3100080 | 2015-06-14 14:14:37 -0700 | [diff] [blame] | 745 | instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation(); |
| 746 | if (gc_stress_mode_) { |
| 747 | backtrace_lock_ = new Mutex("GC backtrace lock");
| 748 | } |
Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 749 | if (is_running_on_memory_tool_ || gc_stress_mode_) { |
Mathieu Chartier | 3100080 | 2015-06-14 14:14:37 -0700 | [diff] [blame] | 750 | instrumentation->InstrumentQuickAllocEntryPoints(); |
Hiroshi Yamauchi | 3b4c189 | 2013-09-12 21:33:12 -0700 | [diff] [blame] | 751 | } |
Elliott Hughes | 4dd9b4d | 2011-12-12 18:29:24 -0800 | [diff] [blame] | 752 | if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 753 | LOG(INFO) << "Heap() exiting"; |
Brian Carlstrom | 0a5b14d | 2011-09-27 13:29:15 -0700 | [diff] [blame] | 754 | } |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 755 | } |
| 756 | |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 757 | MemMap Heap::MapAnonymousPreferredAddress(const char* name, |
| 758 | uint8_t* request_begin, |
| 759 | size_t capacity, |
| 760 | std::string* out_error_str) { |
Mathieu Chartier | b363f66 | 2014-07-16 13:28:58 -0700 | [diff] [blame] | 761 | while (true) { |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 762 | MemMap map = MemMap::MapAnonymous(name, |
| 763 | request_begin, |
| 764 | capacity, |
| 765 | PROT_READ | PROT_WRITE, |
Vladimir Marko | 1130659 | 2018-10-26 14:22:59 +0100 | [diff] [blame] | 766 | /*low_4gb=*/ true, |
| 767 | /*reuse=*/ false, |
| 768 | /*reservation=*/ nullptr, |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 769 | out_error_str); |
| 770 | if (map.IsValid() || request_begin == nullptr) { |
Mathieu Chartier | b363f66 | 2014-07-16 13:28:58 -0700 | [diff] [blame] | 771 | return map; |
| 772 | } |
| 773 | // Retry a second time with no specified request begin. |
| 774 | request_begin = nullptr; |
| 775 | } |
Mathieu Chartier | b363f66 | 2014-07-16 13:28:58 -0700 | [diff] [blame] | 776 | } |
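|  | // Editorial sketch, not part of the original source. The loop above retries at most once: the
|  | // first attempt honors request_begin, and on failure a second attempt is made with no address
|  | // hint, so callers get either a valid map or the error from the unconstrained attempt.
|  | // Hypothetical usage (the names requested_begin, capacity, and error are illustrative):
|  | //   std::string error;
|  | //   MemMap map = MapAnonymousPreferredAddress("main space", requested_begin, capacity, &error);
|  | //   CHECK(map.IsValid()) << error;  // May be valid even if requested_begin was unavailable.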
| 777 | |
Mathieu Chartier | dfe3083 | 2015-03-06 15:28:34 -0800 | [diff] [blame] | 778 | bool Heap::MayUseCollector(CollectorType type) const { |
| 779 | return foreground_collector_type_ == type || background_collector_type_ == type; |
| 780 | } |
| 781 | |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 782 | space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map, |
Mathieu Chartier | a4f6af9 | 2015-08-11 17:35:25 -0700 | [diff] [blame] | 783 | size_t initial_size, |
| 784 | size_t growth_limit, |
| 785 | size_t capacity, |
| 786 | const char* name, |
| 787 | bool can_move_objects) { |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 788 | space::MallocSpace* malloc_space = nullptr; |
| 789 | if (kUseRosAlloc) { |
| 790 | // Create rosalloc space. |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 791 | malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map), |
| 792 | name, |
| 793 | kDefaultStartingSize, |
| 794 | initial_size, |
| 795 | growth_limit, |
| 796 | capacity, |
| 797 | low_memory_mode_, |
| 798 | can_move_objects); |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 799 | } else { |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 800 | malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map), |
| 801 | name, |
| 802 | kDefaultStartingSize, |
| 803 | initial_size, |
| 804 | growth_limit, |
| 805 | capacity, |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 806 | can_move_objects); |
| 807 | } |
| 808 | if (collector::SemiSpace::kUseRememberedSet) { |
| 809 | accounting::RememberedSet* rem_set = |
| 810 | new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space); |
| 811 | CHECK(rem_set != nullptr) << "Failed to create main space remembered set"; |
| 812 | AddRememberedSet(rem_set); |
| 813 | } |
| 814 | CHECK(malloc_space != nullptr) << "Failed to create " << name; |
| 815 | malloc_space->SetFootprintLimit(malloc_space->Capacity()); |
| 816 | return malloc_space; |
| 817 | } |
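|  | // Editorial note, not part of the original source: kUseRosAlloc follows ART's k-prefix constant
|  | // convention, so the rosalloc/dlmalloc choice above is fixed per build rather than per call; the
|  | // remembered set is created in either case whenever SemiSpace::kUseRememberedSet is set.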
| 818 | |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 819 | void Heap::CreateMainMallocSpace(MemMap&& mem_map, |
| 820 | size_t initial_size, |
| 821 | size_t growth_limit, |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 822 | size_t capacity) { |
| 823 | // Is background compaction is enabled? |
| 824 | bool can_move_objects = IsMovingGc(background_collector_type_) != |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 825 | IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_; |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 826 | // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will |
| 827 | // happen in the future. If this happens and we have kCompactZygote enabled, we wish to compact
| 828 | // from the main space to the zygote space. If background compaction is enabled, always pass in |
| 829 | // that we can move objects.
| 830 | if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) { |
| 831 | // After the zygote we want this to be false if we don't have background compaction enabled so |
| 832 | // that getting primitive array elements is faster. |
Mathieu Chartier | f75dce4 | 2019-04-08 09:36:23 -0700 | [diff] [blame] | 833 | can_move_objects = !HasZygoteSpace(); |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 834 | } |
Mathieu Chartier | 96bcd45 | 2014-06-17 09:50:02 -0700 | [diff] [blame] | 835 | if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) { |
| 836 | RemoveRememberedSet(main_space_); |
| 837 | } |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 838 | const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0]; |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 839 | main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map), |
| 840 | initial_size, |
| 841 | growth_limit, |
| 842 | capacity, name, |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 843 | can_move_objects); |
| 844 | SetSpaceAsDefault(main_space_); |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 845 | VLOG(heap) << "Created main space " << main_space_; |
| 846 | } |
| 847 | |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 848 | void Heap::ChangeAllocator(AllocatorType allocator) { |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 849 | if (current_allocator_ != allocator) { |
Mathieu Chartier | d889178 | 2014-03-02 13:28:37 -0800 | [diff] [blame] | 850 | // These two allocators are only used internally and don't have any entrypoints. |
| 851 | CHECK_NE(allocator, kAllocatorTypeLOS); |
| 852 | CHECK_NE(allocator, kAllocatorTypeNonMoving); |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 853 | current_allocator_ = allocator; |
Mathieu Chartier | d889178 | 2014-03-02 13:28:37 -0800 | [diff] [blame] | 854 | MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_); |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 855 | SetQuickAllocEntryPointsAllocator(current_allocator_); |
| 856 | Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints(); |
| 857 | } |
| 858 | } |
| 859 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 860 | bool Heap::IsCompilingBoot() const { |
Mathieu Chartier | e5f13e5 | 2015-02-24 09:37:21 -0800 | [diff] [blame] | 861 | if (!Runtime::Current()->IsAotCompiler()) { |
Alex Light | 64ad14d | 2014-08-19 14:23:13 -0700 | [diff] [blame] | 862 | return false; |
| 863 | } |
Mathieu Chartier | a9d82fe | 2016-01-25 20:06:11 -0800 | [diff] [blame] | 864 | ScopedObjectAccess soa(Thread::Current()); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 865 | for (const auto& space : continuous_spaces_) { |
Mathieu Chartier | 4e30541 | 2014-02-19 10:54:44 -0800 | [diff] [blame] | 866 | if (space->IsImageSpace() || space->IsZygoteSpace()) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 867 | return false; |
| 868 | } |
| 869 | } |
| 870 | return true; |
| 871 | } |
| 872 | |
Mathieu Chartier | 1d27b34 | 2014-01-28 12:51:09 -0800 | [diff] [blame] | 873 | void Heap::IncrementDisableMovingGC(Thread* self) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 874 | // Need to do this while holding the lock to prevent races where the GC is about to run or is
| 875 | // already running when we attempt to disable it.
Mathieu Chartier | caa82d6 | 2014-02-02 16:51:17 -0800 | [diff] [blame] | 876 | ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 877 | MutexLock mu(self, *gc_complete_lock_); |
Mathieu Chartier | 1d27b34 | 2014-01-28 12:51:09 -0800 | [diff] [blame] | 878 | ++disable_moving_gc_count_; |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 879 | if (IsMovingGc(collector_type_running_)) { |
Mathieu Chartier | 89a201e | 2014-05-02 10:27:26 -0700 | [diff] [blame] | 880 | WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self); |
Mathieu Chartier | d5a89ee | 2014-01-31 09:55:13 -0800 | [diff] [blame] | 881 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 882 | } |
| 883 | |
Mathieu Chartier | 1d27b34 | 2014-01-28 12:51:09 -0800 | [diff] [blame] | 884 | void Heap::DecrementDisableMovingGC(Thread* self) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 885 | MutexLock mu(self, *gc_complete_lock_); |
Mathieu Chartier | b735bd9 | 2015-06-24 17:04:17 -0700 | [diff] [blame] | 886 | CHECK_GT(disable_moving_gc_count_, 0U); |
Mathieu Chartier | 1d27b34 | 2014-01-28 12:51:09 -0800 | [diff] [blame] | 887 | --disable_moving_gc_count_; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 888 | } |
| 889 | |
Hiroshi Yamauchi | 76f55b0 | 2015-08-21 16:10:39 -0700 | [diff] [blame] | 890 | void Heap::IncrementDisableThreadFlip(Thread* self) { |
| 891 | // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead. |
| 892 | CHECK(kUseReadBarrier); |
Hiroshi Yamauchi | 20a0be0 | 2016-02-19 15:44:06 -0800 | [diff] [blame] | 893 | bool is_nested = self->GetDisableThreadFlipCount() > 0; |
| 894 | self->IncrementDisableThreadFlipCount(); |
| 895 | if (is_nested) { |
| 896 | // If this is a nested JNI critical section enter, we don't need to wait or increment the global |
| 897 | // counter. The global counter is incremented only once per thread, at the outermost enter.
| 898 | return; |
| 899 | } |
Hiroshi Yamauchi | 76f55b0 | 2015-08-21 16:10:39 -0700 | [diff] [blame] | 900 | ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip); |
| 901 | MutexLock mu(self, *thread_flip_lock_); |
Alex Light | 6683446 | 2019-04-08 16:28:29 +0000 | [diff] [blame] | 902 | thread_flip_cond_->CheckSafeToWait(self); |
Hiroshi Yamauchi | 76f55b0 | 2015-08-21 16:10:39 -0700 | [diff] [blame] | 903 | bool has_waited = false; |
Eric Holk | 6f5e729 | 2020-02-25 15:10:50 -0800 | [diff] [blame^] | 904 | uint64_t wait_start = 0; |
Hiroshi Yamauchi | ee23582 | 2016-08-19 17:03:27 -0700 | [diff] [blame] | 905 | if (thread_flip_running_) { |
Eric Holk | 6f5e729 | 2020-02-25 15:10:50 -0800 | [diff] [blame^] | 906 | wait_start = NanoTime(); |
Andreas Gampe | 9b827ab | 2017-12-07 19:32:48 -0800 | [diff] [blame] | 907 | ScopedTrace trace("IncrementDisableThreadFlip"); |
Hiroshi Yamauchi | ee23582 | 2016-08-19 17:03:27 -0700 | [diff] [blame] | 908 | while (thread_flip_running_) { |
| 909 | has_waited = true; |
| 910 | thread_flip_cond_->Wait(self); |
| 911 | } |
Hiroshi Yamauchi | 76f55b0 | 2015-08-21 16:10:39 -0700 | [diff] [blame] | 912 | } |
| 913 | ++disable_thread_flip_count_; |
| 914 | if (has_waited) { |
| 915 | uint64_t wait_time = NanoTime() - wait_start; |
| 916 | total_wait_time_ += wait_time; |
| 917 | if (wait_time > long_pause_log_threshold_) { |
| 918 | LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time); |
| 919 | } |
| 920 | } |
| 921 | } |
| 922 | |
| 923 | void Heap::DecrementDisableThreadFlip(Thread* self) { |
| 924 | // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up |
| 925 | // the GC thread that is waiting to begin a thread flip.
| 926 | CHECK(kUseReadBarrier); |
Hiroshi Yamauchi | 20a0be0 | 2016-02-19 15:44:06 -0800 | [diff] [blame] | 927 | self->DecrementDisableThreadFlipCount(); |
| 928 | bool is_outermost = self->GetDisableThreadFlipCount() == 0; |
| 929 | if (!is_outermost) { |
| 930 | // If this is not an outermost JNI critical exit, we don't need to decrement the global counter. |
| 931 | // The global counter is decremented only once for a thread for the outermost exit. |
| 932 | return; |
| 933 | } |
Hiroshi Yamauchi | 76f55b0 | 2015-08-21 16:10:39 -0700 | [diff] [blame] | 934 | MutexLock mu(self, *thread_flip_lock_); |
| 935 | CHECK_GT(disable_thread_flip_count_, 0U); |
| 936 | --disable_thread_flip_count_; |
Hiroshi Yamauchi | 20a0be0 | 2016-02-19 15:44:06 -0800 | [diff] [blame] | 937 | if (disable_thread_flip_count_ == 0) { |
| 938 | // Potentially notify the GC thread blocking to begin a thread flip. |
| 939 | thread_flip_cond_->Broadcast(self); |
| 940 | } |
Hiroshi Yamauchi | 76f55b0 | 2015-08-21 16:10:39 -0700 | [diff] [blame] | 941 | } |
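|  | // Editorial sketch, not part of heap.cc: with the per-thread count above, nested JNI critical
|  | // regions touch the global counter only at the outermost level, e.g.:
|  | //   heap->IncrementDisableThreadFlip(self);  // Outermost enter: may wait, bumps the global count.
|  | //   heap->IncrementDisableThreadFlip(self);  // Nested enter: thread-local count only.
|  | //   heap->DecrementDisableThreadFlip(self);  // Nested exit: thread-local count only.
|  | //   heap->DecrementDisableThreadFlip(self);  // Outermost exit: decrements global, may broadcast.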
| 942 | |
| 943 | void Heap::ThreadFlipBegin(Thread* self) { |
| 944 | // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_ |
| 945 | // > 0, block. Otherwise, go ahead. |
| 946 | CHECK(kUseReadBarrier); |
| 947 | ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip); |
| 948 | MutexLock mu(self, *thread_flip_lock_); |
Alex Light | 6683446 | 2019-04-08 16:28:29 +0000 | [diff] [blame] | 949 | thread_flip_cond_->CheckSafeToWait(self); |
Hiroshi Yamauchi | 76f55b0 | 2015-08-21 16:10:39 -0700 | [diff] [blame] | 950 | bool has_waited = false; |
| 951 | uint64_t wait_start = NanoTime(); |
| 952 | CHECK(!thread_flip_running_); |
Hiroshi Yamauchi | 20a0be0 | 2016-02-19 15:44:06 -0800 | [diff] [blame] | 953 | // Set this to true before waiting so that frequent JNI critical enter/exits won't starve |
| 954 | // GC. This is like the writer preference of a reader-writer lock.
Hiroshi Yamauchi | 76f55b0 | 2015-08-21 16:10:39 -0700 | [diff] [blame] | 955 | thread_flip_running_ = true; |
| 956 | while (disable_thread_flip_count_ > 0) { |
| 957 | has_waited = true; |
| 958 | thread_flip_cond_->Wait(self); |
| 959 | } |
| 960 | if (has_waited) { |
| 961 | uint64_t wait_time = NanoTime() - wait_start; |
| 962 | total_wait_time_ += wait_time; |
| 963 | if (wait_time > long_pause_log_threshold_) { |
| 964 | LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time); |
| 965 | } |
| 966 | } |
| 967 | } |
| 968 | |
| 969 | void Heap::ThreadFlipEnd(Thread* self) { |
| 970 | // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators |
| 971 | // waiting to enter a JNI critical section.
| 972 | CHECK(kUseReadBarrier); |
| 973 | MutexLock mu(self, *thread_flip_lock_); |
| 974 | CHECK(thread_flip_running_); |
| 975 | thread_flip_running_ = false; |
Hiroshi Yamauchi | 20a0be0 | 2016-02-19 15:44:06 -0800 | [diff] [blame] | 976 | // Potentially notify mutator threads blocking to enter a JNI critical section. |
Hiroshi Yamauchi | 76f55b0 | 2015-08-21 16:10:39 -0700 | [diff] [blame] | 977 | thread_flip_cond_->Broadcast(self); |
| 978 | } |
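|  | // Editorial note, not part of the original source: ThreadFlipBegin sets thread_flip_running_
|  | // before waiting for disable_thread_flip_count_ to drain, so later IncrementDisableThreadFlip
|  | // callers block behind the pending flip. That is the writer-preference behavior the comment in
|  | // ThreadFlipBegin describes; without it, back-to-back JNI critical sections could keep the count
|  | // nonzero and starve the flip.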
| 979 | |
Lokesh Gidra | acd7060 | 2019-12-05 17:46:25 -0800 | [diff] [blame] | 980 | void Heap::GrowHeapOnJankPerceptibleSwitch() { |
| 981 | MutexLock mu(Thread::Current(), process_state_update_lock_); |
| 982 | size_t orig_target_footprint = target_footprint_.load(std::memory_order_relaxed); |
| 983 | if (orig_target_footprint < min_foreground_target_footprint_) { |
| 984 | target_footprint_.compare_exchange_strong(orig_target_footprint, |
| 985 | min_foreground_target_footprint_, |
| 986 | std::memory_order_relaxed); |
| 987 | } |
| 988 | min_foreground_target_footprint_ = 0; |
| 989 | } |
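|  | // Editorial note (an assumption, not stated in the original): the compare-exchange rather than a
|  | // plain store presumably avoids clobbering a concurrent update to target_footprint_; if the CAS
|  | // fails, the competing writer's value is kept and only min_foreground_target_footprint_ is reset.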
| 990 | |
Mathieu Chartier | f8cb178 | 2016-03-18 18:45:41 -0700 | [diff] [blame] | 991 | void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) { |
| 992 | if (old_process_state != new_process_state) { |
| 993 | const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible; |
Mathieu Chartier | f8cb178 | 2016-03-18 18:45:41 -0700 | [diff] [blame] | 994 | if (jank_perceptible) { |
Mathieu Chartier | a5f9de0 | 2014-02-28 16:48:42 -0800 | [diff] [blame] | 995 | // Transition back to foreground right away to prevent jank. |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 996 | RequestCollectorTransition(foreground_collector_type_, 0); |
Lokesh Gidra | acd7060 | 2019-12-05 17:46:25 -0800 | [diff] [blame] | 997 | GrowHeapOnJankPerceptibleSwitch(); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 998 | } else { |
Mathieu Chartier | a5f9de0 | 2014-02-28 16:48:42 -0800 | [diff] [blame] | 999 | // Don't delay for debug builds since we may want to stress test the GC. |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1000 | // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have |
| 1001 | // special handling which does a homogeneous space compaction once but then doesn't transition
Hiroshi Yamauchi | 60985b7 | 2016-08-24 13:53:12 -0700 | [diff] [blame] | 1002 | // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't |
| 1003 | // transition the collector. |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1004 | RequestCollectorTransition(background_collector_type_, |
Andreas Gampe | ed56b5e | 2017-10-19 12:58:19 -0700 | [diff] [blame] | 1005 | kStressCollectorTransition |
| 1006 | ? 0 |
| 1007 | : kCollectorTransitionWait); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1008 | } |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1009 | } |
Mathieu Chartier | ca2a24d | 2013-11-25 15:12:12 -0800 | [diff] [blame] | 1010 | } |
| 1011 | |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1012 | void Heap::CreateThreadPool() { |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 1013 | const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_); |
| 1014 | if (num_threads != 0) { |
Mathieu Chartier | bcd5e9d | 2013-11-13 14:33:28 -0800 | [diff] [blame] | 1015 | thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads)); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1016 | } |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1017 | } |
| 1018 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1019 | void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) { |
Mathieu Chartier | 00b5915 | 2014-07-25 10:13:51 -0700 | [diff] [blame] | 1020 | space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_; |
| 1021 | space::ContinuousSpace* space2 = non_moving_space_; |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1022 | // TODO: Generalize this to n bitmaps? |
Mathieu Chartier | 00b5915 | 2014-07-25 10:13:51 -0700 | [diff] [blame] | 1023 | CHECK(space1 != nullptr); |
| 1024 | CHECK(space2 != nullptr); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1025 | MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(), |
Mathieu Chartier | 2dbe627 | 2014-09-16 10:43:23 -0700 | [diff] [blame] | 1026 | (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr), |
| 1027 | stack); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1028 | } |
| 1029 | |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1030 | void Heap::DeleteThreadPool() { |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 1031 | thread_pool_.reset(nullptr); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1032 | } |
| 1033 | |
Mathieu Chartier | 1b54f9c | 2014-04-30 16:45:02 -0700 | [diff] [blame] | 1034 | void Heap::AddSpace(space::Space* space) { |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1035 | CHECK(space != nullptr); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1036 | WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); |
| 1037 | if (space->IsContinuousSpace()) { |
| 1038 | DCHECK(!space->IsDiscontinuousSpace()); |
| 1039 | space::ContinuousSpace* continuous_space = space->AsContinuousSpace(); |
| 1040 | // Continuous spaces don't necessarily have bitmaps. |
Mathieu Chartier | a8e8f9c | 2014-04-09 14:51:05 -0700 | [diff] [blame] | 1041 | accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap(); |
| 1042 | accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap(); |
Mathieu Chartier | ecc8230 | 2017-02-16 10:20:12 -0800 | [diff] [blame] | 1043 | // The region space bitmap is not added since VisitObjects visits the region space objects with |
| 1044 | // special handling. |
| 1045 | if (live_bitmap != nullptr && !space->IsRegionSpace()) { |
Mathieu Chartier | 2796a16 | 2014-07-25 11:50:47 -0700 | [diff] [blame] | 1046 | CHECK(mark_bitmap != nullptr); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1047 | live_bitmap_->AddContinuousSpaceBitmap(live_bitmap); |
| 1048 | mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1049 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1050 | continuous_spaces_.push_back(continuous_space); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1051 | // Ensure that spaces remain sorted in increasing order of start address. |
| 1052 | std::sort(continuous_spaces_.begin(), continuous_spaces_.end(), |
| 1053 | [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) { |
| 1054 | return a->Begin() < b->Begin(); |
| 1055 | }); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1056 | } else { |
Mathieu Chartier | 2796a16 | 2014-07-25 11:50:47 -0700 | [diff] [blame] | 1057 | CHECK(space->IsDiscontinuousSpace()); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1058 | space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace(); |
Mathieu Chartier | bbd695c | 2014-04-16 09:48:48 -0700 | [diff] [blame] | 1059 | live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap()); |
| 1060 | mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap()); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1061 | discontinuous_spaces_.push_back(discontinuous_space); |
| 1062 | } |
| 1063 | if (space->IsAllocSpace()) { |
| 1064 | alloc_spaces_.push_back(space->AsAllocSpace()); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1065 | } |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1066 | } |
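|  | // Editorial note, not part of the original source: the sort by Begin() above is what the
|  | // constructor's capacity computation relies on when it reads continuous_spaces_.front() and
|  | // continuous_spaces_.back() ("Relies on the spaces being sorted").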
| 1067 | |
Mathieu Chartier | 1b54f9c | 2014-04-30 16:45:02 -0700 | [diff] [blame] | 1068 | void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) { |
| 1069 | WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); |
| 1070 | if (continuous_space->IsDlMallocSpace()) { |
| 1071 | dlmalloc_space_ = continuous_space->AsDlMallocSpace(); |
| 1072 | } else if (continuous_space->IsRosAllocSpace()) { |
| 1073 | rosalloc_space_ = continuous_space->AsRosAllocSpace(); |
| 1074 | } |
| 1075 | } |
| 1076 | |
| 1077 | void Heap::RemoveSpace(space::Space* space) { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1078 | DCHECK(space != nullptr); |
| 1079 | WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); |
| 1080 | if (space->IsContinuousSpace()) { |
| 1081 | DCHECK(!space->IsDiscontinuousSpace()); |
| 1082 | space::ContinuousSpace* continuous_space = space->AsContinuousSpace(); |
| 1083 | // Continuous spaces don't necessarily have bitmaps. |
Mathieu Chartier | a8e8f9c | 2014-04-09 14:51:05 -0700 | [diff] [blame] | 1084 | accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap(); |
| 1085 | accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap(); |
Mathieu Chartier | ecc8230 | 2017-02-16 10:20:12 -0800 | [diff] [blame] | 1086 | if (live_bitmap != nullptr && !space->IsRegionSpace()) { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1087 | DCHECK(mark_bitmap != nullptr); |
| 1088 | live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap); |
| 1089 | mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap); |
| 1090 | } |
| 1091 | auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space); |
| 1092 | DCHECK(it != continuous_spaces_.end()); |
| 1093 | continuous_spaces_.erase(it); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1094 | } else { |
| 1095 | DCHECK(space->IsDiscontinuousSpace()); |
| 1096 | space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace(); |
Mathieu Chartier | bbd695c | 2014-04-16 09:48:48 -0700 | [diff] [blame] | 1097 | live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap()); |
| 1098 | mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap()); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1099 | auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(), |
| 1100 | discontinuous_space); |
| 1101 | DCHECK(it != discontinuous_spaces_.end()); |
| 1102 | discontinuous_spaces_.erase(it); |
| 1103 | } |
| 1104 | if (space->IsAllocSpace()) { |
| 1105 | auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace()); |
| 1106 | DCHECK(it != alloc_spaces_.end()); |
| 1107 | alloc_spaces_.erase(it); |
| 1108 | } |
| 1109 | } |
| 1110 | |
Albert Mingkun Yang | 6e0d325 | 2018-12-10 15:22:45 +0000 | [diff] [blame] | 1111 | double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns, |
| 1112 | uint64_t current_process_cpu_time) const { |
Albert Mingkun Yang | 2d7329b | 2018-11-30 19:58:18 +0000 | [diff] [blame] | 1113 | uint64_t bytes_allocated = GetBytesAllocated(); |
Albert Mingkun Yang | 6e0d325 | 2018-12-10 15:22:45 +0000 | [diff] [blame] | 1114 | double weight = current_process_cpu_time - gc_last_process_cpu_time_ns; |
| 1115 | return weight * bytes_allocated; |
| 1116 | } |
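|  | // Editorial worked example, illustrative only: if 2e9 ns of process CPU time have elapsed since
|  | // the last sample and 64 MiB are currently allocated, this returns 2e9 * 64 MiB byte-nanoseconds.
|  | // Accumulating such terms in the two callers below approximates the integral of allocated bytes
|  | // over process CPU time, sampled at pre-GC and post-GC points respectively.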
| 1117 | |
| 1118 | void Heap::CalculatePreGcWeightedAllocatedBytes() { |
| 1119 | uint64_t current_process_cpu_time = ProcessCpuNanoTime(); |
| 1120 | pre_gc_weighted_allocated_bytes_ += |
| 1121 | CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time); |
| 1122 | pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time; |
| 1123 | } |
| 1124 | |
| 1125 | void Heap::CalculatePostGcWeightedAllocatedBytes() { |
| 1126 | uint64_t current_process_cpu_time = ProcessCpuNanoTime(); |
| 1127 | post_gc_weighted_allocated_bytes_ += |
| 1128 | CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time); |
| 1129 | post_gc_last_process_cpu_time_ns_ = current_process_cpu_time; |
Albert Mingkun Yang | 2d7329b | 2018-11-30 19:58:18 +0000 | [diff] [blame] | 1130 | } |
| 1131 | |
Albert Mingkun Yang | d6e178e | 2018-11-19 12:58:30 +0000 | [diff] [blame] | 1132 | uint64_t Heap::GetTotalGcCpuTime() { |
| 1133 | uint64_t sum = 0; |
Albert Mingkun Yang | 1c42e75 | 2018-11-19 16:10:24 +0000 | [diff] [blame] | 1134 | for (auto* collector : garbage_collectors_) { |
Albert Mingkun Yang | d6e178e | 2018-11-19 12:58:30 +0000 | [diff] [blame] | 1135 | sum += collector->GetTotalCpuTime(); |
| 1136 | } |
| 1137 | return sum; |
| 1138 | } |
| 1139 | |
Elliott Hughes | 8b788fe | 2013-04-17 15:57:01 -0700 | [diff] [blame] | 1140 | void Heap::DumpGcPerformanceInfo(std::ostream& os) { |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1141 | // Dump cumulative timings. |
Elliott Hughes | 8b788fe | 2013-04-17 15:57:01 -0700 | [diff] [blame] | 1142 | os << "Dumping cumulative Gc timings\n"; |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1143 | uint64_t total_duration = 0; |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1144 | // Dump cumulative loggers for each GC type. |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1145 | uint64_t total_paused_time = 0; |
Albert Mingkun Yang | 1c42e75 | 2018-11-19 16:10:24 +0000 | [diff] [blame] | 1146 | for (auto* collector : garbage_collectors_) { |
Mathieu Chartier | 104fa0c | 2014-08-07 14:26:27 -0700 | [diff] [blame] | 1147 | total_duration += collector->GetCumulativeTimings().GetTotalNs(); |
| 1148 | total_paused_time += collector->GetTotalPausedTimeNs(); |
| 1149 | collector->DumpPerformanceInfo(os); |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1150 | } |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1151 | if (total_duration != 0) { |
Lokesh Gidra | a65859d | 2019-04-11 12:27:38 -0700 | [diff] [blame] | 1152 | const double total_seconds = total_duration / 1.0e9; |
| 1153 | const double total_cpu_seconds = GetTotalGcCpuTime() / 1.0e9; |
Elliott Hughes | 8b788fe | 2013-04-17 15:57:01 -0700 | [diff] [blame] | 1154 | os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n"; |
| 1155 | os << "Mean GC size throughput: " |
Lokesh Gidra | a65859d | 2019-04-11 12:27:38 -0700 | [diff] [blame] | 1156 | << PrettySize(GetBytesFreedEver() / total_seconds) << "/s" |
| 1157 | << " per cpu-time: " |
| 1158 | << PrettySize(GetBytesFreedEver() / total_cpu_seconds) << "/s\n"; |
Elliott Hughes | 8b788fe | 2013-04-17 15:57:01 -0700 | [diff] [blame] | 1159 | os << "Mean GC object throughput: " |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1160 | << (GetObjectsFreedEver() / total_seconds) << " objects/s\n"; |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1161 | } |
Mathieu Chartier | dd162fb | 2014-08-06 17:06:33 -0700 | [diff] [blame] | 1162 | uint64_t total_objects_allocated = GetObjectsAllocatedEver(); |
Mathieu Chartier | c30a725 | 2014-08-12 10:13:48 -0700 | [diff] [blame] | 1163 | os << "Total number of allocations " << total_objects_allocated << "\n"; |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 1164 | os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n"; |
| 1165 | os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n"; |
Mathieu Chartier | c30a725 | 2014-08-12 10:13:48 -0700 | [diff] [blame] | 1166 | os << "Free memory " << PrettySize(GetFreeMemory()) << "\n"; |
Mathieu Chartier | dd162fb | 2014-08-06 17:06:33 -0700 | [diff] [blame] | 1167 | os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n"; |
| 1168 | os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n"; |
Mathieu Chartier | c30a725 | 2014-08-12 10:13:48 -0700 | [diff] [blame] | 1169 | os << "Total memory " << PrettySize(GetTotalMemory()) << "\n"; |
| 1170 | os << "Max memory " << PrettySize(GetMaxMemory()) << "\n"; |
Mathieu Chartier | e4cab17 | 2014-08-19 18:24:04 -0700 | [diff] [blame] | 1171 | if (HasZygoteSpace()) { |
| 1172 | os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n"; |
| 1173 | } |
Elliott Hughes | 8b788fe | 2013-04-17 15:57:01 -0700 | [diff] [blame] | 1174 | os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n"; |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 1175 | os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n"; |
| 1176 | os << "Total GC count: " << GetGcCount() << "\n"; |
| 1177 | os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n"; |
| 1178 | os << "Total blocking GC count: " << GetBlockingGcCount() << "\n"; |
| 1179 | os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n"; |
| 1180 | |
| 1181 | { |
| 1182 | MutexLock mu(Thread::Current(), *gc_complete_lock_); |
| 1183 | if (gc_count_rate_histogram_.SampleSize() > 0U) { |
| 1184 | os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: "; |
| 1185 | gc_count_rate_histogram_.DumpBins(os); |
| 1186 | os << "\n"; |
| 1187 | } |
| 1188 | if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) { |
| 1189 | os << "Histogram of blocking GC count per " |
| 1190 | << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: "; |
| 1191 | blocking_gc_count_rate_histogram_.DumpBins(os); |
| 1192 | os << "\n"; |
| 1193 | } |
| 1194 | } |
| 1195 | |
Hiroshi Yamauchi | b62f2e6 | 2016-03-23 15:51:24 -0700 | [diff] [blame] | 1196 | if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) { |
| 1197 | rosalloc_space_->DumpStats(os); |
| 1198 | } |
| 1199 | |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 1200 | os << "Native bytes total: " << GetNativeBytes() |
| 1201 | << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n"; |
| 1202 | |
| 1203 | os << "Total native bytes at last GC: " |
| 1204 | << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n"; |
Mathieu Chartier | 5d2a3f7 | 2016-05-11 11:35:39 -0700 | [diff] [blame] | 1205 | |
Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1206 | BaseMutex::DumpAll(os); |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1207 | } |
| 1208 | |
Hiroshi Yamauchi | 3767017 | 2015-06-10 17:20:54 -0700 | [diff] [blame] | 1209 | void Heap::ResetGcPerformanceInfo() { |
Albert Mingkun Yang | 1c42e75 | 2018-11-19 16:10:24 +0000 | [diff] [blame] | 1210 | for (auto* collector : garbage_collectors_) { |
Hiroshi Yamauchi | 3767017 | 2015-06-10 17:20:54 -0700 | [diff] [blame] | 1211 | collector->ResetMeasurements(); |
| 1212 | } |
Albert Mingkun Yang | 2d7329b | 2018-11-30 19:58:18 +0000 | [diff] [blame] | 1213 | |
| 1214 | process_cpu_start_time_ns_ = ProcessCpuNanoTime(); |
Albert Mingkun Yang | 6e0d325 | 2018-12-10 15:22:45 +0000 | [diff] [blame] | 1215 | |
| 1216 | pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_; |
| 1217 | pre_gc_weighted_allocated_bytes_ = 0u; |
| 1218 | |
| 1219 | post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_; |
| 1220 | post_gc_weighted_allocated_bytes_ = 0u; |
Albert Mingkun Yang | 2d7329b | 2018-11-30 19:58:18 +0000 | [diff] [blame] | 1221 | |
Hans Boehm | 4c6d765 | 2019-11-01 09:23:19 -0700 | [diff] [blame] | 1222 | total_bytes_freed_ever_.store(0); |
| 1223 | total_objects_freed_ever_.store(0); |
Hiroshi Yamauchi | 3767017 | 2015-06-10 17:20:54 -0700 | [diff] [blame] | 1224 | total_wait_time_ = 0; |
| 1225 | blocking_gc_count_ = 0; |
| 1226 | blocking_gc_time_ = 0; |
| 1227 | gc_count_last_window_ = 0; |
| 1228 | blocking_gc_count_last_window_ = 0; |
| 1229 | last_update_time_gc_count_rate_histograms_ = // Round down by the window duration. |
| 1230 | (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration; |
| 1231 | { |
| 1232 | MutexLock mu(Thread::Current(), *gc_complete_lock_); |
| 1233 | gc_count_rate_histogram_.Reset(); |
| 1234 | blocking_gc_count_rate_histogram_.Reset(); |
| 1235 | } |
| 1236 | } |
| 1237 | |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 1238 | uint64_t Heap::GetGcCount() const { |
| 1239 | uint64_t gc_count = 0U; |
Albert Mingkun Yang | 1c42e75 | 2018-11-19 16:10:24 +0000 | [diff] [blame] | 1240 | for (auto* collector : garbage_collectors_) { |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 1241 | gc_count += collector->GetCumulativeTimings().GetIterations(); |
| 1242 | } |
| 1243 | return gc_count; |
| 1244 | } |
| 1245 | |
| 1246 | uint64_t Heap::GetGcTime() const { |
| 1247 | uint64_t gc_time = 0U; |
Albert Mingkun Yang | 1c42e75 | 2018-11-19 16:10:24 +0000 | [diff] [blame] | 1248 | for (auto* collector : garbage_collectors_) { |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 1249 | gc_time += collector->GetCumulativeTimings().GetTotalNs(); |
| 1250 | } |
| 1251 | return gc_time; |
| 1252 | } |
| 1253 | |
| 1254 | uint64_t Heap::GetBlockingGcCount() const { |
| 1255 | return blocking_gc_count_; |
| 1256 | } |
| 1257 | |
| 1258 | uint64_t Heap::GetBlockingGcTime() const { |
| 1259 | return blocking_gc_time_; |
| 1260 | } |
| 1261 | |
| 1262 | void Heap::DumpGcCountRateHistogram(std::ostream& os) const { |
| 1263 | MutexLock mu(Thread::Current(), *gc_complete_lock_); |
| 1264 | if (gc_count_rate_histogram_.SampleSize() > 0U) { |
| 1265 | gc_count_rate_histogram_.DumpBins(os); |
| 1266 | } |
| 1267 | } |
| 1268 | |
| 1269 | void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const { |
| 1270 | MutexLock mu(Thread::Current(), *gc_complete_lock_); |
| 1271 | if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) { |
| 1272 | blocking_gc_count_rate_histogram_.DumpBins(os); |
| 1273 | } |
| 1274 | } |
| 1275 | |
Andreas Gampe | 27fa96c | 2016-10-07 15:05:24 -0700 | [diff] [blame] | 1276 | ALWAYS_INLINE |
| 1277 | static inline AllocationListener* GetAndOverwriteAllocationListener( |
| 1278 | Atomic<AllocationListener*>* storage, AllocationListener* new_value) { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 1279 | return storage->exchange(new_value); |
Andreas Gampe | 27fa96c | 2016-10-07 15:05:24 -0700 | [diff] [blame] | 1280 | } |
| 1281 | |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1282 | Heap::~Heap() { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1283 | VLOG(heap) << "Starting ~Heap()"; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1284 | STLDeleteElements(&garbage_collectors_); |
| 1285 | // If we don't reset then the mark stack complains in its destructor. |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1286 | allocation_stack_->Reset(); |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 1287 | allocation_records_.reset(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1288 | live_stack_->Reset(); |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 1289 | STLDeleteValues(&mod_union_tables_); |
Mathieu Chartier | 0767c9a | 2014-03-26 12:53:19 -0700 | [diff] [blame] | 1290 | STLDeleteValues(&remembered_sets_); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1291 | STLDeleteElements(&continuous_spaces_); |
| 1292 | STLDeleteElements(&discontinuous_spaces_); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1293 | delete gc_complete_lock_; |
Andreas Gampe | 6be4f2a | 2015-11-10 13:34:17 -0800 | [diff] [blame] | 1294 | delete thread_flip_lock_; |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1295 | delete pending_task_lock_; |
Mathieu Chartier | 3100080 | 2015-06-14 14:14:37 -0700 | [diff] [blame] | 1296 | delete backtrace_lock_; |
Hans Boehm | fb8b4e2 | 2018-09-05 16:45:42 -0700 | [diff] [blame] | 1297 | uint64_t unique_count = unique_backtrace_count_.load(); |
| 1298 | uint64_t seen_count = seen_backtrace_count_.load(); |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 1299 | if (unique_count != 0 || seen_count != 0) { |
| 1300 | LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count); |
Mathieu Chartier | 3100080 | 2015-06-14 14:14:37 -0700 | [diff] [blame] | 1301 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1302 | VLOG(heap) << "Finished ~Heap()"; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1303 | } |
| 1304 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1305 | |
| 1306 | space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const { |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1307 | for (const auto& space : continuous_spaces_) { |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1308 | if (space->Contains(addr)) { |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1309 | return space; |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1310 | } |
| 1311 | } |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1312 | return nullptr; |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1313 | } |
| 1314 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1315 | space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj, |
| 1316 | bool fail_ok) const { |
| 1317 | space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr()); |
| 1318 | if (space != nullptr) { |
| 1319 | return space; |
| 1320 | } |
| 1321 | if (!fail_ok) { |
| 1322 | LOG(FATAL) << "object " << obj << " not inside any spaces!"; |
| 1323 | } |
| 1324 | return nullptr; |
| 1325 | } |
| 1326 | |
| 1327 | space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj, |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1328 | bool fail_ok) const { |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1329 | for (const auto& space : discontinuous_spaces_) { |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1330 | if (space->Contains(obj.Ptr())) { |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1331 | return space; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1332 | } |
| 1333 | } |
| 1334 | if (!fail_ok) { |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1335 | LOG(FATAL) << "object " << obj << " not inside any spaces!"; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1336 | } |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1337 | return nullptr; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1338 | } |
| 1339 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1340 | space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1341 | space::Space* result = FindContinuousSpaceFromObject(obj, true); |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1342 | if (result != nullptr) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1343 | return result; |
| 1344 | } |
Ian Rogers | 6a3c1fc | 2014-10-31 00:33:20 -0700 | [diff] [blame] | 1345 | return FindDiscontinuousSpaceFromObject(obj, fail_ok); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1346 | } |
| 1347 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1348 | space::Space* Heap::FindSpaceFromAddress(const void* addr) const { |
| 1349 | for (const auto& space : continuous_spaces_) { |
| 1350 | if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) { |
| 1351 | return space; |
| 1352 | } |
| 1353 | } |
| 1354 | for (const auto& space : discontinuous_spaces_) { |
| 1355 | if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) { |
| 1356 | return space; |
| 1357 | } |
| 1358 | } |
| 1359 | return nullptr; |
| 1360 | } |
| 1361 | |
Roland Levillain | 5fcf1ea | 2018-10-30 11:58:08 +0000 | [diff] [blame] | 1362 | std::string Heap::DumpSpaceNameFromAddress(const void* addr) const { |
| 1363 | space::Space* space = FindSpaceFromAddress(addr); |
| 1364 | return (space != nullptr) ? space->GetName() : "no space"; |
| 1365 | } |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1366 | |
Hiroshi Yamauchi | 654dd48 | 2014-07-09 12:54:32 -0700 | [diff] [blame] | 1367 | void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) { |
Mathieu Chartier | e8f3f03 | 2016-04-04 16:49:44 -0700 | [diff] [blame] | 1368 | // If we're in a stack overflow, do not create a new exception. It would require running the |
| 1369 | // constructor, which will of course still be in a stack overflow. |
| 1370 | if (self->IsHandlingStackOverflow()) { |
Roland Levillain | 7b0e844 | 2018-04-11 18:27:47 +0100 | [diff] [blame] | 1371 | self->SetException( |
| 1372 | Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow()); |
Mathieu Chartier | e8f3f03 | 2016-04-04 16:49:44 -0700 | [diff] [blame] | 1373 | return; |
| 1374 | } |
| 1375 | |
Hiroshi Yamauchi | 3b4c189 | 2013-09-12 21:33:12 -0700 | [diff] [blame] | 1376 | std::ostringstream oss; |
Ian Rogers | ef7d42f | 2014-01-06 12:55:46 -0800 | [diff] [blame] | 1377 | size_t total_bytes_free = GetFreeMemory(); |
Hiroshi Yamauchi | 3b4c189 | 2013-09-12 21:33:12 -0700 | [diff] [blame] | 1378 | oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free |
Mathieu Chartier | a9033d7 | 2016-12-01 17:41:17 -0800 | [diff] [blame] | 1379 | << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM," |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 1380 | << " target footprint " << target_footprint_.load(std::memory_order_relaxed) |
| 1381 | << ", growth limit " |
Mathieu Chartier | a9033d7 | 2016-12-01 17:41:17 -0800 | [diff] [blame] | 1382 | << growth_limit_; |
Hiroshi Yamauchi | 3b4c189 | 2013-09-12 21:33:12 -0700 | [diff] [blame] | 1383 | // If the allocation failed due to fragmentation, print out the largest possible contiguous allocation. |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1384 | if (total_bytes_free >= byte_count) { |
Mathieu Chartier | b363f66 | 2014-07-16 13:28:58 -0700 | [diff] [blame] | 1385 | space::AllocSpace* space = nullptr; |
Hiroshi Yamauchi | 654dd48 | 2014-07-09 12:54:32 -0700 | [diff] [blame] | 1386 | if (allocator_type == kAllocatorTypeNonMoving) { |
| 1387 | space = non_moving_space_; |
| 1388 | } else if (allocator_type == kAllocatorTypeRosAlloc || |
| 1389 | allocator_type == kAllocatorTypeDlMalloc) { |
| 1390 | space = main_space_; |
Mathieu Chartier | b363f66 | 2014-07-16 13:28:58 -0700 | [diff] [blame] | 1391 | } else if (allocator_type == kAllocatorTypeBumpPointer || |
| 1392 | allocator_type == kAllocatorTypeTLAB) { |
| 1393 | space = bump_pointer_space_; |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 1394 | } else if (allocator_type == kAllocatorTypeRegion || |
| 1395 | allocator_type == kAllocatorTypeRegionTLAB) { |
| 1396 | space = region_space_; |
Hiroshi Yamauchi | 3b4c189 | 2013-09-12 21:33:12 -0700 | [diff] [blame] | 1397 | } |
Hiroshi Yamauchi | 654dd48 | 2014-07-09 12:54:32 -0700 | [diff] [blame] | 1398 | if (space != nullptr) { |
| 1399 | space->LogFragmentationAllocFailure(oss, byte_count); |
| 1400 | } |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1401 | } |
Hiroshi Yamauchi | 3b4c189 | 2013-09-12 21:33:12 -0700 | [diff] [blame] | 1402 | self->ThrowOutOfMemoryError(oss.str().c_str()); |
| 1403 | } |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1404 | |
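// Runs a collector transition that was requested earlier: either homogeneous space compaction
// or a full compacting pass for the concurrent copying collector. Both are skipped while the
// process state is jank perceptible, since they would cause user-visible pauses.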
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1405 | void Heap::DoPendingCollectorTransition() { |
| 1406 | CollectorType desired_collector_type = desired_collector_type_; |
Mathieu Chartier | b272855 | 2014-09-08 20:08:41 +0000 | [diff] [blame] | 1407 | // Launch homogeneous space compaction if it is desired. |
| 1408 | if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) { |
| 1409 | if (!CareAboutPauseTimes()) { |
| 1410 | PerformHomogeneousSpaceCompact(); |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1411 | } else { |
| 1412 | VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state"; |
Mathieu Chartier | b272855 | 2014-09-08 20:08:41 +0000 | [diff] [blame] | 1413 | } |
Hiroshi Yamauchi | 60985b7 | 2016-08-24 13:53:12 -0700 | [diff] [blame] | 1414 | } else if (desired_collector_type == kCollectorTypeCCBackground) { |
| 1415 | DCHECK(kUseReadBarrier); |
| 1416 | if (!CareAboutPauseTimes()) { |
| 1417 | // Invoke CC full compaction. |
| 1418 | CollectGarbageInternal(collector::kGcTypeFull, |
| 1419 | kGcCauseCollectorTransition, |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 1420 | /*clear_soft_references=*/false); |
Hiroshi Yamauchi | 60985b7 | 2016-08-24 13:53:12 -0700 | [diff] [blame] | 1421 | } else { |
| 1422 | VLOG(gc) << "CC background compaction ignored due to jank perceptible process state"; |
| 1423 | } |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1424 | } else { |
Mathieu Chartier | b52df53 | 2019-04-09 14:10:59 -0700 | [diff] [blame] | 1425 | CHECK_EQ(desired_collector_type, collector_type_) << "Unsupported collector transition"; |
Mathieu Chartier | b272855 | 2014-09-08 20:08:41 +0000 | [diff] [blame] | 1426 | } |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1427 | } |
| 1428 | |
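// Releases memory back to the system without moving objects: deflates monitors (only when
// pauses do not matter), trims the JNI reference tables, trims the malloc-backed spaces, and
// trims the arena pool used by the JIT and the verifier.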
| 1429 | void Heap::Trim(Thread* self) { |
Mathieu Chartier | 8d44725 | 2015-10-26 10:21:14 -0700 | [diff] [blame] | 1430 | Runtime* const runtime = Runtime::Current(); |
Mathieu Chartier | 440e4ce | 2014-03-31 16:36:35 -0700 | [diff] [blame] | 1431 | if (!CareAboutPauseTimes()) { |
| 1432 | // Deflate the monitors; this can cause a pause, but that shouldn't matter since we don't care |
| 1433 | // about pauses. |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 1434 | ScopedTrace trace("Deflating monitors"); |
Hiroshi Yamauchi | 3b1d1b7 | 2016-10-12 11:53:57 -0700 | [diff] [blame] | 1435 | // Avoid race conditions on the lock word for CC. |
| 1436 | ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim); |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 1437 | ScopedSuspendAll ssa(__FUNCTION__); |
| 1438 | uint64_t start_time = NanoTime(); |
| 1439 | size_t count = runtime->GetMonitorList()->DeflateMonitors(); |
| 1440 | VLOG(heap) << "Deflating " << count << " monitors took " |
| 1441 | << PrettyDuration(NanoTime() - start_time); |
Mathieu Chartier | 440e4ce | 2014-03-31 16:36:35 -0700 | [diff] [blame] | 1442 | } |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1443 | TrimIndirectReferenceTables(self); |
| 1444 | TrimSpaces(self); |
Mathieu Chartier | 8d44725 | 2015-10-26 10:21:14 -0700 | [diff] [blame] | 1445 | // Trim arenas that may have been used by JIT or verifier. |
Mathieu Chartier | 8d44725 | 2015-10-26 10:21:14 -0700 | [diff] [blame] | 1446 | runtime->GetArenaPool()->TrimMaps(); |
Mathieu Chartier | a5f9de0 | 2014-02-28 16:48:42 -0800 | [diff] [blame] | 1447 | } |
| 1448 | |
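// Checkpoint closure: each thread trims its own JNI local reference table and then passes the
// shared barrier, possibly acting on behalf of the thread that requested the trim.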
Mathieu Chartier | 91c2f0c | 2014-11-26 11:21:15 -0800 | [diff] [blame] | 1449 | class TrimIndirectReferenceTableClosure : public Closure { |
| 1450 | public: |
| 1451 | explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) { |
| 1452 | } |
Roland Levillain | f73caca | 2018-08-24 17:19:07 +0100 | [diff] [blame] | 1453 | void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS { |
Ian Rogers | 55256cb | 2017-12-21 17:07:11 -0800 | [diff] [blame] | 1454 | thread->GetJniEnv()->TrimLocals(); |
Lei Li | dd9943d | 2015-02-02 14:24:44 +0800 | [diff] [blame] | 1455 | // If the thread is a running mutator, then act on behalf of the trim thread. |
| 1456 | // See the code in ThreadList::RunCheckpoint. |
Mathieu Chartier | 10d2508 | 2015-10-28 18:36:09 -0700 | [diff] [blame] | 1457 | barrier_->Pass(Thread::Current()); |
Mathieu Chartier | 91c2f0c | 2014-11-26 11:21:15 -0800 | [diff] [blame] | 1458 | } |
| 1459 | |
| 1460 | private: |
| 1461 | Barrier* const barrier_; |
| 1462 | }; |
| 1463 | |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1464 | void Heap::TrimIndirectReferenceTables(Thread* self) { |
| 1465 | ScopedObjectAccess soa(self); |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 1466 | ScopedTrace trace(__PRETTY_FUNCTION__); |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1467 | JavaVMExt* vm = soa.Vm(); |
| 1468 | // Trim globals indirect reference table. |
| 1469 | vm->TrimGlobals(); |
| 1470 | // Trim locals indirect reference tables. |
| 1471 | Barrier barrier(0); |
| 1472 | TrimIndirectReferenceTableClosure closure(&barrier); |
| 1473 | ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); |
| 1474 | size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure); |
Lei Li | dd9943d | 2015-02-02 14:24:44 +0800 | [diff] [blame] | 1475 | if (barrier_count != 0) { |
| 1476 | barrier.Increment(self, barrier_count); |
| 1477 | } |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1478 | } |
Mathieu Chartier | 91c2f0c | 2014-11-26 11:21:15 -0800 | [diff] [blame] | 1479 | |
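// Marks a collection as started: waits until any collection already in progress finishes, then
// records the collector type, the GC cause and the driving thread under gc_complete_lock_ so
// that only one GC can run at a time.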
Mathieu Chartier | aa51682 | 2015-10-02 15:53:37 -0700 | [diff] [blame] | 1480 | void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) { |
Mathieu Chartier | b93d5b1 | 2017-05-19 13:05:06 -0700 | [diff] [blame] | 1481 | // Need to do this before acquiring the locks since we don't want to get suspended while |
| 1482 | // holding any locks. |
| 1483 | ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); |
Mathieu Chartier | aa51682 | 2015-10-02 15:53:37 -0700 | [diff] [blame] | 1484 | MutexLock mu(self, *gc_complete_lock_); |
| 1485 | // Ensure there is only one GC at a time. |
| 1486 | WaitForGcToCompleteLocked(cause, self); |
| 1487 | collector_type_running_ = collector_type; |
Mathieu Chartier | 40112dd | 2017-06-26 17:49:09 -0700 | [diff] [blame] | 1488 | last_gc_cause_ = cause; |
Mathieu Chartier | 183009a | 2017-02-16 21:19:28 -0800 | [diff] [blame] | 1489 | thread_running_gc_ = self; |
Mathieu Chartier | aa51682 | 2015-10-02 15:53:37 -0700 | [diff] [blame] | 1490 | } |
| 1491 | |
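// Registers a pseudo-GC (kCollectorTypeHeapTrim) so a concurrent collector transition cannot
// delete a space while it is being trimmed, trims the malloc spaces, and logs the amount
// reclaimed together with the resulting managed-heap utilization.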
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1492 | void Heap::TrimSpaces(Thread* self) { |
Mathieu Chartier | b93d5b1 | 2017-05-19 13:05:06 -0700 | [diff] [blame] | 1493 | // Pretend we are doing a GC to prevent background compaction from deleting the space we are |
| 1494 | // trimming. |
| 1495 | StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim); |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 1496 | ScopedTrace trace(__PRETTY_FUNCTION__); |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 1497 | const uint64_t start_ns = NanoTime(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1498 | // Trim the managed spaces. |
| 1499 | uint64_t total_alloc_space_allocated = 0; |
| 1500 | uint64_t total_alloc_space_size = 0; |
| 1501 | uint64_t managed_reclaimed = 0; |
Mathieu Chartier | a9d82fe | 2016-01-25 20:06:11 -0800 | [diff] [blame] | 1502 | { |
| 1503 | ScopedObjectAccess soa(self); |
| 1504 | for (const auto& space : continuous_spaces_) { |
| 1505 | if (space->IsMallocSpace()) { |
| 1506 | gc::space::MallocSpace* malloc_space = space->AsMallocSpace(); |
| 1507 | if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) { |
| 1508 | // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock |
| 1509 | // for a long period of time. |
| 1510 | managed_reclaimed += malloc_space->Trim(); |
| 1511 | } |
| 1512 | total_alloc_space_size += malloc_space->Size(); |
Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1513 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1514 | } |
| 1515 | } |
Mathieu Chartier | 2dbe627 | 2014-09-16 10:43:23 -0700 | [diff] [blame] | 1516 | total_alloc_space_allocated = GetBytesAllocated(); |
| 1517 | if (large_object_space_ != nullptr) { |
| 1518 | total_alloc_space_allocated -= large_object_space_->GetBytesAllocated(); |
| 1519 | } |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 1520 | if (bump_pointer_space_ != nullptr) { |
| 1521 | total_alloc_space_allocated -= bump_pointer_space_->Size(); |
| 1522 | } |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 1523 | if (region_space_ != nullptr) { |
| 1524 | total_alloc_space_allocated -= region_space_->GetBytesAllocated(); |
| 1525 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1526 | const float managed_utilization = static_cast<float>(total_alloc_space_allocated) / |
| 1527 | static_cast<float>(total_alloc_space_size); |
| 1528 | uint64_t gc_heap_end_ns = NanoTime(); |
Mathieu Chartier | d5a89ee | 2014-01-31 09:55:13 -0800 | [diff] [blame] | 1529 | // We never move things in the native heap, so we can finish the GC at this point. |
| 1530 | FinishGC(self, collector::kGcTypeNone); |
Ian Rogers | 872dd82 | 2014-10-30 11:19:14 -0700 | [diff] [blame] | 1531 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1532 | VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns) |
Dimitry Ivanov | e6465bc | 2015-12-14 18:55:02 -0800 | [diff] [blame] | 1533 | << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of " |
| 1534 | << static_cast<int>(100 * managed_utilization) << "%."; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1535 | } |
| 1536 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1537 | bool Heap::IsValidObjectAddress(const void* addr) const { |
| 1538 | if (addr == nullptr) { |
Elliott Hughes | 88c5c35 | 2012-03-15 18:49:48 -0700 | [diff] [blame] | 1539 | return true; |
| 1540 | } |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1541 | return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1542 | } |
| 1543 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1544 | bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const { |
| 1545 | return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr; |
Mathieu Chartier | d68ac70 | 2014-02-11 14:50:51 -0800 | [diff] [blame] | 1546 | } |
| 1547 | |
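// Liveness check used by verification code: an object is treated as live if it sits in the
// bump pointer, temp or region space, if a live bitmap covers it, or if it is still on the
// allocation or live stack. The stack checks are retried a few times with short sleeps to
// tolerate the racy allocation/live stack swap, and the bitmaps are re-checked afterwards.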
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1548 | bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj, |
| 1549 | bool search_allocation_stack, |
| 1550 | bool search_live_stack, |
| 1551 | bool sorted) { |
| 1552 | if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) { |
Mathieu Chartier | 15d3402 | 2014-02-26 17:16:38 -0800 | [diff] [blame] | 1553 | return false; |
| 1554 | } |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1555 | if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) { |
Mathieu Chartier | 4e30541 | 2014-02-19 10:54:44 -0800 | [diff] [blame] | 1556 | mirror::Class* klass = obj->GetClass<kVerifyNone>(); |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 1557 | if (obj == klass) { |
Mathieu Chartier | 9be9a7a | 2014-01-24 14:07:33 -0800 | [diff] [blame] | 1558 | // This case happens for java.lang.Class. |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 1559 | return true; |
| 1560 | } |
| 1561 | return VerifyClassClass(klass) && IsLiveObjectLocked(klass); |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1562 | } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) { |
Mathieu Chartier | 4e30541 | 2014-02-19 10:54:44 -0800 | [diff] [blame] | 1563 | // If we are in the allocated region of the temp space, then we are probably live (e.g. during |
| 1564 | // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained. |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1565 | return temp_space_->Contains(obj.Ptr()); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1566 | } |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1567 | if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) { |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 1568 | return true; |
| 1569 | } |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 1570 | space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true); |
Mathieu Chartier | bbd695c | 2014-04-16 09:48:48 -0700 | [diff] [blame] | 1571 | space::DiscontinuousSpace* d_space = nullptr; |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 1572 | if (c_space != nullptr) { |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1573 | if (c_space->GetLiveBitmap()->Test(obj.Ptr())) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1574 | return true; |
| 1575 | } |
| 1576 | } else { |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 1577 | d_space = FindDiscontinuousSpaceFromObject(obj, true); |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 1578 | if (d_space != nullptr) { |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1579 | if (d_space->GetLiveBitmap()->Test(obj.Ptr())) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1580 | return true; |
| 1581 | } |
| 1582 | } |
| 1583 | } |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 1584 | // This is covering the allocation/live stack swapping that is done without mutators suspended. |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1585 | for (size_t i = 0; i < (sorted ? 1 : 5); ++i) { |
| 1586 | if (i > 0) { |
| 1587 | NanoSleep(MsToNs(10)); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1588 | } |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1589 | if (search_allocation_stack) { |
| 1590 | if (sorted) { |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1591 | if (allocation_stack_->ContainsSorted(obj.Ptr())) { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1592 | return true; |
| 1593 | } |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1594 | } else if (allocation_stack_->Contains(obj.Ptr())) { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1595 | return true; |
| 1596 | } |
| 1597 | } |
| 1598 | |
| 1599 | if (search_live_stack) { |
| 1600 | if (sorted) { |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1601 | if (live_stack_->ContainsSorted(obj.Ptr())) { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1602 | return true; |
| 1603 | } |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1604 | } else if (live_stack_->Contains(obj.Ptr())) { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1605 | return true; |
| 1606 | } |
| 1607 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1608 | } |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 1609 | // We need to check the bitmaps again since there is a race where we mark something as live and |
| 1610 | // then clear the stack containing it. |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 1611 | if (c_space != nullptr) { |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1612 | if (c_space->GetLiveBitmap()->Test(obj.Ptr())) { |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 1613 | return true; |
| 1614 | } |
| 1615 | } else { |
| 1616 | d_space = FindDiscontinuousSpaceFromObject(obj, true); |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1617 | if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) { |
Mathieu Chartier | f082d3c | 2013-07-29 17:04:07 -0700 | [diff] [blame] | 1618 | return true; |
| 1619 | } |
| 1620 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1621 | return false; |
Elliott Hughes | 6a5bd49 | 2011-10-28 14:33:57 -0700 | [diff] [blame] | 1622 | } |
| 1623 | |
Mathieu Chartier | 4c13a3f | 2014-07-14 14:57:16 -0700 | [diff] [blame] | 1624 | std::string Heap::DumpSpaces() const { |
| 1625 | std::ostringstream oss; |
| 1626 | DumpSpaces(oss); |
| 1627 | return oss.str(); |
| 1628 | } |
| 1629 | |
| 1630 | void Heap::DumpSpaces(std::ostream& stream) const { |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1631 | for (const auto& space : continuous_spaces_) { |
Mathieu Chartier | a8e8f9c | 2014-04-09 14:51:05 -0700 | [diff] [blame] | 1632 | accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); |
| 1633 | accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1634 | stream << space << " " << *space << "\n"; |
| 1635 | if (live_bitmap != nullptr) { |
| 1636 | stream << live_bitmap << " " << *live_bitmap << "\n"; |
| 1637 | } |
| 1638 | if (mark_bitmap != nullptr) { |
| 1639 | stream << mark_bitmap << " " << *mark_bitmap << "\n"; |
| 1640 | } |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1641 | } |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1642 | for (const auto& space : discontinuous_spaces_) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1643 | stream << space << " " << *space << "\n"; |
Mathieu Chartier | 128c52c | 2012-10-16 14:12:41 -0700 | [diff] [blame] | 1644 | } |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1645 | } |
| 1646 | |
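// Sanity-checks a single object. Verification is skipped entirely when disabled or very early
// during startup (less than 10KB allocated); otherwise the object's alignment and class
// pointer are checked, and the slower modes additionally require the object to be live.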
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1647 | void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) { |
Stephen Hines | 22c6a81 | 2014-07-16 11:03:43 -0700 | [diff] [blame] | 1648 | if (verify_object_mode_ == kVerifyObjectModeDisabled) { |
| 1649 | return; |
| 1650 | } |
| 1651 | |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1652 | // Ignore early dawn of the universe verifications. |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 1653 | if (UNLIKELY(num_bytes_allocated_.load(std::memory_order_relaxed) < 10 * KB)) { |
Ian Rogers | 62d6c77 | 2013-02-27 08:32:07 -0800 | [diff] [blame] | 1654 | return; |
| 1655 | } |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1656 | CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned"; |
Ian Rogers | b0fa5dc | 2014-04-28 16:47:08 -0700 | [diff] [blame] | 1657 | mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset()); |
Mathieu Chartier | 4e30541 | 2014-02-19 10:54:44 -0800 | [diff] [blame] | 1658 | CHECK(c != nullptr) << "Null class in object " << obj; |
Roland Levillain | 14d9057 | 2015-07-16 10:52:26 +0100 | [diff] [blame] | 1659 | CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj; |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 1660 | CHECK(VerifyClassClass(c)); |
Mathieu Chartier | 0325e62 | 2012-09-05 14:22:51 -0700 | [diff] [blame] | 1661 | |
Mathieu Chartier | 4e30541 | 2014-02-19 10:54:44 -0800 | [diff] [blame] | 1662 | if (verify_object_mode_ > kVerifyObjectModeFast) { |
| 1663 | // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock. |
Mathieu Chartier | 4c13a3f | 2014-07-14 14:57:16 -0700 | [diff] [blame] | 1664 | CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces(); |
Mathieu Chartier | dcf8d72 | 2012-08-02 14:55:54 -0700 | [diff] [blame] | 1665 | } |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1666 | } |
| 1667 | |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1668 | void Heap::VerifyHeap() { |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 1669 | ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); |
Andreas Gampe | 0c18338 | 2017-07-13 22:26:24 -0700 | [diff] [blame] | 1670 | auto visitor = [&](mirror::Object* obj) { |
| 1671 | VerifyObjectBody(obj); |
| 1672 | }; |
| 1673 | // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already |
| 1674 | // NO_THREAD_SAFETY_ANALYSIS. |
| 1675 | auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS { |
| 1676 | GetLiveBitmap()->Visit(visitor); |
| 1677 | }; |
| 1678 | no_thread_safety_analysis(); |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1679 | } |
| 1680 | |
Mathieu Chartier | e76e70f | 2014-05-02 16:35:37 -0700 | [diff] [blame] | 1681 | void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) { |
Mathieu Chartier | 601276a | 2014-03-20 15:12:30 -0700 | [diff] [blame] | 1682 | // Use signed comparison since freed bytes can be negative when a background-to-foreground compaction |
Hans Boehm | a253c2d | 2019-05-13 12:38:54 -0700 | [diff] [blame] | 1683 | // transition occurs. This is typically due to objects moving from a bump pointer space to a |
| 1684 | // free list backed space, which may increase memory footprint due to padding and binning. |
Hans Boehm | fb8b4e2 | 2018-09-05 16:45:42 -0700 | [diff] [blame] | 1685 | RACING_DCHECK_LE(freed_bytes, |
| 1686 | static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed))); |
Mathieu Chartier | e76e70f | 2014-05-02 16:35:37 -0700 | [diff] [blame] | 1687 | // Note: This relies on 2s complement for handling negative freed_bytes. |
Hans Boehm | fb8b4e2 | 2018-09-05 16:45:42 -0700 | [diff] [blame] | 1688 | num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes), std::memory_order_relaxed); |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 1689 | if (Runtime::Current()->HasStatsEnabled()) { |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 1690 | RuntimeStats* thread_stats = Thread::Current()->GetStats(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1691 | thread_stats->freed_objects += freed_objects; |
Elliott Hughes | 307f75d | 2011-10-12 18:04:40 -0700 | [diff] [blame] | 1692 | thread_stats->freed_bytes += freed_bytes; |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 1693 | // TODO: Do this concurrently. |
| 1694 | RuntimeStats* global_stats = Runtime::Current()->GetStats(); |
| 1695 | global_stats->freed_objects += freed_objects; |
| 1696 | global_stats->freed_bytes += freed_bytes; |
Elliott Hughes | 9d5ccec | 2011-09-19 13:19:50 -0700 | [diff] [blame] | 1697 | } |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 1698 | } |
| 1699 | |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1700 | void Heap::RecordFreeRevoke() { |
| 1701 | // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the |
Roland Levillain | ef01222 | 2017-06-21 16:28:06 +0100 | [diff] [blame] | 1702 | // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers. |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1703 | // If there's a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_ |
| 1704 | // all the way to zero, as the remainder will simply be subtracted at the next GC. |
Hans Boehm | fb8b4e2 | 2018-09-05 16:45:42 -0700 | [diff] [blame] | 1705 | size_t bytes_freed = num_bytes_freed_revoke_.load(std::memory_order_relaxed); |
| 1706 | CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed, std::memory_order_relaxed), |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1707 | bytes_freed) << "num_bytes_freed_revoke_ underflow"; |
Hans Boehm | fb8b4e2 | 2018-09-05 16:45:42 -0700 | [diff] [blame] | 1708 | CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed, std::memory_order_relaxed), |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1709 | bytes_freed) << "num_bytes_allocated_ underflow"; |
| 1710 | GetCurrentGcIteration()->SetFreedRevoke(bytes_freed); |
| 1711 | } |
| 1712 | |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1713 | space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const { |
Mathieu Chartier | a9d82fe | 2016-01-25 20:06:11 -0800 | [diff] [blame] | 1714 | if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) { |
| 1715 | return rosalloc_space_; |
| 1716 | } |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1717 | for (const auto& space : continuous_spaces_) { |
| 1718 | if (space->AsContinuousSpace()->IsRosAllocSpace()) { |
| 1719 | if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) { |
| 1720 | return space->AsContinuousSpace()->AsRosAllocSpace(); |
| 1721 | } |
| 1722 | } |
| 1723 | } |
| 1724 | return nullptr; |
| 1725 | } |
| 1726 | |
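// Reports whether the allocation entrypoints currently point at their instrumented variants.
// The slow path below returns null when this has changed across a suspend point, presumably so
// the caller can restart the allocation through the correct entrypoint.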
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1727 | static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | eebc3af | 2016-02-29 18:13:38 -0800 | [diff] [blame] | 1728 | instrumentation::Instrumentation* const instrumentation = |
| 1729 | Runtime::Current()->GetInstrumentation(); |
| 1730 | return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented(); |
| 1731 | } |
| 1732 | |
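// The allocation slow path, reached only after TryToAllocate has failed. It waits for any
// running GC and retries, runs progressively more aggressive collections from gc_plan_
// (retrying after each), lets the allocation grow the heap, attempts homogeneous space
// compaction for the malloc-based allocators, and finally clears SoftReferences before
// throwing OutOfMemoryError. Every operation that may suspend the thread goes through
// PERFORM_SUSPENDING_OPERATION so the PreObjectAllocated event is re-sent afterwards;
// separate checks abort the allocation if the allocator or instrumentation changed meanwhile.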
Mathieu Chartier | a4f6af9 | 2015-08-11 17:35:25 -0700 | [diff] [blame] | 1733 | mirror::Object* Heap::AllocateInternalWithGc(Thread* self, |
| 1734 | AllocatorType allocator, |
Mathieu Chartier | eebc3af | 2016-02-29 18:13:38 -0800 | [diff] [blame] | 1735 | bool instrumented, |
Mathieu Chartier | a4f6af9 | 2015-08-11 17:35:25 -0700 | [diff] [blame] | 1736 | size_t alloc_size, |
| 1737 | size_t* bytes_allocated, |
Ian Rogers | 6fac447 | 2014-02-25 17:01:10 -0800 | [diff] [blame] | 1738 | size_t* usable_size, |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1739 | size_t* bytes_tl_bulk_allocated, |
Mathieu Chartier | dc540df | 2019-11-15 17:11:44 -0800 | [diff] [blame] | 1740 | ObjPtr<mirror::Class>* klass) { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1741 | bool was_default_allocator = allocator == GetCurrentAllocator(); |
Mathieu Chartier | f4f3843 | 2014-09-03 11:21:08 -0700 | [diff] [blame] | 1742 | // Make sure there is no pending exception since we may need to throw an OOME. |
| 1743 | self->AssertNoPendingException(); |
Mathieu Chartier | c528dba | 2013-11-26 12:00:11 -0800 | [diff] [blame] | 1744 | DCHECK(klass != nullptr); |
Alex Light | 986914b | 2019-11-19 01:12:25 +0000 | [diff] [blame] | 1745 | |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1746 | StackHandleScope<1> hs(self); |
Alex Light | 986914b | 2019-11-19 01:12:25 +0000 | [diff] [blame] | 1747 | HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(klass)); |
| 1748 | |
Alex Light | 001e5b3 | 2019-12-17 15:30:33 -0800 | [diff] [blame] | 1749 | auto send_object_pre_alloc = |
| 1750 | [&]() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) { |
| 1751 | if (UNLIKELY(instrumented)) { |
| 1752 | AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst); |
| 1753 | if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) { |
| 1754 | l->PreObjectAllocated(self, h_klass, &alloc_size); |
| 1755 | } |
| 1756 | } |
| 1757 | }; |
Alex Light | 986914b | 2019-11-19 01:12:25 +0000 | [diff] [blame] | 1758 | #define PERFORM_SUSPENDING_OPERATION(op) \ |
| 1759 | [&]() REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) { \ |
Mathieu Chartier | dc540df | 2019-11-15 17:11:44 -0800 | [diff] [blame] | 1760 | ScopedAllowThreadSuspension ats; \ |
Alex Light | 986914b | 2019-11-19 01:12:25 +0000 | [diff] [blame] | 1761 | auto res = (op); \ |
| 1762 | send_object_pre_alloc(); \ |
| 1763 | return res; \ |
| 1764 | }() |
| 1765 | |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 1766 | // The allocation failed. If the GC is running, block until it completes, and then retry the |
| 1767 | // allocation. |
Mathieu Chartier | dc540df | 2019-11-15 17:11:44 -0800 | [diff] [blame] | 1768 | collector::GcType last_gc = |
| 1769 | PERFORM_SUSPENDING_OPERATION(WaitForGcToComplete(kGcCauseForAlloc, self)); |
Mathieu Chartier | eebc3af | 2016-02-29 18:13:38 -0800 | [diff] [blame] | 1770 | // If we were the default allocator but the allocator changed while we were suspended, |
| 1771 | // abort the allocation. |
| 1772 | if ((was_default_allocator && allocator != GetCurrentAllocator()) || |
| 1773 | (!instrumented && EntrypointsInstrumented())) { |
| 1774 | return nullptr; |
| 1775 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1776 | if (last_gc != collector::kGcTypeNone) { |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 1777 | // A GC was in progress and we blocked, retry allocation now that memory has been freed. |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1778 | mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1779 | usable_size, bytes_tl_bulk_allocated); |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1780 | if (ptr != nullptr) { |
| 1781 | return ptr; |
| 1782 | } |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1783 | } |
| 1784 | |
Mathieu Chartier | 5ae2c93 | 2014-03-28 16:22:20 -0700 | [diff] [blame] | 1785 | collector::GcType tried_type = next_gc_type_; |
Alex Light | 986914b | 2019-11-19 01:12:25 +0000 | [diff] [blame] | 1786 | const bool gc_ran = PERFORM_SUSPENDING_OPERATION( |
| 1787 | CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone); |
| 1788 | |
Mathieu Chartier | eebc3af | 2016-02-29 18:13:38 -0800 | [diff] [blame] | 1789 | if ((was_default_allocator && allocator != GetCurrentAllocator()) || |
| 1790 | (!instrumented && EntrypointsInstrumented())) { |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1791 | return nullptr; |
| 1792 | } |
| 1793 | if (gc_ran) { |
| 1794 | mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1795 | usable_size, bytes_tl_bulk_allocated); |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1796 | if (ptr != nullptr) { |
| 1797 | return ptr; |
Mathieu Chartier | 5ae2c93 | 2014-03-28 16:22:20 -0700 | [diff] [blame] | 1798 | } |
| 1799 | } |
| 1800 | |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 1801 | // Loop through our different Gc types and try to Gc until we get enough free memory. |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 1802 | for (collector::GcType gc_type : gc_plan_) { |
Mathieu Chartier | 5ae2c93 | 2014-03-28 16:22:20 -0700 | [diff] [blame] | 1803 | if (gc_type == tried_type) { |
| 1804 | continue; |
| 1805 | } |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 1806 | // Attempt to run the collector, if we succeed, re-try the allocation. |
Alex Light | 986914b | 2019-11-19 01:12:25 +0000 | [diff] [blame] | 1807 | const bool plan_gc_ran = PERFORM_SUSPENDING_OPERATION( |
| 1808 | CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone); |
Mathieu Chartier | eebc3af | 2016-02-29 18:13:38 -0800 | [diff] [blame] | 1809 | if ((was_default_allocator && allocator != GetCurrentAllocator()) || |
| 1810 | (!instrumented && EntrypointsInstrumented())) { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1811 | return nullptr; |
| 1812 | } |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 1813 | if (plan_gc_ran) { |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 1814 | // Did we free sufficient memory for the allocation to succeed? |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1815 | mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1816 | usable_size, bytes_tl_bulk_allocated); |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1817 | if (ptr != nullptr) { |
| 1818 | return ptr; |
| 1819 | } |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 1820 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1821 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1822 | // Allocations have failed after GCs; this is an exceptional state. |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1823 | // Try harder, growing the heap if necessary. |
| 1824 | mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1825 | usable_size, bytes_tl_bulk_allocated); |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1826 | if (ptr != nullptr) { |
| 1827 | return ptr; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1828 | } |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1829 | // Most allocations should have succeeded by now, so the heap is really full, really fragmented, |
| 1830 | // or the requested size is really big. Do another GC, collecting SoftReferences this time. The |
| 1831 | // VM spec requires that all SoftReferences have been collected and cleared before throwing |
| 1832 | // OOME. |
| 1833 | VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) |
| 1834 | << " allocation"; |
| 1835 | // TODO: Run finalization, but this may cause more allocations to occur. |
| 1836 | // We don't need a WaitForGcToComplete here either. |
| 1837 | DCHECK(!gc_plan_.empty()); |
Alex Light | 986914b | 2019-11-19 01:12:25 +0000 | [diff] [blame] | 1838 | PERFORM_SUSPENDING_OPERATION(CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true)); |
Mathieu Chartier | eebc3af | 2016-02-29 18:13:38 -0800 | [diff] [blame] | 1839 | if ((was_default_allocator && allocator != GetCurrentAllocator()) || |
| 1840 | (!instrumented && EntrypointsInstrumented())) { |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1841 | return nullptr; |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 1842 | } |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1843 | ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size, |
| 1844 | bytes_tl_bulk_allocated); |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1845 | if (ptr == nullptr) { |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1846 | const uint64_t current_time = NanoTime(); |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1847 | switch (allocator) { |
| 1848 | case kAllocatorTypeRosAlloc: |
| 1849 | // Fall-through. |
| 1850 | case kAllocatorTypeDlMalloc: { |
| 1851 | if (use_homogeneous_space_compaction_for_oom_ && |
| 1852 | current_time - last_time_homogeneous_space_compaction_by_oom_ > |
| 1853 | min_interval_homogeneous_space_compaction_by_oom_) { |
| 1854 | last_time_homogeneous_space_compaction_by_oom_ = current_time; |
Alex Light | 986914b | 2019-11-19 01:12:25 +0000 | [diff] [blame] | 1855 | HomogeneousSpaceCompactResult result = |
| 1856 | PERFORM_SUSPENDING_OPERATION(PerformHomogeneousSpaceCompact()); |
Mathieu Chartier | eebc3af | 2016-02-29 18:13:38 -0800 | [diff] [blame] | 1857 | // Thread suspension could have occurred. |
| 1858 | if ((was_default_allocator && allocator != GetCurrentAllocator()) || |
| 1859 | (!instrumented && EntrypointsInstrumented())) { |
| 1860 | return nullptr; |
| 1861 | } |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1862 | switch (result) { |
| 1863 | case HomogeneousSpaceCompactResult::kSuccess: |
| 1864 | // If the allocation succeeded, we delayed an OOM. |
| 1865 | ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1866 | usable_size, bytes_tl_bulk_allocated); |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1867 | if (ptr != nullptr) { |
| 1868 | count_delayed_oom_++; |
| 1869 | } |
| 1870 | break; |
| 1871 | case HomogeneousSpaceCompactResult::kErrorReject: |
| 1872 | // Reject due to disabled moving GC. |
| 1873 | break; |
| 1874 | case HomogeneousSpaceCompactResult::kErrorVMShuttingDown: |
| 1875 | // Throw OOM by default. |
| 1876 | break; |
| 1877 | default: { |
Ian Rogers | 2c4257b | 2014-10-24 14:20:06 -0700 | [diff] [blame] | 1878 | UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: " |
| 1879 | << static_cast<size_t>(result); |
| 1880 | UNREACHABLE(); |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1881 | } |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1882 | } |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1883 | // Always print that we ran homogeneous space compaction since this can cause jank. |
| 1884 | VLOG(heap) << "Ran heap homogeneous space compaction, " |
| 1885 | << " requested defragmentation " |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 1886 | << count_requested_homogeneous_space_compaction_.load() |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1887 | << " performed defragmentation " |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 1888 | << count_performed_homogeneous_space_compaction_.load() |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1889 | << " ignored homogeneous space compaction " |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 1890 | << count_ignored_homogeneous_space_compaction_.load() |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1891 | << " delayed count = " |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 1892 | << count_delayed_oom_.load(); |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1893 | } |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1894 | break; |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1895 | } |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 1896 | default: { |
| 1897 | // Do nothing for other allocators. |
| 1898 | } |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1899 | } |
| 1900 | } |
Alex Light | 986914b | 2019-11-19 01:12:25 +0000 | [diff] [blame] | 1901 | #undef PERFORM_SUSPENDING_OPERATION |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1902 | // If the allocation hasn't succeeded by this point, throw an OOM error. |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1903 | if (ptr == nullptr) { |
Mathieu Chartier | dc540df | 2019-11-15 17:11:44 -0800 | [diff] [blame] | 1904 | ScopedAllowThreadSuspension ats; |
Hiroshi Yamauchi | 654dd48 | 2014-07-09 12:54:32 -0700 | [diff] [blame] | 1905 | ThrowOutOfMemoryError(self, alloc_size, allocator); |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1906 | } |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 1907 | return ptr; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1908 | } |
| 1909 | |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1910 | void Heap::SetTargetHeapUtilization(float target) { |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 1911 | DCHECK_GT(target, 0.1f); // asserted in Java code |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1912 | DCHECK_LT(target, 1.0f); |
| 1913 | target_utilization_ = target; |
| 1914 | } |
| 1915 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1916 | size_t Heap::GetObjectsAllocated() const { |
Mathieu Chartier | 4f55e22 | 2015-09-04 13:26:21 -0700 | [diff] [blame] | 1917 | Thread* const self = Thread::Current(); |
Mathieu Chartier | b43390c | 2015-05-12 10:47:11 -0700 | [diff] [blame] | 1918 | ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated); |
Roland Levillain | ef01222 | 2017-06-21 16:28:06 +0100 | [diff] [blame] | 1919 | // Prevent GC from running during GetObjectsAllocated since we may get a checkpoint request that tells |
Mathieu Chartier | e8649c7 | 2017-03-03 18:02:18 -0800 | [diff] [blame] | 1920 | // us to suspend while we are doing SuspendAll. b/35232978 |
| 1921 | gc::ScopedGCCriticalSection gcs(Thread::Current(), |
| 1922 | gc::kGcCauseGetObjectsAllocated, |
| 1923 | gc::kCollectorTypeGetObjectsAllocated); |
Mathieu Chartier | b43390c | 2015-05-12 10:47:11 -0700 | [diff] [blame] | 1924 | // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll. |
Mathieu Chartier | 4f55e22 | 2015-09-04 13:26:21 -0700 | [diff] [blame] | 1925 | ScopedSuspendAll ssa(__FUNCTION__); |
| 1926 | ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1927 | size_t total = 0; |
Mathieu Chartier | 4f55e22 | 2015-09-04 13:26:21 -0700 | [diff] [blame] | 1928 | for (space::AllocSpace* space : alloc_spaces_) { |
| 1929 | total += space->GetObjectsAllocated(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1930 | } |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1931 | return total; |
| 1932 | } |
| 1933 | |
Mathieu Chartier | dd162fb | 2014-08-06 17:06:33 -0700 | [diff] [blame] | 1934 | uint64_t Heap::GetObjectsAllocatedEver() const { |
Mathieu Chartier | 4edd847 | 2015-06-01 10:47:36 -0700 | [diff] [blame] | 1935 | uint64_t total = GetObjectsFreedEver(); |
| 1936 | // If we are detached, we can't use GetObjectsAllocated since we can't change thread states. |
| 1937 | if (Thread::Current() != nullptr) { |
| 1938 | total += GetObjectsAllocated(); |
| 1939 | } |
| 1940 | return total; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1941 | } |
| 1942 | |
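// The compare-exchange loop below publishes the largest total observed so far, so concurrent
// callers never see this value decrease even though its two inputs are updated independently.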
Mathieu Chartier | dd162fb | 2014-08-06 17:06:33 -0700 | [diff] [blame] | 1943 | uint64_t Heap::GetBytesAllocatedEver() const { |
Hans Boehm | 4c6d765 | 2019-11-01 09:23:19 -0700 | [diff] [blame] | 1944 | // Force the returned value to be monotonically increasing, in the sense that if this is called |
| 1945 | // at A and B, such that A happens-before B, then the call at B returns a value no smaller than |
| 1946 | // that at A. This is not otherwise guaranteed, since num_bytes_allocated_ is decremented first, |
| 1947 | // and total_bytes_freed_ever_ is incremented later. |
| 1948 | static std::atomic<uint64_t> max_bytes_so_far(0); |
| 1949 | uint64_t so_far = max_bytes_so_far.load(std::memory_order_relaxed); |
| 1950 | uint64_t current_bytes = GetBytesFreedEver(std::memory_order_acquire); |
| 1951 | current_bytes += GetBytesAllocated(); |
| 1952 | do { |
| 1953 | if (current_bytes <= so_far) { |
| 1954 | return so_far; |
| 1955 | } |
| 1956 | } while (!max_bytes_so_far.compare_exchange_weak(so_far /* updated */, |
| 1957 | current_bytes, std::memory_order_relaxed)); |
| 1958 | return current_bytes; |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 1959 | } |
| 1960 | |
Richard Uhler | 660be6f | 2017-11-22 16:12:29 +0000 | [diff] [blame] | 1961 | // Check whether the given object is an instance of the given class. |
| 1962 | static bool MatchesClass(mirror::Object* obj, |
| 1963 | Handle<mirror::Class> h_class, |
| 1964 | bool use_is_assignable_from) REQUIRES_SHARED(Locks::mutator_lock_) { |
| 1965 | mirror::Class* instance_class = obj->GetClass(); |
| 1966 | CHECK(instance_class != nullptr); |
| 1967 | ObjPtr<mirror::Class> klass = h_class.Get(); |
| 1968 | if (use_is_assignable_from) { |
| 1969 | return klass != nullptr && klass->IsAssignableFrom(instance_class); |
| 1970 | } |
| 1971 | return instance_class == klass; |
| 1972 | } |
| 1973 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 1974 | void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes, |
| 1975 | bool use_is_assignable_from, |
Elliott Hughes | ec0f83d | 2013-01-15 16:54:08 -0800 | [diff] [blame] | 1976 | uint64_t* counts) { |
Andreas Gampe | 1c158a0 | 2017-07-13 17:26:19 -0700 | [diff] [blame] | 1977 | auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { |
Andreas Gampe | 1c158a0 | 2017-07-13 17:26:19 -0700 | [diff] [blame] | 1978 | for (size_t i = 0; i < classes.size(); ++i) { |
Richard Uhler | 660be6f | 2017-11-22 16:12:29 +0000 | [diff] [blame] | 1979 | if (MatchesClass(obj, classes[i], use_is_assignable_from)) { |
Andreas Gampe | 1c158a0 | 2017-07-13 17:26:19 -0700 | [diff] [blame] | 1980 | ++counts[i]; |
Elliott Hughes | 3b78c94 | 2013-01-15 17:35:41 -0800 | [diff] [blame] | 1981 | } |
| 1982 | } |
Andreas Gampe | 1c158a0 | 2017-07-13 17:26:19 -0700 | [diff] [blame] | 1983 | }; |
| 1984 | VisitObjects(instance_counter); |
Elliott Hughes | 3b78c94 | 2013-01-15 17:35:41 -0800 | [diff] [blame] | 1985 | } |
| 1986 | |
Andreas Gampe | 1c158a0 | 2017-07-13 17:26:19 -0700 | [diff] [blame] | 1987 | void Heap::GetInstances(VariableSizedHandleScope& scope, |
| 1988 | Handle<mirror::Class> h_class, |
Richard Uhler | 660be6f | 2017-11-22 16:12:29 +0000 | [diff] [blame] | 1989 | bool use_is_assignable_from, |
Andreas Gampe | 1c158a0 | 2017-07-13 17:26:19 -0700 | [diff] [blame] | 1990 | int32_t max_count, |
| 1991 | std::vector<Handle<mirror::Object>>& instances) { |
| 1992 | DCHECK_GE(max_count, 0); |
| 1993 | auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { |
Richard Uhler | 660be6f | 2017-11-22 16:12:29 +0000 | [diff] [blame] | 1994 | if (MatchesClass(obj, h_class, use_is_assignable_from)) { |
Andreas Gampe | 1c158a0 | 2017-07-13 17:26:19 -0700 | [diff] [blame] | 1995 | if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) { |
| 1996 | instances.push_back(scope.NewHandle(obj)); |
| 1997 | } |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 1998 | } |
Andreas Gampe | 1c158a0 | 2017-07-13 17:26:19 -0700 | [diff] [blame] | 1999 | }; |
| 2000 | VisitObjects(instance_collector); |
| 2001 | } |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 2002 | |
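// Visits every object in the heap and records those holding a field reference to 'o', up to
// max_count entries (0 means unlimited). References from GC roots are deliberately ignored.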
Mathieu Chartier | aea9bfb | 2016-10-12 19:19:56 -0700 | [diff] [blame] | 2003 | void Heap::GetReferringObjects(VariableSizedHandleScope& scope, |
| 2004 | Handle<mirror::Object> o, |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 2005 | int32_t max_count, |
Mathieu Chartier | aea9bfb | 2016-10-12 19:19:56 -0700 | [diff] [blame] | 2006 | std::vector<Handle<mirror::Object>>& referring_objects) { |
Andreas Gampe | 1c158a0 | 2017-07-13 17:26:19 -0700 | [diff] [blame] | 2007 | class ReferringObjectsFinder { |
| 2008 | public: |
| 2009 | ReferringObjectsFinder(VariableSizedHandleScope& scope_in, |
| 2010 | Handle<mirror::Object> object_in, |
| 2011 | int32_t max_count_in, |
| 2012 | std::vector<Handle<mirror::Object>>& referring_objects_in) |
| 2013 | REQUIRES_SHARED(Locks::mutator_lock_) |
| 2014 | : scope_(scope_in), |
| 2015 | object_(object_in), |
| 2016 | max_count_(max_count_in), |
| 2017 | referring_objects_(referring_objects_in) {} |
| 2018 | |
| 2019 | // For Object::VisitReferences. |
| 2020 | void operator()(ObjPtr<mirror::Object> obj, |
| 2021 | MemberOffset offset, |
| 2022 | bool is_static ATTRIBUTE_UNUSED) const |
| 2023 | REQUIRES_SHARED(Locks::mutator_lock_) { |
| 2024 | mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); |
| 2025 | if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) { |
| 2026 | referring_objects_.push_back(scope_.NewHandle(obj)); |
| 2027 | } |
| 2028 | } |
| 2029 | |
| 2030 | void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) |
| 2031 | const {} |
| 2032 | void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} |
| 2033 | |
| 2034 | private: |
| 2035 | VariableSizedHandleScope& scope_; |
| 2036 | Handle<mirror::Object> const object_; |
| 2037 | const uint32_t max_count_; |
| 2038 | std::vector<Handle<mirror::Object>>& referring_objects_; |
| 2039 | DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder); |
| 2040 | }; |
Mathieu Chartier | aea9bfb | 2016-10-12 19:19:56 -0700 | [diff] [blame] | 2041 | ReferringObjectsFinder finder(scope, o, max_count, referring_objects); |
Andreas Gampe | 1c158a0 | 2017-07-13 17:26:19 -0700 | [diff] [blame] | 2042 | auto referring_objects_finder = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { |
| 2043 | obj->VisitReferences(finder, VoidFunctor()); |
| 2044 | }; |
| 2045 | VisitObjects(referring_objects_finder); |
Elliott Hughes | 0cbaff5 | 2013-01-16 15:28:01 -0800 | [diff] [blame] | 2046 | } |
| 2047 | |
Andreas Gampe | 94c589d | 2017-12-27 12:43:01 -0800 | [diff] [blame] | 2048 | void Heap::CollectGarbage(bool clear_soft_references, GcCause cause) { |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 2049 | // Even if we waited for a GC, we still need to do another GC since weaks allocated during the |
| 2050 | // last GC will not necessarily have been cleared. |
Andreas Gampe | 94c589d | 2017-12-27 12:43:01 -0800 | [diff] [blame] | 2051 | CollectGarbageInternal(gc_plan_.back(), cause, clear_soft_references); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 2052 | } |
| 2053 | |
Mathieu Chartier | db00eaf | 2015-08-31 17:10:05 -0700 | [diff] [blame] | 2054 | bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const { |
| 2055 | return main_space_backup_.get() != nullptr && main_space_ != nullptr && |
| 2056 | foreground_collector_type_ == kCollectorTypeCMS; |
| 2057 | } |
| 2058 | |
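// Compacts the main malloc space by copying it into the backup space while all threads are
// suspended, then swaps the two spaces and logs the achieved compaction ratio. Bails out when
// moving GC is disabled, the configuration does not support the transition, or the runtime is
// shutting down.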
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 2059 | HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() { |
| 2060 | Thread* self = Thread::Current(); |
| 2061 | // Increment the requested homogeneous space compaction count. |
| 2062 | count_requested_homogeneous_space_compaction_++; |
| 2063 | // Store performed homogeneous space compaction at a new request arrival. |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 2064 | ScopedThreadStateChange tsc(self, kWaitingPerformingGc); |
Ziang Wan | 92db59b | 2019-07-22 21:19:24 +0000 | [diff] [blame] | 2065 | Locks::mutator_lock_->AssertNotHeld(self); |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 2066 | { |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 2067 | ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete); |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 2068 | MutexLock mu(self, *gc_complete_lock_); |
| 2069 | // Ensure there is only one GC at a time. |
| 2070 | WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self); |
Roland Levillain | 2ae376f | 2018-01-30 11:35:11 +0000 | [diff] [blame] | 2071 | // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC disable |
| 2072 | // count is non-zero. |
| 2073 | // If the collector type changed to something which doesn't benefit from homogeneous space |
| 2074 | // compaction, exit. |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 2075 | if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) || |
| 2076 | !main_space_->CanMoveObjects()) { |
Mathieu Chartier | db00eaf | 2015-08-31 17:10:05 -0700 | [diff] [blame] | 2077 | return kErrorReject; |
| 2078 | } |
| 2079 | if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) { |
| 2080 | return kErrorUnsupported; |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 2081 | } |
| 2082 | collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact; |
| 2083 | } |
| 2084 | if (Runtime::Current()->IsShuttingDown(self)) { |
| 2085 | // Don't allow heap transitions to happen if the runtime is shutting down since these can |
| 2086 | // cause objects to get finalized. |
| 2087 | FinishGC(self, collector::kGcTypeNone); |
| 2088 | return HomogeneousSpaceCompactResult::kErrorVMShuttingDown; |
| 2089 | } |
Mathieu Chartier | 4f55e22 | 2015-09-04 13:26:21 -0700 | [diff] [blame] | 2090 | collector::GarbageCollector* collector; |
| 2091 | { |
| 2092 | ScopedSuspendAll ssa(__FUNCTION__); |
| 2093 | uint64_t start_time = NanoTime(); |
| 2094 | // Launch compaction. |
| 2095 | space::MallocSpace* to_space = main_space_backup_.release(); |
| 2096 | space::MallocSpace* from_space = main_space_; |
| 2097 | to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE); |
| 2098 | const uint64_t space_size_before_compaction = from_space->Size(); |
| 2099 | AddSpace(to_space); |
| 2100 | // Make sure that we will have enough room to copy. |
| 2101 | CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit()); |
| 2102 | collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact); |
| 2103 | const uint64_t space_size_after_compaction = to_space->Size(); |
| 2104 | main_space_ = to_space; |
| 2105 | main_space_backup_.reset(from_space); |
| 2106 | RemoveSpace(from_space); |
| 2107 | SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space. |
| 2108 | // Update performed homogeneous space compaction count. |
| 2109 | count_performed_homogeneous_space_compaction_++; |
 | 2110 | // Print statistics log and resume all threads. |
| 2111 | uint64_t duration = NanoTime() - start_time; |
| 2112 | VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: " |
| 2113 | << PrettySize(space_size_before_compaction) << " -> " |
| 2114 | << PrettySize(space_size_after_compaction) << " compact-ratio: " |
| 2115 | << std::fixed << static_cast<double>(space_size_after_compaction) / |
| 2116 | static_cast<double>(space_size_before_compaction); |
| 2117 | } |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 2118 | // Finish GC. |
Alex Light | e302088 | 2019-05-13 16:35:02 -0700 | [diff] [blame] | 2119 | // Get the references we need to enqueue. |
| 2120 | SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self); |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 2121 | GrowForUtilization(semi_space_collector_); |
Hiroshi Yamauchi | e4d9987 | 2015-02-26 12:53:45 -0800 | [diff] [blame] | 2122 | LogGC(kGcCauseHomogeneousSpaceCompact, collector); |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 2123 | FinishGC(self, collector::kGcTypeFull); |
Alex Light | e302088 | 2019-05-13 16:35:02 -0700 | [diff] [blame] | 2124 | // Enqueue any references after losing the GC locks. |
| 2125 | clear->Run(self); |
| 2126 | clear->Finalize(); |
Mathieu Chartier | 598302a | 2015-09-23 14:52:39 -0700 | [diff] [blame] | 2127 | { |
| 2128 | ScopedObjectAccess soa(self); |
| 2129 | soa.Vm()->UnloadNativeLibraries(); |
| 2130 | } |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 2131 | return HomogeneousSpaceCompactResult::kSuccess; |
| 2132 | } |
| 2133 | |
Mathieu Chartier | 0de9f73 | 2013-11-22 17:58:48 -0800 | [diff] [blame] | 2134 | void Heap::ChangeCollector(CollectorType collector_type) { |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2135 | // TODO: Only do this with all mutators suspended to avoid races. |
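 | | // gc_plan_ lists the GC types to attempt for the new collector, ordered from cheapest (sticky) |
 | | // to most expensive (full); the allocator is switched below to match the chosen collector. |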
| 2136 | if (collector_type != collector_type_) { |
| 2137 | collector_type_ = collector_type; |
| 2138 | gc_plan_.clear(); |
| 2139 | switch (collector_type_) { |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 2140 | case kCollectorTypeCC: { |
Albert Mingkun Yang | 0b4d146 | 2018-11-29 13:25:35 +0000 | [diff] [blame] | 2141 | if (use_generational_cc_) { |
Mathieu Chartier | 8d1a996 | 2016-08-17 16:39:45 -0700 | [diff] [blame] | 2142 | gc_plan_.push_back(collector::kGcTypeSticky); |
| 2143 | } |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 2144 | gc_plan_.push_back(collector::kGcTypeFull); |
| 2145 | if (use_tlab_) { |
| 2146 | ChangeAllocator(kAllocatorTypeRegionTLAB); |
| 2147 | } else { |
| 2148 | ChangeAllocator(kAllocatorTypeRegion); |
| 2149 | } |
| 2150 | break; |
| 2151 | } |
Mathieu Chartier | f75dce4 | 2019-04-08 09:36:23 -0700 | [diff] [blame] | 2152 | case kCollectorTypeSS: { |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2153 | gc_plan_.push_back(collector::kGcTypeFull); |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 2154 | if (use_tlab_) { |
| 2155 | ChangeAllocator(kAllocatorTypeTLAB); |
| 2156 | } else { |
| 2157 | ChangeAllocator(kAllocatorTypeBumpPointer); |
| 2158 | } |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2159 | break; |
| 2160 | } |
| 2161 | case kCollectorTypeMS: { |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2162 | gc_plan_.push_back(collector::kGcTypeSticky); |
| 2163 | gc_plan_.push_back(collector::kGcTypePartial); |
| 2164 | gc_plan_.push_back(collector::kGcTypeFull); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 2165 | ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc); |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2166 | break; |
| 2167 | } |
| 2168 | case kCollectorTypeCMS: { |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2169 | gc_plan_.push_back(collector::kGcTypeSticky); |
| 2170 | gc_plan_.push_back(collector::kGcTypePartial); |
| 2171 | gc_plan_.push_back(collector::kGcTypeFull); |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 2172 | ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc); |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2173 | break; |
| 2174 | } |
| 2175 | default: { |
Ian Rogers | 2c4257b | 2014-10-24 14:20:06 -0700 | [diff] [blame] | 2176 | UNIMPLEMENTED(FATAL); |
| 2177 | UNREACHABLE(); |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2178 | } |
Mathieu Chartier | 0de9f73 | 2013-11-22 17:58:48 -0800 | [diff] [blame] | 2179 | } |
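 | | // concurrent_start_bytes_ is the allocation watermark at which a concurrent GC is requested; |
 | | // for non-concurrent collectors it is set to SIZE_MAX below so the trigger never fires. |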
Hiroshi Yamauchi | 3e41780 | 2014-03-20 12:03:02 -0700 | [diff] [blame] | 2180 | if (IsGcConcurrent()) { |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2181 | concurrent_start_bytes_ = |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 2182 | UnsignedDifference(target_footprint_.load(std::memory_order_relaxed), |
| 2183 | kMinConcurrentRemainingBytes); |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2184 | } else { |
| 2185 | concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); |
Mathieu Chartier | 0de9f73 | 2013-11-22 17:58:48 -0800 | [diff] [blame] | 2186 | } |
| 2187 | } |
| 2188 | } |
| 2189 | |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2190 | // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size. |
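 | | // The "bins" are the gaps between live objects in the existing non-moving space. Surviving |
 | | // objects from the source space are packed into those gaps (best-fit); anything that does not |
 | | // fit falls through to the end of the target space, growing the zygote space. |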
Roland Levillain | bbc6e7e | 2018-08-24 16:58:47 +0100 | [diff] [blame] | 2191 | class ZygoteCompactingCollector final : public collector::SemiSpace { |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2192 | public: |
Roland Levillain | 3887c46 | 2015-08-12 18:15:42 +0100 | [diff] [blame] | 2193 | ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool) |
Mathieu Chartier | f75dce4 | 2019-04-08 09:36:23 -0700 | [diff] [blame] | 2194 | : SemiSpace(heap, "zygote collector"), |
Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 2195 | bin_live_bitmap_(nullptr), |
| 2196 | bin_mark_bitmap_(nullptr), |
| 2197 | is_running_on_memory_tool_(is_running_on_memory_tool) {} |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2198 | |
Andreas Gampe | 0c18338 | 2017-07-13 22:26:24 -0700 | [diff] [blame] | 2199 | void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2200 | bin_live_bitmap_ = space->GetLiveBitmap(); |
| 2201 | bin_mark_bitmap_ = space->GetMarkBitmap(); |
Andreas Gampe | 0c18338 | 2017-07-13 22:26:24 -0700 | [diff] [blame] | 2202 | uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin()); |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2203 | WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); |
| 2204 | // Note: This requires traversing the space in increasing order of object addresses. |
Andreas Gampe | 0c18338 | 2017-07-13 22:26:24 -0700 | [diff] [blame] | 2205 | auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { |
| 2206 | uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj); |
| 2207 | size_t bin_size = object_addr - prev; |
 | 2208 | // Add the bin spanning from the end of the previous object to the start of the current object. |
| 2209 | AddBin(bin_size, prev); |
| 2210 | prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment); |
| 2211 | }; |
| 2212 | bin_live_bitmap_->Walk(visitor); |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2213 | // Add the last bin which spans after the last object to the end of the space. |
Andreas Gampe | 0c18338 | 2017-07-13 22:26:24 -0700 | [diff] [blame] | 2214 | AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev); |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2215 | } |
| 2216 | |
| 2217 | private: |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2218 | // Maps from bin sizes to locations. |
| 2219 | std::multimap<size_t, uintptr_t> bins_; |
| 2220 | // Live bitmap of the space which contains the bins. |
Mathieu Chartier | a8e8f9c | 2014-04-09 14:51:05 -0700 | [diff] [blame] | 2221 | accounting::ContinuousSpaceBitmap* bin_live_bitmap_; |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2222 | // Mark bitmap of the space which contains the bins. |
Mathieu Chartier | a8e8f9c | 2014-04-09 14:51:05 -0700 | [diff] [blame] | 2223 | accounting::ContinuousSpaceBitmap* bin_mark_bitmap_; |
Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 2224 | const bool is_running_on_memory_tool_; |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2225 | |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2226 | void AddBin(size_t size, uintptr_t position) { |
Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 2227 | if (is_running_on_memory_tool_) { |
| 2228 | MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size); |
| 2229 | } |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2230 | if (size != 0) { |
| 2231 | bins_.insert(std::make_pair(size, position)); |
| 2232 | } |
| 2233 | } |
| 2234 | |
Andreas Gampe | fa6a1b0 | 2018-09-07 08:11:55 -0700 | [diff] [blame] | 2235 | bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const override { |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2236 | // Don't sweep any spaces since we probably blasted the internal accounting of the free list |
| 2237 | // allocator. |
| 2238 | return false; |
| 2239 | } |
| 2240 | |
Andreas Gampe | fa6a1b0 | 2018-09-07 08:11:55 -0700 | [diff] [blame] | 2241 | mirror::Object* MarkNonForwardedObject(mirror::Object* obj) override |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 2242 | REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { |
Mathieu Chartier | d08f66f | 2017-04-13 11:47:53 -0700 | [diff] [blame] | 2243 | size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>(); |
Hiroshi Yamauchi | 8711d1f | 2015-03-13 16:48:55 -0700 | [diff] [blame] | 2244 | size_t alloc_size = RoundUp(obj_size, kObjectAlignment); |
Mathieu Chartier | 5dc08a6 | 2014-01-10 10:10:23 -0800 | [diff] [blame] | 2245 | mirror::Object* forward_address; |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2246 | // Find the smallest bin into which we can move obj. |
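 | | // This is a best-fit lookup: bins_ is a multimap keyed by bin size, so lower_bound() returns |
 | | // the smallest recorded gap that is at least alloc_size bytes. |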
Hiroshi Yamauchi | 8711d1f | 2015-03-13 16:48:55 -0700 | [diff] [blame] | 2247 | auto it = bins_.lower_bound(alloc_size); |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2248 | if (it == bins_.end()) { |
 | 2249 | // No available space in the bins; place it in the target space instead (grows the zygote |
| 2250 | // space). |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 2251 | size_t bytes_allocated, dummy; |
Hiroshi Yamauchi | 8711d1f | 2015-03-13 16:48:55 -0700 | [diff] [blame] | 2252 | forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy); |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2253 | if (to_space_live_bitmap_ != nullptr) { |
| 2254 | to_space_live_bitmap_->Set(forward_address); |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2255 | } else { |
| 2256 | GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address); |
| 2257 | GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address); |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2258 | } |
| 2259 | } else { |
| 2260 | size_t size = it->first; |
| 2261 | uintptr_t pos = it->second; |
| 2262 | bins_.erase(it); // Erase the old bin which we replace with the new smaller bin. |
| 2263 | forward_address = reinterpret_cast<mirror::Object*>(pos); |
| 2264 | // Set the live and mark bits so that sweeping system weaks works properly. |
| 2265 | bin_live_bitmap_->Set(forward_address); |
| 2266 | bin_mark_bitmap_->Set(forward_address); |
Hiroshi Yamauchi | 8711d1f | 2015-03-13 16:48:55 -0700 | [diff] [blame] | 2267 | DCHECK_GE(size, alloc_size); |
| 2268 | // Add a new bin with the remaining space. |
| 2269 | AddBin(size - alloc_size, pos + alloc_size); |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2270 | } |
Roland Levillain | 05e34f4 | 2018-05-24 13:19:05 +0000 | [diff] [blame] | 2271 | // Copy the object over to its new location. |
| 2272 | // Historical note: We did not use `alloc_size` to avoid a Valgrind error. |
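 | | // Copying obj_size (rather than alloc_size) is still correct: the bytes between obj_size and |
 | | // the kObjectAlignment-rounded alloc_size are only padding and never hold object data. |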
Hiroshi Yamauchi | 8711d1f | 2015-03-13 16:48:55 -0700 | [diff] [blame] | 2273 | memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size); |
Hiroshi Yamauchi | 12b58b2 | 2016-11-01 11:55:29 -0700 | [diff] [blame] | 2274 | if (kUseBakerReadBarrier) { |
| 2275 | obj->AssertReadBarrierState(); |
| 2276 | forward_address->AssertReadBarrierState(); |
Hiroshi Yamauchi | 9d04a20 | 2014-01-31 13:35:49 -0800 | [diff] [blame] | 2277 | } |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2278 | return forward_address; |
| 2279 | } |
| 2280 | }; |
| 2281 | |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 2282 | void Heap::UnBindBitmaps() { |
Mathieu Chartier | f5997b4 | 2014-06-20 10:37:54 -0700 | [diff] [blame] | 2283 | TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings()); |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 2284 | for (const auto& space : GetContinuousSpaces()) { |
| 2285 | if (space->IsContinuousMemMapAllocSpace()) { |
| 2286 | space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); |
Mathieu Chartier | 7c50274 | 2019-08-01 12:47:18 -0700 | [diff] [blame] | 2287 | if (alloc_space->GetLiveBitmap() != nullptr && alloc_space->HasBoundBitmaps()) { |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 2288 | alloc_space->UnBindBitmaps(); |
| 2289 | } |
| 2290 | } |
| 2291 | } |
| 2292 | } |
| 2293 | |
Hans Boehm | 4c6d765 | 2019-11-01 09:23:19 -0700 | [diff] [blame] | 2294 | void Heap::IncrementFreedEver() { |
| 2295 | // Counters are updated only by us, but may be read concurrently. |
| 2296 | // The updates should become visible after the corresponding live object info. |
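 | | // The relaxed loads are safe because this thread is the only writer; the release stores ensure |
 | | // that a reader observing the new totals also observes the preceding GC bookkeeping, assuming |
 | | // readers pair these stores with acquire (or stronger) loads. |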
| 2297 | total_objects_freed_ever_.store(total_objects_freed_ever_.load(std::memory_order_relaxed) |
| 2298 | + GetCurrentGcIteration()->GetFreedObjects() |
| 2299 | + GetCurrentGcIteration()->GetFreedLargeObjects(), |
| 2300 | std::memory_order_release); |
| 2301 | total_bytes_freed_ever_.store(total_bytes_freed_ever_.load(std::memory_order_relaxed) |
| 2302 | + GetCurrentGcIteration()->GetFreedBytes() |
| 2303 | + GetCurrentGcIteration()->GetFreedLargeObjectBytes(), |
| 2304 | std::memory_order_release); |
| 2305 | } |
| 2306 | |
Hans Boehm | 65c18a2 | 2020-01-03 23:37:13 +0000 | [diff] [blame] | 2307 | #pragma clang diagnostic push |
| 2308 | #if !ART_USE_FUTEXES |
| 2309 | // Frame gets too large, perhaps due to Bionic pthread_mutex_lock size. We don't care. |
| 2310 | # pragma clang diagnostic ignored "-Wframe-larger-than=" |
| 2311 | #endif |
| 2312 | // This has a large frame, but shouldn't be run anywhere near the stack limit. |
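 | | // Called before the zygote forks an app process; the heavy work happens only the first time |
 | | // (once a zygote space exists, the method returns early). Objects allocated so far are compacted |
 | | // into a dedicated zygote space so that those pages can be shared copy-on-write across processes. |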
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 2313 | void Heap::PreZygoteFork() { |
Mathieu Chartier | faed995 | 2015-03-31 16:28:53 -0700 | [diff] [blame] | 2314 | if (!HasZygoteSpace()) { |
 | 2315 | // We still want to GC in case there are some unreachable non-moving objects that could cause |
 | 2316 | // suboptimal bin packing when we compact the zygote space. |
| 2317 | CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false); |
Mathieu Chartier | 76ce917 | 2016-01-27 10:44:20 -0800 | [diff] [blame] | 2318 | // Trim the pages at the end of the non-moving space. Trim while not holding the zygote lock since |
| 2319 | // the trim process may require locking the mutator lock. |
| 2320 | non_moving_space_->Trim(); |
Mathieu Chartier | faed995 | 2015-03-31 16:28:53 -0700 | [diff] [blame] | 2321 | } |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 2322 | Thread* self = Thread::Current(); |
| 2323 | MutexLock mu(self, zygote_creation_lock_); |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 2324 | // Try to see if we have any Zygote spaces. |
Mathieu Chartier | e4cab17 | 2014-08-19 18:24:04 -0700 | [diff] [blame] | 2325 | if (HasZygoteSpace()) { |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 2326 | return; |
| 2327 | } |
Mathieu Chartier | ea0831f | 2015-12-29 13:17:37 -0800 | [diff] [blame] | 2328 | Runtime::Current()->GetInternTable()->AddNewTable(); |
Mathieu Chartier | c2e2062 | 2014-11-03 11:41:47 -0800 | [diff] [blame] | 2329 | Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2330 | VLOG(heap) << "Starting PreZygoteFork"; |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2331 | // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote |
| 2332 | // there. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2333 | non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE); |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 2334 | const bool same_space = non_moving_space_ == main_space_; |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2335 | if (kCompactZygote) { |
Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 2336 | // Temporarily disable rosalloc verification because the zygote |
| 2337 | // compaction will mess up the rosalloc internal metadata. |
| 2338 | ScopedDisableRosAllocVerification disable_rosalloc_verif(this); |
Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 2339 | ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_); |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2340 | zygote_collector.BuildBins(non_moving_space_); |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 2341 | // Create a new bump pointer space which we will compact into. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2342 | space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(), |
| 2343 | non_moving_space_->Limit()); |
| 2344 | // Compact the bump pointer space to a new zygote bump pointer space. |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2345 | bool reset_main_space = false; |
| 2346 | if (IsMovingGc(collector_type_)) { |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 2347 | if (collector_type_ == kCollectorTypeCC) { |
| 2348 | zygote_collector.SetFromSpace(region_space_); |
| 2349 | } else { |
| 2350 | zygote_collector.SetFromSpace(bump_pointer_space_); |
| 2351 | } |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2352 | } else { |
| 2353 | CHECK(main_space_ != nullptr); |
Hiroshi Yamauchi | d04495e | 2015-03-11 19:09:07 -0700 | [diff] [blame] | 2354 | CHECK_NE(main_space_, non_moving_space_) |
| 2355 | << "Does not make sense to compact within the same space"; |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2356 | // Copy from the main space. |
| 2357 | zygote_collector.SetFromSpace(main_space_); |
| 2358 | reset_main_space = true; |
| 2359 | } |
Mathieu Chartier | 85a43c0 | 2014-01-07 17:59:00 -0800 | [diff] [blame] | 2360 | zygote_collector.SetToSpace(&target_space); |
Mathieu Chartier | 1b54f9c | 2014-04-30 16:45:02 -0700 | [diff] [blame] | 2361 | zygote_collector.SetSwapSemiSpaces(false); |
Hiroshi Yamauchi | 6f4ffe4 | 2014-01-13 12:30:44 -0800 | [diff] [blame] | 2362 | zygote_collector.Run(kGcCauseCollectorTransition, false); |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2363 | if (reset_main_space) { |
| 2364 | main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE); |
| 2365 | madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED); |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 2366 | MemMap mem_map = main_space_->ReleaseMemMap(); |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2367 | RemoveSpace(main_space_); |
Mathieu Chartier | 96bcd45 | 2014-06-17 09:50:02 -0700 | [diff] [blame] | 2368 | space::Space* old_main_space = main_space_; |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 2369 | CreateMainMallocSpace(std::move(mem_map), |
| 2370 | kDefaultInitialSize, |
| 2371 | std::min(mem_map.Size(), growth_limit_), |
| 2372 | mem_map.Size()); |
Mathieu Chartier | 96bcd45 | 2014-06-17 09:50:02 -0700 | [diff] [blame] | 2373 | delete old_main_space; |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2374 | AddSpace(main_space_); |
| 2375 | } else { |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 2376 | if (collector_type_ == kCollectorTypeCC) { |
| 2377 | region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE); |
Mathieu Chartier | 7ec38dc | 2016-10-07 15:24:46 -0700 | [diff] [blame] | 2378 | // Evacuated everything out of the region space, clear the mark bitmap. |
| 2379 | region_space_->GetMarkBitmap()->Clear(); |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 2380 | } else { |
| 2381 | bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE); |
| 2382 | } |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2383 | } |
| 2384 | if (temp_space_ != nullptr) { |
| 2385 | CHECK(temp_space_->IsEmpty()); |
| 2386 | } |
Hans Boehm | 4c6d765 | 2019-11-01 09:23:19 -0700 | [diff] [blame] | 2387 | IncrementFreedEver(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2388 | // Update the end and write out image. |
| 2389 | non_moving_space_->SetEnd(target_space.End()); |
| 2390 | non_moving_space_->SetLimit(target_space.Limit()); |
Mathieu Chartier | faed995 | 2015-03-31 16:28:53 -0700 | [diff] [blame] | 2391 | VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes"; |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 2392 | } |
Mathieu Chartier | 6a7824d | 2014-08-22 14:53:04 -0700 | [diff] [blame] | 2393 | // Change the collector to the post zygote one. |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2394 | ChangeCollector(foreground_collector_type_); |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 2395 | // Save the old space so that we can remove it after we complete creating the zygote space. |
| 2396 | space::MallocSpace* old_alloc_space = non_moving_space_; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2397 | // Turn the current alloc space into a zygote space and obtain the new alloc space composed of |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 2398 | // the remaining available space. |
| 2399 | // Remove the old space before creating the zygote space since creating the zygote space sets |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 2400 | // the old alloc space's bitmaps to null. |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 2401 | RemoveSpace(old_alloc_space); |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 2402 | if (collector::SemiSpace::kUseRememberedSet) { |
| 2403 | // Sanity bound check. |
| 2404 | FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace(); |
| 2405 | // Remove the remembered set for the now zygote space (the old |
| 2406 | // non-moving space). Note now that we have compacted objects into |
| 2407 | // the zygote space, the data in the remembered set is no longer |
| 2408 | // needed. The zygote space will instead have a mod-union table |
| 2409 | // from this point on. |
| 2410 | RemoveRememberedSet(old_alloc_space); |
| 2411 | } |
Mathieu Chartier | 7247af5 | 2014-11-19 10:51:42 -0800 | [diff] [blame] | 2412 | // The remaining space becomes the new non-moving space. |
| 2413 | zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_, |
Mathieu Chartier | e4cab17 | 2014-08-19 18:24:04 -0700 | [diff] [blame] | 2414 | &non_moving_space_); |
Mathieu Chartier | b363f66 | 2014-07-16 13:28:58 -0700 | [diff] [blame] | 2415 | CHECK(!non_moving_space_->CanMoveObjects()); |
| 2416 | if (same_space) { |
| 2417 | main_space_ = non_moving_space_; |
| 2418 | SetSpaceAsDefault(main_space_); |
| 2419 | } |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 2420 | delete old_alloc_space; |
Mathieu Chartier | e4cab17 | 2014-08-19 18:24:04 -0700 | [diff] [blame] | 2421 | CHECK(HasZygoteSpace()) << "Failed creating zygote space"; |
| 2422 | AddSpace(zygote_space_); |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2423 | non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity()); |
| 2424 | AddSpace(non_moving_space_); |
Lokesh Gidra | 8787cf8 | 2019-07-11 12:50:31 -0700 | [diff] [blame] | 2425 | constexpr bool set_mark_bit = kUseBakerReadBarrier |
| 2426 | && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects; |
| 2427 | if (set_mark_bit) { |
Mathieu Chartier | 36a270a | 2016-07-28 18:08:51 -0700 | [diff] [blame] | 2428 | // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is |
 | 2429 | // safe since we mark all of the objects that may reference non-immune objects as gray. |
Lokesh Gidra | 52c468a | 2019-07-18 18:16:04 -0700 | [diff] [blame] | 2430 | zygote_space_->SetMarkBitInLiveObjects(); |
Mathieu Chartier | 36a270a | 2016-07-28 18:08:51 -0700 | [diff] [blame] | 2431 | } |
| 2432 | |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 2433 | // Create the zygote space mod union table. |
| 2434 | accounting::ModUnionTable* mod_union_table = |
Mathieu Chartier | 962cd7a | 2016-08-16 12:15:59 -0700 | [diff] [blame] | 2435 | new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_); |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 2436 | CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table"; |
Mathieu Chartier | 962cd7a | 2016-08-16 12:15:59 -0700 | [diff] [blame] | 2437 | |
| 2438 | if (collector_type_ != kCollectorTypeCC) { |
| 2439 | // Set all the cards in the mod-union table since we don't know which objects contain references |
| 2440 | // to large objects. |
| 2441 | mod_union_table->SetCards(); |
| 2442 | } else { |
Mathieu Chartier | 55c05f5 | 2017-04-11 11:12:28 -0700 | [diff] [blame] | 2443 | // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There |
| 2444 | // may be dirty cards from the zygote compaction or reference processing. These cards are not |
| 2445 | // necessary to have marked since the zygote space may not refer to any objects not in the |
| 2446 | // zygote or image spaces at this point. |
| 2447 | mod_union_table->ProcessCards(); |
| 2448 | mod_union_table->ClearTable(); |
| 2449 | |
Mathieu Chartier | 962cd7a | 2016-08-16 12:15:59 -0700 | [diff] [blame] | 2450 | // For CC we never collect zygote large objects. This means we do not need to set the cards for |
| 2451 | // the zygote mod-union table and we can also clear all of the existing image mod-union tables. |
| 2452 | // The existing mod-union tables are only for image spaces and may only reference zygote and |
| 2453 | // image objects. |
| 2454 | for (auto& pair : mod_union_tables_) { |
| 2455 | CHECK(pair.first->IsImageSpace()); |
| 2456 | CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage()); |
| 2457 | accounting::ModUnionTable* table = pair.second; |
| 2458 | table->ClearTable(); |
| 2459 | } |
| 2460 | } |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 2461 | AddModUnionTable(mod_union_table); |
Lokesh Gidra | 8787cf8 | 2019-07-11 12:50:31 -0700 | [diff] [blame] | 2462 | large_object_space_->SetAllLargeObjectsAsZygoteObjects(self, set_mark_bit); |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 2463 | if (collector::SemiSpace::kUseRememberedSet) { |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 2464 | // Add a new remembered set for the post-zygote non-moving space. |
| 2465 | accounting::RememberedSet* post_zygote_non_moving_space_rem_set = |
| 2466 | new accounting::RememberedSet("Post-zygote non-moving space remembered set", this, |
| 2467 | non_moving_space_); |
| 2468 | CHECK(post_zygote_non_moving_space_rem_set != nullptr) |
| 2469 | << "Failed to create post-zygote non-moving space remembered set"; |
| 2470 | AddRememberedSet(post_zygote_non_moving_space_rem_set); |
| 2471 | } |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 2472 | } |
Hans Boehm | 65c18a2 | 2020-01-03 23:37:13 +0000 | [diff] [blame] | 2473 | #pragma clang diagnostic pop |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 2474 | |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 2475 | void Heap::FlushAllocStack() { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 2476 | MarkAllocStackAsLive(allocation_stack_.get()); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 2477 | allocation_stack_->Reset(); |
| 2478 | } |
| 2479 | |
Mathieu Chartier | a8e8f9c | 2014-04-09 14:51:05 -0700 | [diff] [blame] | 2480 | void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1, |
| 2481 | accounting::ContinuousSpaceBitmap* bitmap2, |
Mathieu Chartier | bbd695c | 2014-04-16 09:48:48 -0700 | [diff] [blame] | 2482 | accounting::LargeObjectBitmap* large_objects, |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2483 | accounting::ObjectStack* stack) { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 2484 | DCHECK(bitmap1 != nullptr); |
| 2485 | DCHECK(bitmap2 != nullptr); |
Mathieu Chartier | cb535da | 2015-01-23 13:50:03 -0800 | [diff] [blame] | 2486 | const auto* limit = stack->End(); |
| 2487 | for (auto* it = stack->Begin(); it != limit; ++it) { |
| 2488 | const mirror::Object* obj = it->AsMirrorPtr(); |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 2489 | if (!kUseThreadLocalAllocationStack || obj != nullptr) { |
| 2490 | if (bitmap1->HasAddress(obj)) { |
| 2491 | bitmap1->Set(obj); |
| 2492 | } else if (bitmap2->HasAddress(obj)) { |
| 2493 | bitmap2->Set(obj); |
| 2494 | } else { |
Mathieu Chartier | 2dbe627 | 2014-09-16 10:43:23 -0700 | [diff] [blame] | 2495 | DCHECK(large_objects != nullptr); |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 2496 | large_objects->Set(obj); |
| 2497 | } |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 2498 | } |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 2499 | } |
| 2500 | } |
| 2501 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2502 | void Heap::SwapSemiSpaces() { |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2503 | CHECK(bump_pointer_space_ != nullptr); |
| 2504 | CHECK(temp_space_ != nullptr); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2505 | std::swap(bump_pointer_space_, temp_space_); |
| 2506 | } |
| 2507 | |
Hiroshi Yamauchi | e4d9987 | 2015-02-26 12:53:45 -0800 | [diff] [blame] | 2508 | collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space, |
| 2509 | space::ContinuousMemMapAllocSpace* source_space, |
| 2510 | GcCause gc_cause) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2511 | CHECK(kMovingCollector); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2512 | if (target_space != source_space) { |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 2513 | // Don't swap spaces since this isn't a typical semi space collection. |
| 2514 | semi_space_collector_->SetSwapSemiSpaces(false); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2515 | semi_space_collector_->SetFromSpace(source_space); |
| 2516 | semi_space_collector_->SetToSpace(target_space); |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 2517 | semi_space_collector_->Run(gc_cause, false); |
Hiroshi Yamauchi | e4d9987 | 2015-02-26 12:53:45 -0800 | [diff] [blame] | 2518 | return semi_space_collector_; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2519 | } |
Mathieu Chartier | f8e5d8c | 2018-04-06 13:35:37 -0700 | [diff] [blame] | 2520 | LOG(FATAL) << "Unsupported"; |
| 2521 | UNREACHABLE(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2522 | } |
Anwar Ghuloum | 67f9941 | 2013-08-12 14:19:48 -0700 | [diff] [blame] | 2523 | |
Mathieu Chartier | 34afcde | 2017-06-30 15:31:11 -0700 | [diff] [blame] | 2524 | void Heap::TraceHeapSize(size_t heap_size) { |
Orion Hodson | 119733d | 2019-01-30 15:14:41 +0000 | [diff] [blame] | 2525 | ATraceIntegerValue("Heap size (KB)", heap_size / KB); |
Mathieu Chartier | 34afcde | 2017-06-30 15:31:11 -0700 | [diff] [blame] | 2526 | } |
| 2527 | |
Hans Boehm | 13e951d | 2019-11-01 16:48:28 -0700 | [diff] [blame] | 2528 | #if defined(__GLIBC__) |
| 2529 | # define IF_GLIBC(x) x |
| 2530 | #else |
| 2531 | # define IF_GLIBC(x) |
| 2532 | #endif |
| 2533 | |
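 | | // Returns an estimate of the bytes allocated by native code: what malloc currently has handed |
 | | // out (from mallinfo(), where available) plus allocations explicitly registered with the runtime. |
 | | // The result is only a heuristic input to GC triggering, so a rough number is acceptable. |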
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 2534 | size_t Heap::GetNativeBytes() { |
| 2535 | size_t malloc_bytes; |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 2536 | #if defined(__BIONIC__) || defined(__GLIBC__) |
Hans Boehm | 13e951d | 2019-11-01 16:48:28 -0700 | [diff] [blame] | 2537 | IF_GLIBC(size_t mmapped_bytes;) |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 2538 | struct mallinfo mi = mallinfo(); |
| 2539 | // In spite of the documentation, the jemalloc version of this call seems to do what we want, |
| 2540 | // and it is thread-safe. |
| 2541 | if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) { |
| 2542 | // Shouldn't happen, but glibc declares uordblks as int. |
| 2543 | // Avoiding sign extension gets us correct behavior for another 2 GB. |
| 2544 | malloc_bytes = (unsigned int)mi.uordblks; |
Hans Boehm | 13e951d | 2019-11-01 16:48:28 -0700 | [diff] [blame] | 2545 | IF_GLIBC(mmapped_bytes = (unsigned int)mi.hblkhd;) |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 2546 | } else { |
| 2547 | malloc_bytes = mi.uordblks; |
Hans Boehm | 13e951d | 2019-11-01 16:48:28 -0700 | [diff] [blame] | 2548 | IF_GLIBC(mmapped_bytes = mi.hblkhd;) |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 2549 | } |
Hans Boehm | 13e951d | 2019-11-01 16:48:28 -0700 | [diff] [blame] | 2550 | // From the spec, it appeared mmapped_bytes <= malloc_bytes. Reality was sometimes |
| 2551 | // dramatically different. (b/119580449 was an early bug.) If so, we try to fudge it. |
| 2552 | // However, malloc implementations seem to interpret hblkhd differently, namely as |
| 2553 | // mapped blocks backing the entire heap (e.g. jemalloc) vs. large objects directly |
| 2554 | // allocated via mmap (e.g. glibc). Thus we now only do this for glibc, where it |
| 2555 | // previously helped, and which appears to use a reading of the spec compatible |
| 2556 | // with our adjustment. |
| 2557 | #if defined(__GLIBC__) |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 2558 | if (mmapped_bytes > malloc_bytes) { |
| 2559 | malloc_bytes = mmapped_bytes; |
| 2560 | } |
Hans Boehm | 13e951d | 2019-11-01 16:48:28 -0700 | [diff] [blame] | 2561 | #endif // GLIBC |
| 2562 | #else // Neither Bionic nor Glibc |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 2563 | // We should hit this case only in contexts in which GC triggering is not critical. Effectively |
| 2564 | // disable GC triggering based on malloc(). |
| 2565 | malloc_bytes = 1000; |
| 2566 | #endif |
| 2567 | return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed); |
| 2568 | // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no |
| 2569 | // more expensive, and it would allow us to count memory allocated by means other than malloc. |
| 2570 | // However it would change as pages are unmapped and remapped due to memory pressure, among |
| 2571 | // other things. It seems risky to trigger GCs as a result of such changes. |
| 2572 | } |
| 2573 | |
Mathieu Chartier | a4f6af9 | 2015-08-11 17:35:25 -0700 | [diff] [blame] | 2574 | collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, |
| 2575 | GcCause gc_cause, |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2576 | bool clear_soft_references) { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 2577 | Thread* self = Thread::Current(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2578 | Runtime* runtime = Runtime::Current(); |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 2579 | // If the heap can't run the GC, silently fail and return that no GC was run. |
| 2580 | switch (gc_type) { |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 2581 | case collector::kGcTypePartial: { |
Mathieu Chartier | e4cab17 | 2014-08-19 18:24:04 -0700 | [diff] [blame] | 2582 | if (!HasZygoteSpace()) { |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 2583 | return collector::kGcTypeNone; |
| 2584 | } |
| 2585 | break; |
| 2586 | } |
| 2587 | default: { |
 | 2588 | // Other GC types don't have any special cases that would make them not runnable. The main case |
| 2589 | // here is full GC. |
| 2590 | } |
| 2591 | } |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 2592 | ScopedThreadStateChange tsc(self, kWaitingPerformingGc); |
Ziang Wan | 92db59b | 2019-07-22 21:19:24 +0000 | [diff] [blame] | 2593 | Locks::mutator_lock_->AssertNotHeld(self); |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 2594 | if (self->IsHandlingStackOverflow()) { |
Mathieu Chartier | 50c138f | 2015-01-07 16:00:03 -0800 | [diff] [blame] | 2595 | // If we are throwing a stack overflow error we probably don't have enough remaining stack |
| 2596 | // space to run the GC. |
| 2597 | return collector::kGcTypeNone; |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 2598 | } |
Mathieu Chartier | d5a89ee | 2014-01-31 09:55:13 -0800 | [diff] [blame] | 2599 | bool compacting_gc; |
| 2600 | { |
| 2601 | gc_complete_lock_->AssertNotHeld(self); |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 2602 | ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete); |
Mathieu Chartier | d5a89ee | 2014-01-31 09:55:13 -0800 | [diff] [blame] | 2603 | MutexLock mu(self, *gc_complete_lock_); |
| 2604 | // Ensure there is only one GC at a time. |
Mathieu Chartier | 89a201e | 2014-05-02 10:27:26 -0700 | [diff] [blame] | 2605 | WaitForGcToCompleteLocked(gc_cause, self); |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 2606 | compacting_gc = IsMovingGc(collector_type_); |
Mathieu Chartier | d5a89ee | 2014-01-31 09:55:13 -0800 | [diff] [blame] | 2607 | // GC can be disabled if someone has used GetPrimitiveArrayCritical. |
| 2608 | if (compacting_gc && disable_moving_gc_count_ != 0) { |
| 2609 | LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_; |
| 2610 | return collector::kGcTypeNone; |
| 2611 | } |
Mathieu Chartier | 5116837 | 2015-08-12 16:40:32 -0700 | [diff] [blame] | 2612 | if (gc_disabled_for_shutdown_) { |
| 2613 | return collector::kGcTypeNone; |
| 2614 | } |
Mathieu Chartier | d5a89ee | 2014-01-31 09:55:13 -0800 | [diff] [blame] | 2615 | collector_type_running_ = collector_type_; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 2616 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2617 | if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) { |
| 2618 | ++runtime->GetStats()->gc_for_alloc_count; |
| 2619 | ++self->GetStats()->gc_for_alloc_count; |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 2620 | } |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 2621 | const size_t bytes_allocated_before_gc = GetBytesAllocated(); |
Richard Uhler | caaa2b0 | 2017-02-01 09:54:17 +0000 | [diff] [blame] | 2622 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2623 | DCHECK_LT(gc_type, collector::kGcTypeMax); |
| 2624 | DCHECK_NE(gc_type, collector::kGcTypeNone); |
Anwar Ghuloum | 67f9941 | 2013-08-12 14:19:48 -0700 | [diff] [blame] | 2625 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2626 | collector::GarbageCollector* collector = nullptr; |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 2627 | // TODO: Clean this up. |
Mathieu Chartier | 1d27b34 | 2014-01-28 12:51:09 -0800 | [diff] [blame] | 2628 | if (compacting_gc) { |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 2629 | DCHECK(current_allocator_ == kAllocatorTypeBumpPointer || |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 2630 | current_allocator_ == kAllocatorTypeTLAB || |
| 2631 | current_allocator_ == kAllocatorTypeRegion || |
| 2632 | current_allocator_ == kAllocatorTypeRegionTLAB); |
Mathieu Chartier | 52e4b43 | 2014-06-10 11:22:31 -0700 | [diff] [blame] | 2633 | switch (collector_type_) { |
| 2634 | case kCollectorTypeSS: |
Mathieu Chartier | 52e4b43 | 2014-06-10 11:22:31 -0700 | [diff] [blame] | 2635 | semi_space_collector_->SetFromSpace(bump_pointer_space_); |
| 2636 | semi_space_collector_->SetToSpace(temp_space_); |
| 2637 | semi_space_collector_->SetSwapSemiSpaces(true); |
| 2638 | collector = semi_space_collector_; |
| 2639 | break; |
| 2640 | case kCollectorTypeCC: |
Albert Mingkun Yang | 0b4d146 | 2018-11-29 13:25:35 +0000 | [diff] [blame] | 2641 | if (use_generational_cc_) { |
Mathieu Chartier | 8d1a996 | 2016-08-17 16:39:45 -0700 | [diff] [blame] | 2642 | // TODO: Other threads must do the flip checkpoint before they start poking at |
 | 2643 | // active_concurrent_copying_collector_. So there should be no concurrency here. |
| 2644 | active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ? |
| 2645 | young_concurrent_copying_collector_ : concurrent_copying_collector_; |
Lokesh Gidra | 1c34b71 | 2018-12-18 13:41:58 -0800 | [diff] [blame] | 2646 | DCHECK(active_concurrent_copying_collector_->RegionSpace() == region_space_); |
Mathieu Chartier | 8d1a996 | 2016-08-17 16:39:45 -0700 | [diff] [blame] | 2647 | } |
| 2648 | collector = active_concurrent_copying_collector_; |
Mathieu Chartier | 52e4b43 | 2014-06-10 11:22:31 -0700 | [diff] [blame] | 2649 | break; |
Mathieu Chartier | 52e4b43 | 2014-06-10 11:22:31 -0700 | [diff] [blame] | 2650 | default: |
| 2651 | LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_); |
Hiroshi Yamauchi | d5307ec | 2014-03-27 21:07:51 -0700 | [diff] [blame] | 2652 | } |
Mathieu Chartier | 8d1a996 | 2016-08-17 16:39:45 -0700 | [diff] [blame] | 2653 | if (collector != active_concurrent_copying_collector_) { |
Mathieu Chartier | 52e4b43 | 2014-06-10 11:22:31 -0700 | [diff] [blame] | 2654 | temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE); |
Hiroshi Yamauchi | 6edb9ae | 2016-02-08 14:18:21 -0800 | [diff] [blame] | 2655 | if (kIsDebugBuild) { |
| 2656 | // Try to read each page of the memory map in case mprotect didn't work properly b/19894268. |
| 2657 | temp_space_->GetMemMap()->TryReadable(); |
| 2658 | } |
Mathieu Chartier | 52e4b43 | 2014-06-10 11:22:31 -0700 | [diff] [blame] | 2659 | CHECK(temp_space_->IsEmpty()); |
| 2660 | } |
| 2661 | gc_type = collector::kGcTypeFull; // TODO: Not hard code this in. |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 2662 | } else if (current_allocator_ == kAllocatorTypeRosAlloc || |
| 2663 | current_allocator_ == kAllocatorTypeDlMalloc) { |
Mathieu Chartier | afe4998 | 2014-03-27 10:55:04 -0700 | [diff] [blame] | 2664 | collector = FindCollectorByGcType(gc_type); |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 2665 | } else { |
| 2666 | LOG(FATAL) << "Invalid current allocator " << current_allocator_; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2667 | } |
Nicolas Geoffray | b6e20ae | 2016-03-07 14:29:04 +0000 | [diff] [blame] | 2668 | |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 2669 | CHECK(collector != nullptr) |
Hiroshi Yamauchi | 3e41780 | 2014-03-20 12:03:02 -0700 | [diff] [blame] | 2670 | << "Could not find garbage collector with collector_type=" |
| 2671 | << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type; |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 2672 | collector->Run(gc_cause, clear_soft_references || runtime->IsZygote()); |
Hans Boehm | 4c6d765 | 2019-11-01 09:23:19 -0700 | [diff] [blame] | 2673 | IncrementFreedEver(); |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 2674 | RequestTrim(self); |
Alex Light | e302088 | 2019-05-13 16:35:02 -0700 | [diff] [blame] | 2675 | // Collect cleared references. |
| 2676 | SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 2677 | // Grow the heap so that we know when to perform the next GC. |
Mathieu Chartier | e2c2f6e | 2014-12-16 18:49:31 -0800 | [diff] [blame] | 2678 | GrowForUtilization(collector, bytes_allocated_before_gc); |
Hiroshi Yamauchi | e4d9987 | 2015-02-26 12:53:45 -0800 | [diff] [blame] | 2679 | LogGC(gc_cause, collector); |
| 2680 | FinishGC(self, gc_type); |
Alex Light | e302088 | 2019-05-13 16:35:02 -0700 | [diff] [blame] | 2681 | // Actually enqueue all cleared references. Do this after the GC has officially finished since |
| 2682 | // otherwise we can deadlock. |
| 2683 | clear->Run(self); |
| 2684 | clear->Finalize(); |
Hiroshi Yamauchi | e4d9987 | 2015-02-26 12:53:45 -0800 | [diff] [blame] | 2685 | // Inform DDMS that a GC completed. |
| 2686 | Dbg::GcDidFinish(); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 2687 | |
| 2688 | old_native_bytes_allocated_.store(GetNativeBytes()); |
| 2689 | |
Mathieu Chartier | 598302a | 2015-09-23 14:52:39 -0700 | [diff] [blame] | 2690 | // Unload native libraries for class unloading. We do this after calling FinishGC to prevent |
| 2691 | // deadlocks in case the JNI_OnUnload function does allocations. |
| 2692 | { |
| 2693 | ScopedObjectAccess soa(self); |
| 2694 | soa.Vm()->UnloadNativeLibraries(); |
| 2695 | } |
Hiroshi Yamauchi | e4d9987 | 2015-02-26 12:53:45 -0800 | [diff] [blame] | 2696 | return gc_type; |
| 2697 | } |
| 2698 | |
| 2699 | void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) { |
Mathieu Chartier | 10fb83a | 2014-06-15 15:15:43 -0700 | [diff] [blame] | 2700 | const size_t duration = GetCurrentGcIteration()->GetDurationNs(); |
| 2701 | const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes(); |
Mathieu Chartier | 62ab87b | 2014-04-28 12:22:07 -0700 | [diff] [blame] | 2702 | // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC |
Mathieu Chartier | f5997b4 | 2014-06-20 10:37:54 -0700 | [diff] [blame] | 2703 | // (mutator time blocked >= long_pause_log_threshold_). |
Mathieu Chartier | 6bc7774 | 2017-04-18 17:46:23 -0700 | [diff] [blame] | 2704 | bool log_gc = kLogAllGCs || gc_cause == kGcCauseExplicit; |
Mathieu Chartier | 62ab87b | 2014-04-28 12:22:07 -0700 | [diff] [blame] | 2705 | if (!log_gc && CareAboutPauseTimes()) { |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 2706 | // GC for alloc pauses the allocating thread, so consider it as a pause. |
Mathieu Chartier | 62ab87b | 2014-04-28 12:22:07 -0700 | [diff] [blame] | 2707 | log_gc = duration > long_gc_log_threshold_ || |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 2708 | (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_); |
Mathieu Chartier | 62ab87b | 2014-04-28 12:22:07 -0700 | [diff] [blame] | 2709 | for (uint64_t pause : pause_times) { |
| 2710 | log_gc = log_gc || pause >= long_pause_log_threshold_; |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 2711 | } |
Mathieu Chartier | 62ab87b | 2014-04-28 12:22:07 -0700 | [diff] [blame] | 2712 | } |
| 2713 | if (log_gc) { |
| 2714 | const size_t percent_free = GetPercentFree(); |
| 2715 | const size_t current_heap_size = GetBytesAllocated(); |
| 2716 | const size_t total_memory = GetTotalMemory(); |
| 2717 | std::ostringstream pause_string; |
| 2718 | for (size_t i = 0; i < pause_times.size(); ++i) { |
Hiroshi Yamauchi | e4d9987 | 2015-02-26 12:53:45 -0800 | [diff] [blame] | 2719 | pause_string << PrettyDuration((pause_times[i] / 1000) * 1000) |
| 2720 | << ((i != pause_times.size() - 1) ? "," : ""); |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 2721 | } |
Mathieu Chartier | 62ab87b | 2014-04-28 12:22:07 -0700 | [diff] [blame] | 2722 | LOG(INFO) << gc_cause << " " << collector->GetName() |
Mathieu Chartier | 10fb83a | 2014-06-15 15:15:43 -0700 | [diff] [blame] | 2723 | << " GC freed " << current_gc_iteration_.GetFreedObjects() << "(" |
| 2724 | << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, " |
| 2725 | << current_gc_iteration_.GetFreedLargeObjects() << "(" |
| 2726 | << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, " |
Mathieu Chartier | 62ab87b | 2014-04-28 12:22:07 -0700 | [diff] [blame] | 2727 | << percent_free << "% free, " << PrettySize(current_heap_size) << "/" |
| 2728 | << PrettySize(total_memory) << ", " << "paused " << pause_string.str() |
| 2729 | << " total " << PrettyDuration((duration / 1000) * 1000); |
Ian Rogers | c7dd295 | 2014-10-21 23:31:19 -0700 | [diff] [blame] | 2730 | VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings()); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 2731 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 2732 | } |
Mathieu Chartier | a639903 | 2012-06-11 18:49:50 -0700 | [diff] [blame] | 2733 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 2734 | void Heap::FinishGC(Thread* self, collector::GcType gc_type) { |
| 2735 | MutexLock mu(self, *gc_complete_lock_); |
Mathieu Chartier | d5a89ee | 2014-01-31 09:55:13 -0800 | [diff] [blame] | 2736 | collector_type_running_ = kCollectorTypeNone; |
| 2737 | if (gc_type != collector::kGcTypeNone) { |
| 2738 | last_gc_type_ = gc_type; |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 2739 | |
| 2740 | // Update stats. |
| 2741 | ++gc_count_last_window_; |
| 2742 | if (running_collection_is_blocking_) { |
| 2743 | // If the currently running collection was a blocking one, |
| 2744 | // increment the counters and reset the flag. |
| 2745 | ++blocking_gc_count_; |
| 2746 | blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs(); |
| 2747 | ++blocking_gc_count_last_window_; |
| 2748 | } |
| 2749 | // Update the gc count rate histograms if due. |
| 2750 | UpdateGcCountRateHistograms(); |
Mathieu Chartier | d5a89ee | 2014-01-31 09:55:13 -0800 | [diff] [blame] | 2751 | } |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 2752 | // Reset. |
| 2753 | running_collection_is_blocking_ = false; |
Mathieu Chartier | 183009a | 2017-02-16 21:19:28 -0800 | [diff] [blame] | 2754 | thread_running_gc_ = nullptr; |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 2755 | // Wake anyone who may have been waiting for the GC to complete. |
| 2756 | gc_complete_cond_->Broadcast(self); |
| 2757 | } |
| 2758 | |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 2759 | void Heap::UpdateGcCountRateHistograms() { |
| 2760 | // Invariant: if the time since the last update includes more than |
 | 2761 | // one window, all the GC runs (if > 0) must have happened in the first |
| 2762 | // window because otherwise the update must have already taken place |
| 2763 | // at an earlier GC run. So, we report the non-first windows with |
| 2764 | // zero counts to the histograms. |
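 | | // For example, if three full windows have elapsed since the last update and four GC runs were |
 | | // counted (including the one finishing now), the first window records 3, the two windows after |
 | | // it record 0, and the count for the new window restarts at 1 for the current run. |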
| 2765 | DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U); |
| 2766 | uint64_t now = NanoTime(); |
| 2767 | DCHECK_GE(now, last_update_time_gc_count_rate_histograms_); |
| 2768 | uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_; |
| 2769 | uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration; |
Vincent Palomares | cc17d07 | 2019-01-28 11:14:01 -0800 | [diff] [blame] | 2770 | |
 | 2771 | // The computed number of windows can be unreasonably high if NanoTime() is not monotonic. |
| 2772 | // Setting a limit on its maximum value reduces the impact on CPU time in such cases. |
| 2773 | if (num_of_windows > kGcCountRateHistogramMaxNumMissedWindows) { |
| 2774 | LOG(WARNING) << "Reducing the number of considered missed Gc histogram windows from " |
| 2775 | << num_of_windows << " to " << kGcCountRateHistogramMaxNumMissedWindows; |
| 2776 | num_of_windows = kGcCountRateHistogramMaxNumMissedWindows; |
| 2777 | } |
| 2778 | |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 2779 | if (time_since_last_update >= kGcCountRateHistogramWindowDuration) { |
| 2780 | // Record the first window. |
| 2781 | gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run. |
| 2782 | blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ? |
| 2783 | blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_); |
| 2784 | // Record the other windows (with zero counts). |
| 2785 | for (uint64_t i = 0; i < num_of_windows - 1; ++i) { |
| 2786 | gc_count_rate_histogram_.AddValue(0); |
| 2787 | blocking_gc_count_rate_histogram_.AddValue(0); |
| 2788 | } |
| 2789 | // Update the last update time and reset the counters. |
| 2790 | last_update_time_gc_count_rate_histograms_ = |
| 2791 | (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration; |
| 2792 | gc_count_last_window_ = 1; // Include the current run. |
| 2793 | blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0; |
| 2794 | } |
| 2795 | DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U); |
| 2796 | } |
| 2797 | |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 2798 | class RootMatchesObjectVisitor : public SingleRootVisitor { |
| 2799 | public: |
| 2800 | explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { } |
| 2801 | |
| 2802 | void VisitRoot(mirror::Object* root, const RootInfo& info) |
Roland Levillain | bbc6e7e | 2018-08-24 16:58:47 +0100 | [diff] [blame] | 2803 | override REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 2804 | if (root == obj_) { |
| 2805 | LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString(); |
| 2806 | } |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2807 | } |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 2808 | |
| 2809 | private: |
| 2810 | const mirror::Object* const obj_; |
| 2811 | }; |
| 2812 | |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2813 | |
| 2814 | class ScanVisitor { |
| 2815 | public: |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 2816 | void operator()(const mirror::Object* obj) const { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2817 | LOG(ERROR) << "Would have rescanned object " << obj; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2818 | } |
| 2819 | }; |
| 2820 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2821 | // Verify a reference from an object. |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 2822 | class VerifyReferenceVisitor : public SingleRootVisitor { |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2823 | public: |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 2824 | VerifyReferenceVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent) |
Andreas Gampe | 351c447 | 2017-07-12 19:32:55 -0700 | [diff] [blame] | 2825 | REQUIRES_SHARED(Locks::mutator_lock_) |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 2826 | : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) { |
| 2827 | CHECK_EQ(self_, Thread::Current()); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2828 | } |
| 2829 | |
Mathieu Chartier | 31e8822 | 2016-10-14 18:43:19 -0700 | [diff] [blame] | 2830 | void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 2831 | REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | 78f7b4c | 2014-05-06 10:57:27 -0700 | [diff] [blame] | 2832 | if (verify_referent_) { |
Mathieu Chartier | 31e8822 | 2016-10-14 18:43:19 -0700 | [diff] [blame] | 2833 | VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset()); |
Mathieu Chartier | 78f7b4c | 2014-05-06 10:57:27 -0700 | [diff] [blame] | 2834 | } |
Mathieu Chartier | 407f702 | 2014-02-18 14:37:05 -0800 | [diff] [blame] | 2835 | } |
| 2836 | |
Mathieu Chartier | 31e8822 | 2016-10-14 18:43:19 -0700 | [diff] [blame] | 2837 | void operator()(ObjPtr<mirror::Object> obj, |
| 2838 | MemberOffset offset, |
| 2839 | bool is_static ATTRIBUTE_UNUSED) const |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 2840 | REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | 31e8822 | 2016-10-14 18:43:19 -0700 | [diff] [blame] | 2841 | VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset); |
Mathieu Chartier | 407f702 | 2014-02-18 14:37:05 -0800 | [diff] [blame] | 2842 | } |
| 2843 | |
Mathieu Chartier | 31e8822 | 2016-10-14 18:43:19 -0700 | [diff] [blame] | 2844 | bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS { |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 2845 | return heap_->IsLiveObjectLocked(obj, true, false, true); |
| 2846 | } |
| 2847 | |
Mathieu Chartier | da7c650 | 2015-07-23 16:01:26 -0700 | [diff] [blame] | 2848 | void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 2849 | REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | da7c650 | 2015-07-23 16:01:26 -0700 | [diff] [blame] | 2850 | if (!root->IsNull()) { |
| 2851 | VisitRoot(root); |
| 2852 | } |
| 2853 | } |
| 2854 | void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 2855 | REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | da7c650 | 2015-07-23 16:01:26 -0700 | [diff] [blame] | 2856 | const_cast<VerifyReferenceVisitor*>(this)->VisitRoot( |
| 2857 | root->AsMirrorPtr(), RootInfo(kRootVMInternal)); |
| 2858 | } |
| 2859 | |
Roland Levillain | f73caca | 2018-08-24 17:19:07 +0100 | [diff] [blame] | 2860 | void VisitRoot(mirror::Object* root, const RootInfo& root_info) override |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 2861 | REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 2862 | if (root == nullptr) { |
| 2863 | LOG(ERROR) << "Root is null with info " << root_info.GetType(); |
| 2864 | } else if (!VerifyReference(nullptr, root, MemberOffset(0))) { |
David Sehr | 709b070 | 2016-10-13 09:12:37 -0700 | [diff] [blame] | 2865 | LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root) |
Mathieu Chartier | e34fa1d | 2015-01-14 14:55:47 -0800 | [diff] [blame] | 2866 | << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType(); |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 2867 | } |
| 2868 | } |
| 2869 | |
| 2870 | private: |
Mathieu Chartier | 407f702 | 2014-02-18 14:37:05 -0800 | [diff] [blame] | 2871 | // TODO: Fix the no thread safety analysis. |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 2872 | // Returns false on failure. |
| 2873 | bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 2874 | NO_THREAD_SAFETY_ANALYSIS { |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2875 | if (ref == nullptr || IsLive(ref)) { |
| 2876 | // Verify that the reference is live. |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 2877 | return true; |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2878 | } |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 2879 | CHECK_EQ(self_, Thread::Current()); // fail_count_ is private to the calling thread. |
| 2880 | *fail_count_ += 1; |
| 2881 | if (*fail_count_ == 1) { |
| 2882 | // Only print message for the first failure to prevent spam. |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2883 | LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!"; |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2884 | } |
| 2885 | if (obj != nullptr) { |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 2886 |       // Only do this part for non-roots.
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2887 | accounting::CardTable* card_table = heap_->GetCardTable(); |
| 2888 | accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get(); |
| 2889 | accounting::ObjectStack* live_stack = heap_->live_stack_.get(); |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 2890 | uint8_t* card_addr = card_table->CardFromAddr(obj); |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2891 | LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset " |
| 2892 | << offset << "\n card value = " << static_cast<int>(*card_addr); |
| 2893 | if (heap_->IsValidObjectAddress(obj->GetClass())) { |
David Sehr | 709b070 | 2016-10-13 09:12:37 -0700 | [diff] [blame] | 2894 | LOG(ERROR) << "Obj type " << obj->PrettyTypeOf(); |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2895 | } else { |
| 2896 | LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address"; |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2897 | } |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2898 | |
Mathieu Chartier | b363f66 | 2014-07-16 13:28:58 -0700 | [diff] [blame] | 2899 |       // Attempt to find the reference among the recently freed objects and report its class.
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2900 | space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true); |
| 2901 | if (ref_space != nullptr && ref_space->IsMallocSpace()) { |
| 2902 | space::MallocSpace* space = ref_space->AsMallocSpace(); |
| 2903 | mirror::Class* ref_class = space->FindRecentFreedObject(ref); |
| 2904 | if (ref_class != nullptr) { |
| 2905 | LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class " |
David Sehr | 709b070 | 2016-10-13 09:12:37 -0700 | [diff] [blame] | 2906 | << ref_class->PrettyClass(); |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2907 | } else { |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2908 | LOG(ERROR) << "Reference " << ref << " not found as a recently freed object"; |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2909 | } |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2910 | } |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2911 | |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2912 | if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) && |
| 2913 | ref->GetClass()->IsClass()) { |
David Sehr | 709b070 | 2016-10-13 09:12:37 -0700 | [diff] [blame] | 2914 | LOG(ERROR) << "Ref type " << ref->PrettyTypeOf(); |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2915 | } else { |
| 2916 | LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass() |
| 2917 | << ") is not a valid heap address"; |
| 2918 | } |
| 2919 | |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 2920 | card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj)); |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2921 | void* cover_begin = card_table->AddrFromCard(card_addr); |
| 2922 | void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) + |
| 2923 | accounting::CardTable::kCardSize); |
| 2924 | LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin |
| 2925 | << "-" << cover_end; |
Mathieu Chartier | a8e8f9c | 2014-04-09 14:51:05 -0700 | [diff] [blame] | 2926 | accounting::ContinuousSpaceBitmap* bitmap = |
| 2927 | heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj); |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2928 | |
| 2929 | if (bitmap == nullptr) { |
| 2930 | LOG(ERROR) << "Object " << obj << " has no bitmap"; |
Mathieu Chartier | 4e30541 | 2014-02-19 10:54:44 -0800 | [diff] [blame] | 2931 | if (!VerifyClassClass(obj->GetClass())) { |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2932 | LOG(ERROR) << "Object " << obj << " failed class verification!"; |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2933 | } |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2934 | } else { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2935 | // Print out how the object is live. |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2936 | if (bitmap->Test(obj)) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2937 | LOG(ERROR) << "Object " << obj << " found in live bitmap"; |
| 2938 | } |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2939 | if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2940 | LOG(ERROR) << "Object " << obj << " found in allocation stack"; |
| 2941 | } |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2942 | if (live_stack->Contains(const_cast<mirror::Object*>(obj))) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2943 | LOG(ERROR) << "Object " << obj << " found in live stack"; |
| 2944 | } |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2945 | if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) { |
| 2946 | LOG(ERROR) << "Ref " << ref << " found in allocation stack"; |
| 2947 | } |
| 2948 | if (live_stack->Contains(const_cast<mirror::Object*>(ref))) { |
| 2949 | LOG(ERROR) << "Ref " << ref << " found in live stack"; |
| 2950 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2951 | // Attempt to see if the card table missed the reference. |
| 2952 | ScanVisitor scan_visitor; |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 2953 | uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr)); |
Lei Li | 727b294 | 2015-01-15 11:26:34 +0800 | [diff] [blame] | 2954 | card_table->Scan<false>(bitmap, byte_cover_begin, |
| 2955 | byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 2956 | } |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2957 | |
| 2958 | // Search to see if any of the roots reference our object. |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 2959 | RootMatchesObjectVisitor visitor1(obj); |
| 2960 | Runtime::Current()->VisitRoots(&visitor1); |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 2961 | // Search to see if any of the roots reference our reference. |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 2962 | RootMatchesObjectVisitor visitor2(ref); |
| 2963 | Runtime::Current()->VisitRoots(&visitor2); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2964 | } |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 2965 | return false; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2966 | } |
| 2967 | |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 2968 | Thread* const self_; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2969 | Heap* const heap_; |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 2970 | size_t* const fail_count_; |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 2971 | const bool verify_referent_; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2972 | }; |
| 2973 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2974 | // Verify all references within an object, for use with HeapBitmap::Visit. |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2975 | class VerifyObjectVisitor { |
| 2976 | public: |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 2977 | VerifyObjectVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent) |
| 2978 | : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {} |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2979 | |
Andreas Gampe | 351c447 | 2017-07-12 19:32:55 -0700 | [diff] [blame] | 2980 | void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 2981 |     // Note: we verify the references in obj but not obj itself, since obj must be live;
| 2982 |     // otherwise we could not have found it in the live bitmap.
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 2983 | VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_); |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 2984 |     // The class doesn't count as a reference but we should verify it anyway.
Mathieu Chartier | 059ef3d | 2015-08-18 13:54:21 -0700 | [diff] [blame] | 2985 | obj->VisitReferences(visitor, visitor); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2986 | } |
| 2987 | |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 2988 | void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) { |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 2989 | ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 2990 | VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_); |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 2991 | Runtime::Current()->VisitRoots(&visitor); |
| 2992 | } |
| 2993 | |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 2994 | uint32_t GetFailureCount() const REQUIRES(Locks::mutator_lock_) { |
| 2995 | CHECK_EQ(self_, Thread::Current()); |
| 2996 | return *fail_count_; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 2997 | } |
| 2998 | |
| 2999 | private: |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 3000 | Thread* const self_; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3001 | Heap* const heap_; |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 3002 | size_t* const fail_count_; |
Mathieu Chartier | 78f7b4c | 2014-05-06 10:57:27 -0700 | [diff] [blame] | 3003 | const bool verify_referent_; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 3004 | }; |
| 3005 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3006 | void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) { |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 3007 | // Slow path, the allocation stack push back must have already failed. |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3008 | DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr())); |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 3009 | do { |
| 3010 | // TODO: Add handle VerifyObject. |
| 3011 | StackHandleScope<1> hs(self); |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3012 | HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj)); |
Hans Boehm | d972b42 | 2017-09-11 12:57:00 -0700 | [diff] [blame] | 3013 | // Push our object into the reserve region of the allocation stack. This is only required due |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 3014 | // to heap verification requiring that roots are live (either in the live bitmap or in the |
| 3015 | // allocation stack). |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3016 | CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr())); |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 3017 | CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false); |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3018 | } while (!allocation_stack_->AtomicPushBack(obj->Ptr())); |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 3019 | } |
| 3020 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3021 | void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, |
| 3022 | ObjPtr<mirror::Object>* obj) { |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 3023 | // Slow path, the allocation stack push back must have already failed. |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3024 | DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr())); |
Mathieu Chartier | cb535da | 2015-01-23 13:50:03 -0800 | [diff] [blame] | 3025 | StackReference<mirror::Object>* start_address; |
| 3026 | StackReference<mirror::Object>* end_address; |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 3027 | while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address, |
| 3028 | &end_address)) { |
| 3029 | // TODO: Add handle VerifyObject. |
| 3030 | StackHandleScope<1> hs(self); |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3031 | HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj)); |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 3032 |     // Push our object into the reserve region of the allocation stack. This is only required due
| 3033 | // to heap verification requiring that roots are live (either in the live bitmap or in the |
| 3034 | // allocation stack). |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3035 | CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr())); |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 3036 | // Push into the reserve allocation stack. |
| 3037 | CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false); |
| 3038 | } |
| 3039 | self->SetThreadLocalAllocationStack(start_address, end_address); |
| 3040 | // Retry on the new thread-local allocation stack. |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3041 | CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr())); // Must succeed. |
Mathieu Chartier | c179016 | 2014-05-23 10:54:50 -0700 | [diff] [blame] | 3042 | } |
| 3043 | |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 3044 | // Must do this with mutators suspended since we are directly accessing the allocation stacks. |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 3045 | size_t Heap::VerifyHeapReferences(bool verify_referents) { |
Hiroshi Yamauchi | 1ed9061 | 2014-02-14 15:00:51 -0800 | [diff] [blame] | 3046 | Thread* self = Thread::Current(); |
| 3047 | Locks::mutator_lock_->AssertExclusiveHeld(self); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 3048 |   // Let's sort our allocation stacks so that we can efficiently binary search them.
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3049 | allocation_stack_->Sort(); |
| 3050 | live_stack_->Sort(); |
Hiroshi Yamauchi | 1ed9061 | 2014-02-14 15:00:51 -0800 | [diff] [blame] | 3051 |   // Since we sorted the allocation stack content, we need to revoke all
| 3052 | // thread-local allocation stacks. |
| 3053 | RevokeAllThreadLocalAllocationStacks(self); |
Orion Hodson | 4a01cc3 | 2018-03-26 15:46:18 +0100 | [diff] [blame] | 3054 | size_t fail_count = 0; |
| 3055 | VerifyObjectVisitor visitor(self, this, &fail_count, verify_referents); |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 3056 | // Verify objects in the allocation stack since these will be objects which were: |
| 3057 | // 1. Allocated prior to the GC (pre GC verification). |
| 3058 | // 2. Allocated during the GC (pre sweep GC verification). |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 3059 | // We don't want to verify the objects in the live stack since they themselves may be |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 3060 | // pointing to dead objects if they are not reachable. |
Andreas Gampe | 351c447 | 2017-07-12 19:32:55 -0700 | [diff] [blame] | 3061 | VisitObjectsPaused(visitor); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3062 | // Verify the roots: |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 3063 | visitor.VerifyRoots(); |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 3064 | if (visitor.GetFailureCount() > 0) { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 3065 | // Dump mod-union tables. |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 3066 | for (const auto& table_pair : mod_union_tables_) { |
| 3067 | accounting::ModUnionTable* mod_union_table = table_pair.second; |
Andreas Gampe | 3fec9ac | 2016-09-13 10:47:28 -0700 | [diff] [blame] | 3068 | mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": "); |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 3069 | } |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 3070 | // Dump remembered sets. |
| 3071 | for (const auto& table_pair : remembered_sets_) { |
| 3072 | accounting::RememberedSet* remembered_set = table_pair.second; |
Andreas Gampe | 3fec9ac | 2016-09-13 10:47:28 -0700 | [diff] [blame] | 3073 | remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": "); |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 3074 | } |
Andreas Gampe | 3fec9ac | 2016-09-13 10:47:28 -0700 | [diff] [blame] | 3075 | DumpSpaces(LOG_STREAM(ERROR)); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 3076 | } |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 3077 | return visitor.GetFailureCount(); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3078 | } |
| 3079 | |
| 3080 | class VerifyReferenceCardVisitor { |
| 3081 | public: |
| 3082 | VerifyReferenceCardVisitor(Heap* heap, bool* failed) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 3083 | REQUIRES_SHARED(Locks::mutator_lock_, |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3084 | Locks::heap_bitmap_lock_) |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3085 | : heap_(heap), failed_(failed) { |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3086 | } |
| 3087 | |
Mathieu Chartier | da7c650 | 2015-07-23 16:01:26 -0700 | [diff] [blame] | 3088 |   // There are no card marks for native roots on a class.
| 3089 | void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) |
| 3090 | const {} |
| 3091 | void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} |
| 3092 | |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 3093 | // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for |
| 3094 | // annotalysis on visitors. |
Mathieu Chartier | 407f702 | 2014-02-18 14:37:05 -0800 | [diff] [blame] | 3095 | void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const |
| 3096 | NO_THREAD_SAFETY_ANALYSIS { |
Ian Rogers | b0fa5dc | 2014-04-28 16:47:08 -0700 | [diff] [blame] | 3097 | mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 3098 | // Filter out class references since changing an object's class does not mark the card as dirty. |
| 3099 | // Also handles large objects, since the only reference they hold is a class reference. |
Mathieu Chartier | 407f702 | 2014-02-18 14:37:05 -0800 | [diff] [blame] | 3100 | if (ref != nullptr && !ref->IsClass()) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3101 | accounting::CardTable* card_table = heap_->GetCardTable(); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3102 |       // An object that references something in the live stack (other than its class) must be
| 3103 |       // on a dirty card, so a card that is not dirty here indicates a missing card mark.
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 3104 | if (!card_table->AddrIsInCardTable(obj)) { |
| 3105 | LOG(ERROR) << "Object " << obj << " is not in the address range of the card table"; |
| 3106 | *failed_ = true; |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3107 | } else if (!card_table->IsDirty(obj)) { |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 3108 | // TODO: Check mod-union tables. |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 3109 | // Card should be either kCardDirty if it got re-dirtied after we aged it, or |
| 3110 |         // kCardDirty - 1 if it didn't get touched since we aged it.
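        // Illustrative only (assuming the usual ART card table constants): with
        // kCardDirty == 0x70, an aged but otherwise untouched card would hold 0x6F.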
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3111 | accounting::ObjectStack* live_stack = heap_->live_stack_.get(); |
Mathieu Chartier | 407f702 | 2014-02-18 14:37:05 -0800 | [diff] [blame] | 3112 | if (live_stack->ContainsSorted(ref)) { |
| 3113 | if (live_stack->ContainsSorted(obj)) { |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3114 | LOG(ERROR) << "Object " << obj << " found in live stack"; |
| 3115 | } |
| 3116 | if (heap_->GetLiveBitmap()->Test(obj)) { |
| 3117 | LOG(ERROR) << "Object " << obj << " found in live bitmap"; |
| 3118 | } |
David Sehr | 709b070 | 2016-10-13 09:12:37 -0700 | [diff] [blame] | 3119 | LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj) |
| 3120 | << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref) |
| 3121 | << " in live stack"; |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3122 | |
| 3123 | // Print which field of the object is dead. |
| 3124 | if (!obj->IsObjectArray()) { |
Vladimir Marko | 4617d58 | 2019-03-28 13:48:31 +0000 | [diff] [blame] | 3125 | ObjPtr<mirror::Class> klass = is_static ? obj->AsClass() : obj->GetClass(); |
Mathieu Chartier | c785344 | 2015-03-27 14:35:38 -0700 | [diff] [blame] | 3126 | CHECK(klass != nullptr); |
Mathieu Chartier | c0fe56a | 2015-08-11 13:01:23 -0700 | [diff] [blame] | 3127 | for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) { |
Mathieu Chartier | 54d220e | 2015-07-30 16:20:06 -0700 | [diff] [blame] | 3128 | if (field.GetOffset().Int32Value() == offset.Int32Value()) { |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3129 | LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is " |
David Sehr | 709b070 | 2016-10-13 09:12:37 -0700 | [diff] [blame] | 3130 | << field.PrettyField(); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3131 | break; |
| 3132 | } |
| 3133 | } |
| 3134 | } else { |
Vladimir Marko | 4617d58 | 2019-03-28 13:48:31 +0000 | [diff] [blame] | 3135 | ObjPtr<mirror::ObjectArray<mirror::Object>> object_array = |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 3136 | obj->AsObjectArray<mirror::Object>(); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3137 | for (int32_t i = 0; i < object_array->GetLength(); ++i) { |
| 3138 | if (object_array->Get(i) == ref) { |
| 3139 | LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref"; |
| 3140 | } |
| 3141 | } |
| 3142 | } |
| 3143 | |
| 3144 | *failed_ = true; |
| 3145 | } |
| 3146 | } |
| 3147 | } |
| 3148 | } |
| 3149 | |
| 3150 | private: |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3151 | Heap* const heap_; |
| 3152 | bool* const failed_; |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3153 | }; |
| 3154 | |
| 3155 | class VerifyLiveStackReferences { |
| 3156 | public: |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 3157 | explicit VerifyLiveStackReferences(Heap* heap) |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3158 | : heap_(heap), |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 3159 | failed_(false) {} |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3160 | |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 3161 | void operator()(mirror::Object* obj) const |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 3162 | REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3163 | VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_)); |
Mathieu Chartier | 059ef3d | 2015-08-18 13:54:21 -0700 | [diff] [blame] | 3164 | obj->VisitReferences(visitor, VoidFunctor()); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3165 | } |
| 3166 | |
| 3167 | bool Failed() const { |
| 3168 | return failed_; |
| 3169 | } |
| 3170 | |
| 3171 | private: |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3172 | Heap* const heap_; |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3173 | bool failed_; |
| 3174 | }; |
| 3175 | |
| 3176 | bool Heap::VerifyMissingCardMarks() { |
Hiroshi Yamauchi | 1ed9061 | 2014-02-14 15:00:51 -0800 | [diff] [blame] | 3177 | Thread* self = Thread::Current(); |
| 3178 | Locks::mutator_lock_->AssertExclusiveHeld(self); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3179 | // We need to sort the live stack since we binary search it. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3180 | live_stack_->Sort(); |
Hiroshi Yamauchi | 1ed9061 | 2014-02-14 15:00:51 -0800 | [diff] [blame] | 3181 |   // Since we sorted the allocation stack content, we need to revoke all
| 3182 | // thread-local allocation stacks. |
| 3183 | RevokeAllThreadLocalAllocationStacks(self); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3184 | VerifyLiveStackReferences visitor(this); |
| 3185 | GetLiveBitmap()->Visit(visitor); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3186 | // We can verify objects in the live stack since none of these should reference dead objects. |
Mathieu Chartier | cb535da | 2015-01-23 13:50:03 -0800 | [diff] [blame] | 3187 | for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) { |
| 3188 | if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) { |
| 3189 | visitor(it->AsMirrorPtr()); |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 3190 | } |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3191 | } |
Mathieu Chartier | 4c13a3f | 2014-07-14 14:57:16 -0700 | [diff] [blame] | 3192 | return !visitor.Failed(); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 3193 | } |
| 3194 | |
Mathieu Chartier | a4f6af9 | 2015-08-11 17:35:25 -0700 | [diff] [blame] | 3195 | void Heap::SwapStacks() { |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 3196 | if (kUseThreadLocalAllocationStack) { |
| 3197 | live_stack_->AssertAllZero(); |
| 3198 | } |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 3199 | allocation_stack_.swap(live_stack_); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 3200 | } |
| 3201 | |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 3202 | void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) { |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 3203 | // This must be called only during the pause. |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 3204 | DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self)); |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 3205 | MutexLock mu(self, *Locks::runtime_shutdown_lock_); |
| 3206 | MutexLock mu2(self, *Locks::thread_list_lock_); |
| 3207 | std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList(); |
| 3208 | for (Thread* t : thread_list) { |
| 3209 | t->RevokeThreadLocalAllocationStack(); |
| 3210 | } |
| 3211 | } |
| 3212 | |
Ian Rogers | 68d8b42 | 2014-07-17 11:09:10 -0700 | [diff] [blame] | 3213 | void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) { |
| 3214 | if (kIsDebugBuild) { |
| 3215 | if (rosalloc_space_ != nullptr) { |
| 3216 | rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread); |
| 3217 | } |
| 3218 | if (bump_pointer_space_ != nullptr) { |
| 3219 | bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread); |
| 3220 | } |
| 3221 | } |
| 3222 | } |
| 3223 | |
Hiroshi Yamauchi | c93c530 | 2014-03-20 16:15:37 -0700 | [diff] [blame] | 3224 | void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() { |
| 3225 | if (kIsDebugBuild) { |
| 3226 | if (bump_pointer_space_ != nullptr) { |
| 3227 | bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked(); |
| 3228 | } |
| 3229 | } |
| 3230 | } |
| 3231 | |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 3232 | accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) { |
| 3233 | auto it = mod_union_tables_.find(space); |
| 3234 | if (it == mod_union_tables_.end()) { |
| 3235 | return nullptr; |
| 3236 | } |
| 3237 | return it->second; |
| 3238 | } |
| 3239 | |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 3240 | accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) { |
| 3241 | auto it = remembered_sets_.find(space); |
| 3242 | if (it == remembered_sets_.end()) { |
| 3243 | return nullptr; |
| 3244 | } |
| 3245 | return it->second; |
| 3246 | } |
| 3247 | |
Mathieu Chartier | a4f6af9 | 2015-08-11 17:35:25 -0700 | [diff] [blame] | 3248 | void Heap::ProcessCards(TimingLogger* timings, |
| 3249 | bool use_rem_sets, |
| 3250 | bool process_alloc_space_cards, |
Lei Li | 4add3b4 | 2015-01-15 11:55:26 +0800 | [diff] [blame] | 3251 | bool clear_alloc_space_cards) { |
Mathieu Chartier | f5997b4 | 2014-06-20 10:37:54 -0700 | [diff] [blame] | 3252 | TimingLogger::ScopedTiming t(__FUNCTION__, timings); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3253 | // Clear cards and keep track of cards cleared in the mod-union table. |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 3254 | for (const auto& space : continuous_spaces_) { |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 3255 | accounting::ModUnionTable* table = FindModUnionTableFromSpace(space); |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 3256 | accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space); |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 3257 | if (table != nullptr) { |
| 3258 | const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" : |
| 3259 | "ImageModUnionClearCards"; |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 3260 | TimingLogger::ScopedTiming t2(name, timings); |
Mathieu Chartier | 6e6078a | 2016-10-24 15:45:41 -0700 | [diff] [blame] | 3261 | table->ProcessCards(); |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 3262 | } else if (use_rem_sets && rem_set != nullptr) { |
Mathieu Chartier | f75dce4 | 2019-04-08 09:36:23 -0700 | [diff] [blame] | 3263 | DCHECK(collector::SemiSpace::kUseRememberedSet) << static_cast<int>(collector_type_); |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 3264 | TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings); |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 3265 | rem_set->ClearCards(); |
Lei Li | 4add3b4 | 2015-01-15 11:55:26 +0800 | [diff] [blame] | 3266 | } else if (process_alloc_space_cards) { |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 3267 | TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings); |
Lei Li | 4add3b4 | 2015-01-15 11:55:26 +0800 | [diff] [blame] | 3268 | if (clear_alloc_space_cards) { |
Mathieu Chartier | fbc3108 | 2016-01-24 11:59:56 -0800 | [diff] [blame] | 3269 | uint8_t* end = space->End(); |
| 3270 | if (space->IsImageSpace()) { |
| 3271 |           // Image space end is the end of the mirror objects; it is not necessarily page or card
| 3272 | // aligned. Align up so that the check in ClearCardRange does not fail. |
| 3273 | end = AlignUp(end, accounting::CardTable::kCardSize); |
| 3274 | } |
| 3275 | card_table_->ClearCardRange(space->Begin(), end); |
Lei Li | 4add3b4 | 2015-01-15 11:55:26 +0800 | [diff] [blame] | 3276 | } else { |
| 3277 | // No mod union table for the AllocSpace. Age the cards so that the GC knows that these |
| 3278 | // cards were dirty before the GC started. |
| 3279 | // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread) |
| 3280 | // -> clean(cleaning thread). |
| 3281 |         // The races leave us with either an aged card or an unaged (still dirty) card. Since we
| 3282 |         // checkpoint the roots and then scan / update mod union tables afterwards, we will always
| 3283 |         // scan either card. If we end up with the unaged card, we scan it in the pause.
| 3284 | card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), |
| 3285 | VoidFunctor()); |
| 3286 | } |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 3287 | } |
| 3288 | } |
| 3289 | } |
| 3290 | |
Mathieu Chartier | 9750995 | 2015-07-13 14:35:43 -0700 | [diff] [blame] | 3291 | struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor { |
Roland Levillain | f73caca | 2018-08-24 17:19:07 +0100 | [diff] [blame] | 3292 | mirror::Object* MarkObject(mirror::Object* obj) override { |
Mathieu Chartier | 9750995 | 2015-07-13 14:35:43 -0700 | [diff] [blame] | 3293 | return obj; |
| 3294 | } |
Roland Levillain | f73caca | 2018-08-24 17:19:07 +0100 | [diff] [blame] | 3295 | void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override { |
Mathieu Chartier | 9750995 | 2015-07-13 14:35:43 -0700 | [diff] [blame] | 3296 | } |
| 3297 | }; |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 3298 | |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3299 | void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) { |
| 3300 | Thread* const self = Thread::Current(); |
Mathieu Chartier | 10fb83a | 2014-06-15 15:15:43 -0700 | [diff] [blame] | 3301 | TimingLogger* const timings = current_gc_iteration_.GetTimings(); |
Mathieu Chartier | f5997b4 | 2014-06-20 10:37:54 -0700 | [diff] [blame] | 3302 | TimingLogger::ScopedTiming t(__FUNCTION__, timings); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3303 | if (verify_pre_gc_heap_) { |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 3304 | TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings); |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 3305 | size_t failures = VerifyHeapReferences(); |
| 3306 | if (failures > 0) { |
| 3307 | LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures |
| 3308 | << " failures"; |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3309 | } |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 3310 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3311 | // Check that all objects which reference things in the live stack are on dirty cards. |
| 3312 | if (verify_missing_card_marks_) { |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 3313 | TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings); |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3314 | ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); |
Mathieu Chartier | a4f6af9 | 2015-08-11 17:35:25 -0700 | [diff] [blame] | 3315 | SwapStacks(); |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3316 | // Sort the live stack so that we can quickly binary search it later. |
Mathieu Chartier | 4c13a3f | 2014-07-14 14:57:16 -0700 | [diff] [blame] | 3317 | CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName() |
| 3318 | << " missing card mark verification failed\n" << DumpSpaces(); |
Mathieu Chartier | a4f6af9 | 2015-08-11 17:35:25 -0700 | [diff] [blame] | 3319 | SwapStacks(); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3320 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3321 | if (verify_mod_union_table_) { |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 3322 | TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3323 | ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_); |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 3324 | for (const auto& table_pair : mod_union_tables_) { |
| 3325 | accounting::ModUnionTable* mod_union_table = table_pair.second; |
Mathieu Chartier | 9750995 | 2015-07-13 14:35:43 -0700 | [diff] [blame] | 3326 | IdentityMarkHeapReferenceVisitor visitor; |
| 3327 | mod_union_table->UpdateAndMarkReferences(&visitor); |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 3328 | mod_union_table->Verify(); |
| 3329 | } |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3330 | } |
| 3331 | } |
| 3332 | |
| 3333 | void Heap::PreGcVerification(collector::GarbageCollector* gc) { |
Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 3334 | if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) { |
Andreas Gampe | 4934eb1 | 2017-01-30 13:15:26 -0800 | [diff] [blame] | 3335 | collector::GarbageCollector::ScopedPause pause(gc, false); |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3336 | PreGcVerificationPaused(gc); |
| 3337 | } |
| 3338 | } |
| 3339 | |
Mathieu Chartier | a4f6af9 | 2015-08-11 17:35:25 -0700 | [diff] [blame] | 3340 | void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) { |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3341 | // TODO: Add a new runtime option for this? |
| 3342 | if (verify_pre_gc_rosalloc_) { |
Mathieu Chartier | 10fb83a | 2014-06-15 15:15:43 -0700 | [diff] [blame] | 3343 | RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification"); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3344 | } |
Mathieu Chartier | 4da7f2f | 2012-11-13 12:51:01 -0800 | [diff] [blame] | 3345 | } |
| 3346 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3347 | void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) { |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3348 | Thread* const self = Thread::Current(); |
Mathieu Chartier | 10fb83a | 2014-06-15 15:15:43 -0700 | [diff] [blame] | 3349 | TimingLogger* const timings = current_gc_iteration_.GetTimings(); |
Mathieu Chartier | f5997b4 | 2014-06-20 10:37:54 -0700 | [diff] [blame] | 3350 | TimingLogger::ScopedTiming t(__FUNCTION__, timings); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3351 |   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
| 3352 | // reachable objects. |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3353 | if (verify_pre_sweeping_heap_) { |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 3354 | TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3355 | CHECK_NE(self->GetState(), kRunnable); |
Hiroshi Yamauchi | 0c8c303 | 2015-01-16 16:54:35 -0800 | [diff] [blame] | 3356 | { |
| 3357 | WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); |
| 3358 | // Swapping bound bitmaps does nothing. |
| 3359 | gc->SwapBitmaps(); |
| 3360 | } |
Mathieu Chartier | 78f7b4c | 2014-05-06 10:57:27 -0700 | [diff] [blame] | 3361 | // Pass in false since concurrent reference processing can mean that the reference referents |
| 3362 | // may point to dead objects at the point which PreSweepingGcVerification is called. |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 3363 | size_t failures = VerifyHeapReferences(false); |
| 3364 | if (failures > 0) { |
| 3365 | LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures |
| 3366 | << " failures"; |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3367 | } |
Hiroshi Yamauchi | 0c8c303 | 2015-01-16 16:54:35 -0800 | [diff] [blame] | 3368 | { |
| 3369 | WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); |
| 3370 | gc->SwapBitmaps(); |
| 3371 | } |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3372 | } |
| 3373 | if (verify_pre_sweeping_rosalloc_) { |
| 3374 | RosAllocVerification(timings, "PreSweepingRosAllocVerification"); |
| 3375 | } |
| 3376 | } |
| 3377 | |
| 3378 | void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) { |
| 3379 | // Only pause if we have to do some verification. |
| 3380 | Thread* const self = Thread::Current(); |
Mathieu Chartier | 10fb83a | 2014-06-15 15:15:43 -0700 | [diff] [blame] | 3381 | TimingLogger* const timings = GetCurrentGcIteration()->GetTimings(); |
Mathieu Chartier | f5997b4 | 2014-06-20 10:37:54 -0700 | [diff] [blame] | 3382 | TimingLogger::ScopedTiming t(__FUNCTION__, timings); |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3383 | if (verify_system_weaks_) { |
| 3384 | ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_); |
| 3385 | collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc); |
| 3386 | mark_sweep->VerifySystemWeaks(); |
| 3387 | } |
| 3388 | if (verify_post_gc_rosalloc_) { |
Mathieu Chartier | f5997b4 | 2014-06-20 10:37:54 -0700 | [diff] [blame] | 3389 | RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification"); |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3390 | } |
| 3391 | if (verify_post_gc_heap_) { |
Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 3392 | TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings); |
Mathieu Chartier | 8ab7e78 | 2014-05-19 16:55:27 -0700 | [diff] [blame] | 3393 | size_t failures = VerifyHeapReferences(); |
| 3394 | if (failures > 0) { |
| 3395 |       LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
| 3396 | << " failures"; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 3397 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 3398 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 3399 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 3400 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3401 | void Heap::PostGcVerification(collector::GarbageCollector* gc) { |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3402 | if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) { |
Andreas Gampe | 4934eb1 | 2017-01-30 13:15:26 -0800 | [diff] [blame] | 3403 | collector::GarbageCollector::ScopedPause pause(gc, false); |
Mathieu Chartier | d35326f | 2014-08-18 15:02:59 -0700 | [diff] [blame] | 3404 | PostGcVerificationPaused(gc); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 3405 | } |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 3406 | } |
| 3407 | |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3408 | void Heap::RosAllocVerification(TimingLogger* timings, const char* name) { |
Mathieu Chartier | f5997b4 | 2014-06-20 10:37:54 -0700 | [diff] [blame] | 3409 | TimingLogger::ScopedTiming t(name, timings); |
Mathieu Chartier | 6f365cc | 2014-04-23 12:42:27 -0700 | [diff] [blame] | 3410 | for (const auto& space : continuous_spaces_) { |
| 3411 | if (space->IsRosAllocSpace()) { |
| 3412 | VLOG(heap) << name << " : " << space->GetName(); |
| 3413 | space->AsRosAllocSpace()->Verify(); |
Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 3414 | } |
| 3415 | } |
| 3416 | } |
| 3417 | |
Mathieu Chartier | 89a201e | 2014-05-02 10:27:26 -0700 | [diff] [blame] | 3418 | collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) { |
Mathieu Chartier | caa82d6 | 2014-02-02 16:51:17 -0800 | [diff] [blame] | 3419 | ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3420 | MutexLock mu(self, *gc_complete_lock_); |
Mathieu Chartier | 89a201e | 2014-05-02 10:27:26 -0700 | [diff] [blame] | 3421 | return WaitForGcToCompleteLocked(cause, self); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3422 | } |
| 3423 | |
Mathieu Chartier | 89a201e | 2014-05-02 10:27:26 -0700 | [diff] [blame] | 3424 | collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) { |
Alex Light | 6683446 | 2019-04-08 16:28:29 +0000 | [diff] [blame] | 3425 | gc_complete_cond_->CheckSafeToWait(self); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3426 | collector::GcType last_gc_type = collector::kGcTypeNone; |
Mathieu Chartier | 40112dd | 2017-06-26 17:49:09 -0700 | [diff] [blame] | 3427 | GcCause last_gc_cause = kGcCauseNone; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3428 | uint64_t wait_start = NanoTime(); |
Mathieu Chartier | d5a89ee | 2014-01-31 09:55:13 -0800 | [diff] [blame] | 3429 | while (collector_type_running_ != kCollectorTypeNone) { |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 3430 | if (self != task_processor_->GetRunningThread()) { |
| 3431 | // The current thread is about to wait for a currently running |
| 3432 | // collection to finish. If the waiting thread is not the heap |
| 3433 | // task daemon thread, the currently running collection is |
| 3434 |       // considered a blocking GC.
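      // For example (illustrative): an application thread that has to wait here for an
      // allocation while a concurrent collection is already running makes that collection
      // count as blocking, whereas the heap task daemon waiting on its own tasks does not.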
| 3435 | running_collection_is_blocking_ = true; |
| 3436 | VLOG(gc) << "Waiting for a blocking GC " << cause; |
| 3437 | } |
Andreas Gampe | aac0972 | 2019-01-03 08:33:58 -0800 | [diff] [blame] | 3438 | SCOPED_TRACE << "GC: Wait For Completion " << cause; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3439 |     // We must wait: change the thread state, then sleep on gc_complete_cond_.
| 3440 | gc_complete_cond_->Wait(self); |
| 3441 | last_gc_type = last_gc_type_; |
Mathieu Chartier | 40112dd | 2017-06-26 17:49:09 -0700 | [diff] [blame] | 3442 | last_gc_cause = last_gc_cause_; |
Mathieu Chartier | 7664f5c | 2012-06-08 18:15:32 -0700 | [diff] [blame] | 3443 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3444 | uint64_t wait_time = NanoTime() - wait_start; |
| 3445 | total_wait_time_ += wait_time; |
| 3446 | if (wait_time > long_pause_log_threshold_) { |
Mathieu Chartier | 40112dd | 2017-06-26 17:49:09 -0700 | [diff] [blame] | 3447 | LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for " |
| 3448 | << PrettyDuration(wait_time); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3449 | } |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 3450 | if (self != task_processor_->GetRunningThread()) { |
| 3451 | // The current thread is about to run a collection. If the thread |
| 3452 |     // is not the heap task daemon thread, it's considered a
| 3453 | // blocking GC (i.e., blocking itself). |
| 3454 | running_collection_is_blocking_ = true; |
Mathieu Chartier | b166f41 | 2017-04-25 16:31:20 -0700 | [diff] [blame] | 3455 | // Don't log fake "GC" types that are only used for debugger or hidden APIs. If we log these, |
| 3456 | // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too. |
| 3457 | if (cause == kGcCauseForAlloc || |
| 3458 | cause == kGcCauseForNativeAlloc || |
| 3459 | cause == kGcCauseDisableMovingGc) { |
| 3460 | VLOG(gc) << "Starting a blocking GC " << cause; |
| 3461 | } |
Hiroshi Yamauchi | a1c9f01 | 2015-04-02 10:18:12 -0700 | [diff] [blame] | 3462 | } |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 3463 | return last_gc_type; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 3464 | } |
| 3465 | |
Elliott Hughes | c967f78 | 2012-04-16 10:23:15 -0700 | [diff] [blame] | 3466 | void Heap::DumpForSigQuit(std::ostream& os) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 3467 | os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/" |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 3468 | << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n"; |
Elliott Hughes | 8b788fe | 2013-04-17 15:57:01 -0700 | [diff] [blame] | 3469 | DumpGcPerformanceInfo(os); |
Elliott Hughes | c967f78 | 2012-04-16 10:23:15 -0700 | [diff] [blame] | 3470 | } |
| 3471 | |
| 3472 | size_t Heap::GetPercentFree() { |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3473 | return static_cast<size_t>(100.0f * static_cast<float>( |
| 3474 | GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed)); |
Elliott Hughes | c967f78 | 2012-04-16 10:23:15 -0700 | [diff] [blame] | 3475 | } |
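| | |
| | // A worked example of the computation above, with purely illustrative values: if |
| | // GetFreeMemory() returned 64 MB and target_footprint_ were 256 MB, the result would be |
| | //   100.0f * 64 / 256 == 25, reported as "25% free" by DumpForSigQuit() above. |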
| 3476 | |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3477 | void Heap::SetIdealFootprint(size_t target_footprint) { |
| 3478 | if (target_footprint > GetMaxMemory()) { |
| 3479 | VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to " |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 3480 | << PrettySize(GetMaxMemory()); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3481 | target_footprint = GetMaxMemory(); |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 3482 | } |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3483 | target_footprint_.store(target_footprint, std::memory_order_relaxed); |
Shih-wei Liao | 8c2f641 | 2011-10-03 22:58:14 -0700 | [diff] [blame] | 3484 | } |
| 3485 | |
Mathieu Chartier | 0795f23 | 2016-09-27 18:43:30 -0700 | [diff] [blame] | 3486 | bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3487 | if (kMovingCollector) { |
Mathieu Chartier | 1cc62e4 | 2016-10-03 18:01:28 -0700 | [diff] [blame] | 3488 | space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true); |
Mathieu Chartier | 31f4414 | 2014-04-08 14:40:03 -0700 | [diff] [blame] | 3489 | if (space != nullptr) { |
| 3490 | // TODO: Check large object? |
| 3491 | return space->CanMoveObjects(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3492 | } |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3493 | } |
| 3494 | return false; |
| 3495 | } |
| 3496 | |
Mathieu Chartier | afe4998 | 2014-03-27 10:55:04 -0700 | [diff] [blame] | 3497 | collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) { |
Albert Mingkun Yang | 1c42e75 | 2018-11-19 16:10:24 +0000 | [diff] [blame] | 3498 | for (auto* collector : garbage_collectors_) { |
Mathieu Chartier | afe4998 | 2014-03-27 10:55:04 -0700 | [diff] [blame] | 3499 | if (collector->GetCollectorType() == collector_type_ && |
| 3500 | collector->GetGcType() == gc_type) { |
| 3501 | return collector; |
| 3502 | } |
| 3503 | } |
| 3504 | return nullptr; |
| 3505 | } |
| 3506 | |
Mathieu Chartier | 2f8da3e | 2014-04-15 15:37:02 -0700 | [diff] [blame] | 3507 | double Heap::HeapGrowthMultiplier() const { |
| 3508 | // If we don't care about pause times, we are in the background, so return 1.0. |
Mathieu Chartier | 11c273d | 2017-10-15 20:54:45 -0700 | [diff] [blame] | 3509 | if (!CareAboutPauseTimes()) { |
Mathieu Chartier | 2f8da3e | 2014-04-15 15:37:02 -0700 | [diff] [blame] | 3510 | return 1.0; |
| 3511 | } |
| 3512 | return foreground_heap_growth_multiplier_; |
| 3513 | } |
| 3514 | |
Mathieu Chartier | e2c2f6e | 2014-12-16 18:49:31 -0800 | [diff] [blame] | 3515 | void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran, |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3516 | size_t bytes_allocated_before_gc) { |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 3517 | // We know what our utilization is at this moment. |
| 3518 | // This doesn't actually resize any memory. It just lets the heap grow more when necessary. |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3519 | const size_t bytes_allocated = GetBytesAllocated(); |
Mathieu Chartier | 34afcde | 2017-06-30 15:31:11 -0700 | [diff] [blame] | 3520 | // Trace the new heap size after the GC is finished. |
| 3521 | TraceHeapSize(bytes_allocated); |
Lokesh Gidra | acd7060 | 2019-12-05 17:46:25 -0800 | [diff] [blame] | 3522 | uint64_t target_size, grow_bytes; |
Mathieu Chartier | afe4998 | 2014-03-27 10:55:04 -0700 | [diff] [blame] | 3523 | collector::GcType gc_type = collector_ran->GetGcType(); |
Lokesh Gidra | acd7060 | 2019-12-05 17:46:25 -0800 | [diff] [blame] | 3524 | MutexLock mu(Thread::Current(), process_state_update_lock_); |
Roland Levillain | 2ae376f | 2018-01-30 11:35:11 +0000 | [diff] [blame] | 3525 | // Use the multiplier to grow more for foreground. |
Lokesh Gidra | acd7060 | 2019-12-05 17:46:25 -0800 | [diff] [blame] | 3526 | const double multiplier = HeapGrowthMultiplier(); |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 3527 | if (gc_type != collector::kGcTypeSticky) { |
| 3528 | // Grow the heap for non-sticky GC. |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3529 | uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0); |
| 3530 | DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated |
| 3531 | << " target_utilization_=" << target_utilization_; |
Lokesh Gidra | acd7060 | 2019-12-05 17:46:25 -0800 | [diff] [blame] | 3532 | grow_bytes = std::min(delta, static_cast<uint64_t>(max_free_)); |
| 3533 | grow_bytes = std::max(grow_bytes, static_cast<uint64_t>(min_free_)); |
| 3534 | target_size = bytes_allocated + static_cast<uint64_t>(grow_bytes * multiplier); |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 3535 | next_gc_type_ = collector::kGcTypeSticky; |
| 3536 | } else { |
Richard Uhler | caaa2b0 | 2017-02-01 09:54:17 +0000 | [diff] [blame] | 3537 | collector::GcType non_sticky_gc_type = NonStickyGcType(); |
Mathieu Chartier | afe4998 | 2014-03-27 10:55:04 -0700 | [diff] [blame] | 3538 | // Find what the next non-sticky collector will be. |
| 3539 | collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type); |
Albert Mingkun Yang | 0b4d146 | 2018-11-29 13:25:35 +0000 | [diff] [blame] | 3540 | if (use_generational_cc_) { |
Mathieu Chartier | 8d1a996 | 2016-08-17 16:39:45 -0700 | [diff] [blame] | 3541 | if (non_sticky_collector == nullptr) { |
| 3542 | non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial); |
| 3543 | } |
| 3544 | CHECK(non_sticky_collector != nullptr); |
| 3545 | } |
Albert Mingkun Yang | 0b4d146 | 2018-11-29 13:25:35 +0000 | [diff] [blame] | 3546 | double sticky_gc_throughput_adjustment = GetStickyGcThroughputAdjustment(use_generational_cc_); |
| 3547 | |
Mathieu Chartier | afe4998 | 2014-03-27 10:55:04 -0700 | [diff] [blame] | 3548 | // If the throughput of the current sticky GC >= throughput of the non-sticky collector, then |
| 3549 | // do another sticky collection next. |
Lokesh Gidra | 1a862c8 | 2019-02-01 11:05:04 -0800 | [diff] [blame] | 3550 | // We also check that the bytes allocated aren't over the target_footprint, or |
| 3551 | // concurrent_start_bytes in case of concurrent GCs, in order to prevent a |
Mathieu Chartier | afe4998 | 2014-03-27 10:55:04 -0700 | [diff] [blame] | 3552 | // pathological case where dead objects that sticky GC cannot reclaim would accumulate |
| 3553 | // if the sticky GC throughput always remained >= the full/partial throughput. |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3554 | size_t target_footprint = target_footprint_.load(std::memory_order_relaxed); |
Albert Mingkun Yang | 0b4d146 | 2018-11-29 13:25:35 +0000 | [diff] [blame] | 3555 | if (current_gc_iteration_.GetEstimatedThroughput() * sticky_gc_throughput_adjustment >= |
Mathieu Chartier | afe4998 | 2014-03-27 10:55:04 -0700 | [diff] [blame] | 3556 | non_sticky_collector->GetEstimatedMeanThroughput() && |
Mathieu Chartier | 10fb83a | 2014-06-15 15:15:43 -0700 | [diff] [blame] | 3557 | non_sticky_collector->NumberOfIterations() > 0 && |
Lokesh Gidra | 1a862c8 | 2019-02-01 11:05:04 -0800 | [diff] [blame] | 3558 | bytes_allocated <= (IsGcConcurrent() ? concurrent_start_bytes_ : target_footprint)) { |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 3559 | next_gc_type_ = collector::kGcTypeSticky; |
| 3560 | } else { |
Mathieu Chartier | afe4998 | 2014-03-27 10:55:04 -0700 | [diff] [blame] | 3561 | next_gc_type_ = non_sticky_gc_type; |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 3562 | } |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 3563 | // If we have freed enough memory, shrink the heap back down. |
Lokesh Gidra | acd7060 | 2019-12-05 17:46:25 -0800 | [diff] [blame] | 3564 | const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3565 | if (bytes_allocated + adjusted_max_free < target_footprint) { |
Mathieu Chartier | e2c2f6e | 2014-12-16 18:49:31 -0800 | [diff] [blame] | 3566 | target_size = bytes_allocated + adjusted_max_free; |
Lokesh Gidra | acd7060 | 2019-12-05 17:46:25 -0800 | [diff] [blame] | 3567 | grow_bytes = max_free_; |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 3568 | } else { |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3569 | target_size = std::max(bytes_allocated, target_footprint); |
Lokesh Gidra | acd7060 | 2019-12-05 17:46:25 -0800 | [diff] [blame] | 3570 | // The same whether jank perceptible or not; just avoid the adjustment. |
| 3571 | grow_bytes = 0; |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 3572 | } |
| 3573 | } |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3574 | CHECK_LE(target_size, std::numeric_limits<size_t>::max()); |
| 3575 | if (!ignore_target_footprint_) { |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 3576 | SetIdealFootprint(target_size); |
Lokesh Gidra | acd7060 | 2019-12-05 17:46:25 -0800 | [diff] [blame] | 3577 | // Store target size (computed with foreground heap growth multiplier) for updating |
| 3578 | // target_footprint_ when process state switches to foreground. |
| 3579 | // min_foreground_target_footprint_ = 0 ensures that target_footprint_ is not updated on |
| 3580 | // process-state switch. |
| 3581 | min_foreground_target_footprint_ = |
| 3582 | (multiplier <= 1.0 && grow_bytes > 0) |
| 3583 | ? bytes_allocated + static_cast<size_t>(grow_bytes * foreground_heap_growth_multiplier_) |
| 3584 | : 0; |
| 3585 | |
Hiroshi Yamauchi | 3e41780 | 2014-03-20 12:03:02 -0700 | [diff] [blame] | 3586 | if (IsGcConcurrent()) { |
Mathieu Chartier | e2c2f6e | 2014-12-16 18:49:31 -0800 | [diff] [blame] | 3587 | const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() + |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3588 | current_gc_iteration_.GetFreedLargeObjectBytes() + |
| 3589 | current_gc_iteration_.GetFreedRevokeBytes(); |
Mathieu Chartier | e2c2f6e | 2014-12-16 18:49:31 -0800 | [diff] [blame] | 3590 | // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out |
| 3591 | // how many bytes were allocated during the GC we need to add freed_bytes back on. |
| 3592 | CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3593 | const size_t bytes_allocated_during_gc = bytes_allocated + freed_bytes - |
Mathieu Chartier | e2c2f6e | 2014-12-16 18:49:31 -0800 | [diff] [blame] | 3594 | bytes_allocated_before_gc; |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 3595 | // Calculate when to perform the next ConcurrentGC. |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 3596 | // Estimate how many remaining bytes we will have when we need to start the next GC. |
Lokesh Gidra | 1144b63 | 2018-01-18 10:12:38 -0800 | [diff] [blame] | 3597 | size_t remaining_bytes = bytes_allocated_during_gc; |
Mathieu Chartier | 7476280 | 2014-01-24 10:21:35 -0800 | [diff] [blame] | 3598 | remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 3599 | remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3600 | size_t target_footprint = target_footprint_.load(std::memory_order_relaxed); |
| 3601 | if (UNLIKELY(remaining_bytes > target_footprint)) { |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 3602 | // A situation that should never happen: from the estimated allocation rate we would exceed |
| 3603 | // the application's entire footprint. Schedule |
Mathieu Chartier | 7476280 | 2014-01-24 10:21:35 -0800 | [diff] [blame] | 3604 | // another GC nearly straight away. |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3605 | remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 3606 | } |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3607 | DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory()); |
Mathieu Chartier | 7476280 | 2014-01-24 10:21:35 -0800 | [diff] [blame] | 3608 | // Start a concurrent GC when we get close to the estimated remaining bytes. When the |
| 3609 | // allocation rate is very high, remaining_bytes could tell us that we should start a GC |
| 3610 | // right away. |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3611 | concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated); |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 3612 | } |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 3613 | } |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 3614 | } |
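| | |
| | // A worked example of the non-sticky sizing above, using purely illustrative values rather than |
| | // any particular device configuration: with bytes_allocated = 40 MB and a target utilization |
| | // of 0.75, |
| | //   delta      = 40 MB * (1 / 0.75 - 1) ~= 13.3 MB |
| | //   grow_bytes = clamp(delta, min_free_, max_free_) = 8 MB   (assuming max_free_ = 8 MB) |
| | //   target     = 40 MB + 2.0 * 8 MB = 56 MB                  (assuming a foreground multiplier of 2.0) |
| | // For a concurrent collector, concurrent_start_bytes_ is then set below the new target by |
| | // roughly the number of bytes allocated during the previous GC, so the next concurrent GC can |
| | // finish before the target footprint is reached. |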
| 3615 | |
Mathieu Chartier | 379d09f | 2015-01-08 11:28:13 -0800 | [diff] [blame] | 3616 | void Heap::ClampGrowthLimit() { |
Mathieu Chartier | ddac423 | 2015-04-02 10:08:03 -0700 | [diff] [blame] | 3617 | // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap. |
Mathieu Chartier | a9d82fe | 2016-01-25 20:06:11 -0800 | [diff] [blame] | 3618 | ScopedObjectAccess soa(Thread::Current()); |
| 3619 | WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_); |
Mathieu Chartier | 379d09f | 2015-01-08 11:28:13 -0800 | [diff] [blame] | 3620 | capacity_ = growth_limit_; |
| 3621 | for (const auto& space : continuous_spaces_) { |
| 3622 | if (space->IsMallocSpace()) { |
| 3623 | gc::space::MallocSpace* malloc_space = space->AsMallocSpace(); |
| 3624 | malloc_space->ClampGrowthLimit(); |
| 3625 | } |
| 3626 | } |
Lokesh Gidra | 5f0b71a | 2018-02-06 18:01:35 -0800 | [diff] [blame] | 3627 | if (collector_type_ == kCollectorTypeCC) { |
| 3628 | DCHECK(region_space_ != nullptr); |
| 3629 | // Twice the capacity as CC needs extra space for evacuating objects. |
| 3630 | region_space_->ClampGrowthLimit(2 * capacity_); |
| 3631 | } |
Mathieu Chartier | 379d09f | 2015-01-08 11:28:13 -0800 | [diff] [blame] | 3632 | // This space isn't added to continuous_spaces_, for performance reasons. |
| 3633 | if (main_space_backup_.get() != nullptr) { |
| 3634 | main_space_backup_->ClampGrowthLimit(); |
| 3635 | } |
| 3636 | } |
| 3637 | |
jeffhao | c116070 | 2011-10-27 15:48:45 -0700 | [diff] [blame] | 3638 | void Heap::ClearGrowthLimit() { |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3639 | if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_ |
| 3640 | && growth_limit_ < capacity_) { |
| 3641 | target_footprint_.store(capacity_, std::memory_order_relaxed); |
Mathieu Chartier | a98a282 | 2017-05-24 16:14:10 -0700 | [diff] [blame] | 3642 | concurrent_start_bytes_ = |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3643 | UnsignedDifference(capacity_, kMinConcurrentRemainingBytes); |
Mathieu Chartier | a98a282 | 2017-05-24 16:14:10 -0700 | [diff] [blame] | 3644 | } |
Mathieu Chartier | 80de7a6 | 2012-11-27 17:21:50 -0800 | [diff] [blame] | 3645 | growth_limit_ = capacity_; |
Mathieu Chartier | a9d82fe | 2016-01-25 20:06:11 -0800 | [diff] [blame] | 3646 | ScopedObjectAccess soa(Thread::Current()); |
Mathieu Chartier | 0310da5 | 2014-12-01 13:40:48 -0800 | [diff] [blame] | 3647 | for (const auto& space : continuous_spaces_) { |
| 3648 | if (space->IsMallocSpace()) { |
| 3649 | gc::space::MallocSpace* malloc_space = space->AsMallocSpace(); |
| 3650 | malloc_space->ClearGrowthLimit(); |
| 3651 | malloc_space->SetFootprintLimit(malloc_space->Capacity()); |
| 3652 | } |
| 3653 | } |
| 3654 | // This space isn't added to continuous_spaces_, for performance reasons. |
| 3655 | if (main_space_backup_.get() != nullptr) { |
| 3656 | main_space_backup_->ClearGrowthLimit(); |
| 3657 | main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity()); |
| 3658 | } |
jeffhao | c116070 | 2011-10-27 15:48:45 -0700 | [diff] [blame] | 3659 | } |
| 3660 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3661 | void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) { |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 3662 | ScopedObjectAccess soa(self); |
Mathieu Chartier | 8668c3c | 2014-04-24 16:48:11 -0700 | [diff] [blame] | 3663 | ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object)); |
Ian Rogers | 53b8b09 | 2014-03-13 23:45:53 -0700 | [diff] [blame] | 3664 | jvalue args[1]; |
| 3665 | args[0].l = arg.get(); |
| 3666 | InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args); |
Mathieu Chartier | 8668c3c | 2014-04-24 16:48:11 -0700 | [diff] [blame] | 3667 | // Restore object in case it gets moved. |
Mathieu Chartier | 28bd2e4 | 2016-10-04 13:54:57 -0700 | [diff] [blame] | 3668 | *object = soa.Decode<mirror::Object>(arg.get()); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 3669 | } |
| 3670 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3671 | void Heap::RequestConcurrentGCAndSaveObject(Thread* self, |
| 3672 | bool force_full, |
| 3673 | ObjPtr<mirror::Object>* obj) { |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 3674 | StackHandleScope<1> hs(self); |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3675 | HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj)); |
Mathieu Chartier | 35b59a2 | 2017-04-17 15:24:43 -0700 | [diff] [blame] | 3676 | RequestConcurrentGC(self, kGcCauseBackground, force_full); |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 3677 | } |
| 3678 | |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3679 | class Heap::ConcurrentGCTask : public HeapTask { |
| 3680 | public: |
Mathieu Chartier | 35b59a2 | 2017-04-17 15:24:43 -0700 | [diff] [blame] | 3681 | ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full) |
| 3682 | : HeapTask(target_time), cause_(cause), force_full_(force_full) {} |
Roland Levillain | f73caca | 2018-08-24 17:19:07 +0100 | [diff] [blame] | 3683 | void Run(Thread* self) override { |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3684 | gc::Heap* heap = Runtime::Current()->GetHeap(); |
Mathieu Chartier | 35b59a2 | 2017-04-17 15:24:43 -0700 | [diff] [blame] | 3685 | heap->ConcurrentGC(self, cause_, force_full_); |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3686 | heap->ClearConcurrentGCRequest(); |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 3687 | } |
Hiroshi Yamauchi | 0ae9899 | 2015-05-01 14:33:19 -0700 | [diff] [blame] | 3688 | |
| 3689 | private: |
Mathieu Chartier | 35b59a2 | 2017-04-17 15:24:43 -0700 | [diff] [blame] | 3690 | const GcCause cause_; |
Hiroshi Yamauchi | 0ae9899 | 2015-05-01 14:33:19 -0700 | [diff] [blame] | 3691 | const bool force_full_; // If true, force full (or partial) collection. |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3692 | }; |
| 3693 | |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 3694 | static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) { |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3695 | Runtime* runtime = Runtime::Current(); |
| 3696 | return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) && |
| 3697 | !self->IsHandlingStackOverflow(); |
| 3698 | } |
| 3699 | |
| 3700 | void Heap::ClearConcurrentGCRequest() { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 3701 | concurrent_gc_pending_.store(false, std::memory_order_relaxed); |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3702 | } |
| 3703 | |
Mathieu Chartier | 35b59a2 | 2017-04-17 15:24:43 -0700 | [diff] [blame] | 3704 | void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) { |
Mathieu Chartier | ac19516 | 2015-02-20 18:44:28 +0000 | [diff] [blame] | 3705 | if (CanAddHeapTask(self) && |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 3706 | concurrent_gc_pending_.CompareAndSetStrongSequentiallyConsistent(false, true)) { |
Hiroshi Yamauchi | 0ae9899 | 2015-05-01 14:33:19 -0700 | [diff] [blame] | 3707 | task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away. |
Mathieu Chartier | 35b59a2 | 2017-04-17 15:24:43 -0700 | [diff] [blame] | 3708 | cause, |
Hiroshi Yamauchi | 0ae9899 | 2015-05-01 14:33:19 -0700 | [diff] [blame] | 3709 | force_full)); |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3710 | } |
Mathieu Chartier | 7664f5c | 2012-06-08 18:15:32 -0700 | [diff] [blame] | 3711 | } |
| 3712 | |
Mathieu Chartier | 35b59a2 | 2017-04-17 15:24:43 -0700 | [diff] [blame] | 3713 | void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) { |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3714 | if (!Runtime::Current()->IsShuttingDown(self)) { |
| 3715 | // Wait for any GCs currently running to finish. |
Mathieu Chartier | 35b59a2 | 2017-04-17 15:24:43 -0700 | [diff] [blame] | 3716 | if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) { |
Hans Boehm | 1575267 | 2018-12-18 17:01:00 -0800 | [diff] [blame] | 3717 | // If we can't run the GC type we wanted to run, find the next appropriate one and try |
Roland Levillain | b81e9e9 | 2017-04-20 17:35:32 +0100 | [diff] [blame] | 3718 | // that instead. E.g. can't do partial, so do full instead. |
Hiroshi Yamauchi | 0ae9899 | 2015-05-01 14:33:19 -0700 | [diff] [blame] | 3719 | collector::GcType next_gc_type = next_gc_type_; |
| 3720 | // If forcing full and next gc type is sticky, override with a non-sticky type. |
| 3721 | if (force_full && next_gc_type == collector::kGcTypeSticky) { |
Richard Uhler | caaa2b0 | 2017-02-01 09:54:17 +0000 | [diff] [blame] | 3722 | next_gc_type = NonStickyGcType(); |
Hiroshi Yamauchi | 0ae9899 | 2015-05-01 14:33:19 -0700 | [diff] [blame] | 3723 | } |
Mathieu Chartier | 35b59a2 | 2017-04-17 15:24:43 -0700 | [diff] [blame] | 3724 | if (CollectGarbageInternal(next_gc_type, cause, false) == collector::kGcTypeNone) { |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3725 | for (collector::GcType gc_type : gc_plan_) { |
| 3726 | // Attempt to run the collector, if we succeed, we are done. |
Hiroshi Yamauchi | 0ae9899 | 2015-05-01 14:33:19 -0700 | [diff] [blame] | 3727 | if (gc_type > next_gc_type && |
Mathieu Chartier | 35b59a2 | 2017-04-17 15:24:43 -0700 | [diff] [blame] | 3728 | CollectGarbageInternal(gc_type, cause, false) != collector::kGcTypeNone) { |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3729 | break; |
| 3730 | } |
Mathieu Chartier | f9ed0d3 | 2013-11-21 16:42:47 -0800 | [diff] [blame] | 3731 | } |
| 3732 | } |
| 3733 | } |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 3734 | } |
Mathieu Chartier | 7664f5c | 2012-06-08 18:15:32 -0700 | [diff] [blame] | 3735 | } |
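| | |
| | // Sketch of the fallback above, assuming gc_plan_ is ordered from cheapest to most expensive |
| | // (e.g. sticky, partial, full): if the requested type (say partial, after a forced-full request |
| | // downgraded a sticky plan) returns kGcTypeNone because it could not run, the loop retries each |
| | // heavier plan entry in turn, typically ending with a full collection. |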
| 3736 | |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3737 | class Heap::CollectorTransitionTask : public HeapTask { |
| 3738 | public: |
Mathieu Chartier | a4f6af9 | 2015-08-11 17:35:25 -0700 | [diff] [blame] | 3739 | explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {} |
| 3740 | |
Roland Levillain | f73caca | 2018-08-24 17:19:07 +0100 | [diff] [blame] | 3741 | void Run(Thread* self) override { |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3742 | gc::Heap* heap = Runtime::Current()->GetHeap(); |
| 3743 | heap->DoPendingCollectorTransition(); |
| 3744 | heap->ClearPendingCollectorTransition(self); |
Mathieu Chartier | a5f9de0 | 2014-02-28 16:48:42 -0800 | [diff] [blame] | 3745 | } |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3746 | }; |
| 3747 | |
| 3748 | void Heap::ClearPendingCollectorTransition(Thread* self) { |
| 3749 | MutexLock mu(self, *pending_task_lock_); |
| 3750 | pending_collector_transition_ = nullptr; |
Mathieu Chartier | a5f9de0 | 2014-02-28 16:48:42 -0800 | [diff] [blame] | 3751 | } |
| 3752 | |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3753 | void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) { |
| 3754 | Thread* self = Thread::Current(); |
| 3755 | desired_collector_type_ = desired_collector_type; |
| 3756 | if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) { |
| 3757 | return; |
| 3758 | } |
Hiroshi Yamauchi | 60985b7 | 2016-08-24 13:53:12 -0700 | [diff] [blame] | 3759 | if (collector_type_ == kCollectorTypeCC) { |
| 3760 | // For CC, we invoke a full compaction when going to the background, but the collector type |
| 3761 | // doesn't change. |
| 3762 | DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground); |
| 3763 | } |
| 3764 | DCHECK_NE(collector_type_, kCollectorTypeCCBackground); |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3765 | CollectorTransitionTask* added_task = nullptr; |
| 3766 | const uint64_t target_time = NanoTime() + delta_time; |
| 3767 | { |
| 3768 | MutexLock mu(self, *pending_task_lock_); |
| 3769 | // If we have an existing collector transition, update the target time to be the new target. |
| 3770 | if (pending_collector_transition_ != nullptr) { |
| 3771 | task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time); |
| 3772 | return; |
| 3773 | } |
| 3774 | added_task = new CollectorTransitionTask(target_time); |
| 3775 | pending_collector_transition_ = added_task; |
| 3776 | } |
| 3777 | task_processor_->AddTask(self, added_task); |
| 3778 | } |
| 3779 | |
| 3780 | class Heap::HeapTrimTask : public HeapTask { |
| 3781 | public: |
| 3782 | explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { } |
Roland Levillain | f73caca | 2018-08-24 17:19:07 +0100 | [diff] [blame] | 3783 | void Run(Thread* self) override { |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3784 | gc::Heap* heap = Runtime::Current()->GetHeap(); |
| 3785 | heap->Trim(self); |
| 3786 | heap->ClearPendingTrim(self); |
| 3787 | } |
| 3788 | }; |
| 3789 | |
| 3790 | void Heap::ClearPendingTrim(Thread* self) { |
| 3791 | MutexLock mu(self, *pending_task_lock_); |
| 3792 | pending_heap_trim_ = nullptr; |
| 3793 | } |
| 3794 | |
| 3795 | void Heap::RequestTrim(Thread* self) { |
| 3796 | if (!CanAddHeapTask(self)) { |
| 3797 | return; |
| 3798 | } |
Ian Rogers | 4893188 | 2013-01-22 14:35:16 -0800 | [diff] [blame] | 3799 | // GC completed and now we must decide whether to request a heap trim (advising pages back to the |
| 3800 | // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans |
| 3801 | // a space it will hold its lock and can become a cause of jank. |
| 3802 | // Note that the large object space self-trims, and the Zygote space was trimmed and has been |
| 3803 | // unchanging since forking. |
| 3804 | |
Elliott Hughes | 8cf5bc0 | 2012-02-02 16:32:16 -0800 | [diff] [blame] | 3805 | // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap |
| 3806 | // because that only marks object heads, so a large array looks like lots of empty space. We |
| 3807 | // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional |
| 3808 | // to utilization (which is probably inversely proportional to how much benefit we can expect). |
| 3809 | // We could try mincore(2) but that's only a measure of how many pages we haven't given away, |
| 3810 | // not how much use we're making of those pages. |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3811 | HeapTrimTask* added_task = nullptr; |
Mathieu Chartier | 440e4ce | 2014-03-31 16:36:35 -0700 | [diff] [blame] | 3812 | { |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3813 | MutexLock mu(self, *pending_task_lock_); |
| 3814 | if (pending_heap_trim_ != nullptr) { |
| 3815 | // Already have a heap trim request in task processor, ignore this request. |
Mathieu Chartier | 440e4ce | 2014-03-31 16:36:35 -0700 | [diff] [blame] | 3816 | return; |
Mathieu Chartier | a5f9de0 | 2014-02-28 16:48:42 -0800 | [diff] [blame] | 3817 | } |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3818 | added_task = new HeapTrimTask(kHeapTrimWait); |
| 3819 | pending_heap_trim_ = added_task; |
Mathieu Chartier | c39e342 | 2013-08-07 16:41:36 -0700 | [diff] [blame] | 3820 | } |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3821 | task_processor_->AddTask(self, added_task); |
Mathieu Chartier | a5f9de0 | 2014-02-28 16:48:42 -0800 | [diff] [blame] | 3822 | } |
| 3823 | |
Orion Hodson | 82cf9a2 | 2018-03-27 16:36:32 +0100 | [diff] [blame] | 3824 | void Heap::IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke) { |
| 3825 | size_t previous_num_bytes_freed_revoke = |
Hans Boehm | fb8b4e2 | 2018-09-05 16:45:42 -0700 | [diff] [blame] | 3826 | num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_relaxed); |
Orion Hodson | 82cf9a2 | 2018-03-27 16:36:32 +0100 | [diff] [blame] | 3827 | // Check that the updated value does not exceed the number of bytes allocated. There is a risk of |
| 3828 | // execution being suspended between the increment above and the CHECK below, leading to |
| 3829 | // the use of previous_num_bytes_freed_revoke in the comparison. |
| 3830 | CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed), |
| 3831 | previous_num_bytes_freed_revoke + freed_bytes_revoke); |
| 3832 | } |
| 3833 | |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 3834 | void Heap::RevokeThreadLocalBuffers(Thread* thread) { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 3835 | if (rosalloc_space_ != nullptr) { |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3836 | size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread); |
| 3837 | if (freed_bytes_revoke > 0U) { |
Orion Hodson | 82cf9a2 | 2018-03-27 16:36:32 +0100 | [diff] [blame] | 3838 | IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke); |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3839 | } |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 3840 | } |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 3841 | if (bump_pointer_space_ != nullptr) { |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3842 | CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U); |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 3843 | } |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 3844 | if (region_space_ != nullptr) { |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3845 | CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U); |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 3846 | } |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 3847 | } |
| 3848 | |
Hiroshi Yamauchi | c93c530 | 2014-03-20 16:15:37 -0700 | [diff] [blame] | 3849 | void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) { |
| 3850 | if (rosalloc_space_ != nullptr) { |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3851 | size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread); |
| 3852 | if (freed_bytes_revoke > 0U) { |
Orion Hodson | 82cf9a2 | 2018-03-27 16:36:32 +0100 | [diff] [blame] | 3853 | IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke); |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3854 | } |
Hiroshi Yamauchi | c93c530 | 2014-03-20 16:15:37 -0700 | [diff] [blame] | 3855 | } |
| 3856 | } |
| 3857 | |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 3858 | void Heap::RevokeAllThreadLocalBuffers() { |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 3859 | if (rosalloc_space_ != nullptr) { |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3860 | size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers(); |
| 3861 | if (freed_bytes_revoke > 0U) { |
Orion Hodson | 82cf9a2 | 2018-03-27 16:36:32 +0100 | [diff] [blame] | 3862 | IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke); |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3863 | } |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 3864 | } |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 3865 | if (bump_pointer_space_ != nullptr) { |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3866 | CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U); |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 3867 | } |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 3868 | if (region_space_ != nullptr) { |
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 3869 | CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U); |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 3870 | } |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 3871 | } |
| 3872 | |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 3873 | bool Heap::IsGCRequestPending() const { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 3874 | return concurrent_gc_pending_.load(std::memory_order_relaxed); |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 3875 | } |
| 3876 | |
Mathieu Chartier | b5de3bb | 2015-06-05 13:21:05 -0700 | [diff] [blame] | 3877 | void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) { |
| 3878 | env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime, |
| 3879 | WellKnownClasses::dalvik_system_VMRuntime_runFinalization, |
| 3880 | static_cast<jlong>(timeout)); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 3881 | } |
| 3882 | |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3883 | // For GC triggering purposes, we count old (pre-last-GC) and new native allocations as |
| 3884 | // equivalent to different fractions of Java allocations. |
| 3885 | // For now, we essentially do not count old native allocations at all, so that we can preserve the |
| 3886 | // existing behavior of not limiting native heap size. If we seriously considered it, we would |
| 3887 | // have to adjust collection thresholds when we encounter large amounts of old native memory, |
| 3888 | // and handle native out-of-memory situations. |
Richard Uhler | 36bdbd2 | 2017-01-24 14:17:05 +0000 | [diff] [blame] | 3889 | |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3890 | static constexpr size_t kOldNativeDiscountFactor = 65536; // Approximately infinite for now. |
| 3891 | static constexpr size_t kNewNativeDiscountFactor = 2; |
| 3892 | |
| 3893 | // If weighted java + native memory use exceeds our target by kStopForNativeFactor, and |
Hans Boehm | bb2467b | 2019-03-29 22:55:06 -0700 | [diff] [blame] | 3894 | // newly allocated memory exceeds stop_for_native_allocs_, we wait for GC to complete to avoid |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3895 | // running out of memory. |
Hans Boehm | 1575267 | 2018-12-18 17:01:00 -0800 | [diff] [blame] | 3896 | static constexpr float kStopForNativeFactor = 4.0; |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3897 | |
| 3898 | // Return the ratio of the weighted native + java allocated bytes to its target value. |
| 3899 | // A return value > 1.0 means we should collect. Significantly larger values mean we're falling |
| 3900 | // behind. |
Hans Boehm | 7c73dd1 | 2019-02-06 00:20:18 +0000 | [diff] [blame] | 3901 | inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent) { |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3902 | // Collection check for native allocation. Does not enforce Java heap bounds. |
| 3903 | // With adj_start_bytes defined below, effectively checks |
| 3904 | // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes, |
| 3905 | // where currently c1 and c2 are 1 divided by the discount factors defined above. |
| 3906 | size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed); |
| 3907 | if (old_native_bytes > current_native_bytes) { |
| 3908 | // Net decrease; skip the check, but update old value. |
| 3909 | // It's OK to lose an update if two stores race. |
| 3910 | old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed); |
| 3911 | return 0.0; |
| 3912 | } else { |
| 3913 | size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes); |
| 3914 | size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor |
| 3915 | + old_native_bytes / kOldNativeDiscountFactor; |
Hans Boehm | 1575267 | 2018-12-18 17:01:00 -0800 | [diff] [blame] | 3916 | size_t add_bytes_allowed = static_cast<size_t>( |
| 3917 | NativeAllocationGcWatermark() * HeapGrowthMultiplier()); |
Hans Boehm | 7c73dd1 | 2019-02-06 00:20:18 +0000 | [diff] [blame] | 3918 | size_t java_gc_start_bytes = is_gc_concurrent |
| 3919 | ? concurrent_start_bytes_ |
| 3920 | : target_footprint_.load(std::memory_order_relaxed); |
| 3921 | size_t adj_start_bytes = UnsignedSum(java_gc_start_bytes, |
| 3922 | add_bytes_allowed / kNewNativeDiscountFactor); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3923 | return static_cast<float>(GetBytesAllocated() + weighted_native_bytes) |
| 3924 | / static_cast<float>(adj_start_bytes); |
| 3925 | } |
| 3926 | } |
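| | |
| | // A worked example with purely illustrative numbers: suppose 20 MB of native memory had been |
| | // registered as of the last GC (old) and 30 MB is registered now, so new_native_bytes = 10 MB |
| | // and weighted_native_bytes ~= 10 MB / 2 + 20 MB / 65536 ~= 5 MB. With 50 MB of Java bytes |
| | // allocated, concurrent_start_bytes_ = 60 MB, and an assumed add_bytes_allowed of 8 MB, |
| | //   adj_start_bytes = 60 MB + 8 MB / 2 = 64 MB |
| | //   return value    = (50 MB + 5 MB) / 64 MB ~= 0.86, |
| | // which is below 1.0, so CheckGCForNative() below would not trigger a collection. |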
| 3927 | |
Hans Boehm | 7c73dd1 | 2019-02-06 00:20:18 +0000 | [diff] [blame] | 3928 | inline void Heap::CheckGCForNative(Thread* self) { |
| 3929 | bool is_gc_concurrent = IsGcConcurrent(); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3930 | size_t current_native_bytes = GetNativeBytes(); |
Hans Boehm | 7c73dd1 | 2019-02-06 00:20:18 +0000 | [diff] [blame] | 3931 | float gc_urgency = NativeMemoryOverTarget(current_native_bytes, is_gc_concurrent); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3932 | if (UNLIKELY(gc_urgency >= 1.0)) { |
Hans Boehm | 7c73dd1 | 2019-02-06 00:20:18 +0000 | [diff] [blame] | 3933 | if (is_gc_concurrent) { |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3934 | RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true); |
| 3935 | if (gc_urgency > kStopForNativeFactor |
Hans Boehm | bb2467b | 2019-03-29 22:55:06 -0700 | [diff] [blame] | 3936 | && current_native_bytes > stop_for_native_allocs_) { |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3937 | // We're in danger of running out of memory due to rampant native allocation. |
| 3938 | if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { |
| 3939 | LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency; |
| 3940 | } |
Hans Boehm | 1575267 | 2018-12-18 17:01:00 -0800 | [diff] [blame] | 3941 | WaitForGcToComplete(kGcCauseForNativeAlloc, self); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3942 | } |
Richard Uhler | caaa2b0 | 2017-02-01 09:54:17 +0000 | [diff] [blame] | 3943 | } else { |
| 3944 | CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false); |
| 3945 | } |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 3946 | } |
| 3947 | } |
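| | |
| | // Continuing the illustrative numbers above: a returned urgency of, say, 1.2 would only request |
| | // a background concurrent GC for kGcCauseForNativeAlloc, while an urgency above |
| | // kStopForNativeFactor (4.0) combined with native memory use above stop_for_native_allocs_ |
| | // would additionally block this thread in WaitForGcToComplete(). |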
| 3948 | |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3949 | // About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect. |
| 3950 | void Heap::NotifyNativeAllocations(JNIEnv* env) { |
| 3951 | native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed); |
Hans Boehm | 7c73dd1 | 2019-02-06 00:20:18 +0000 | [diff] [blame] | 3952 | CheckGCForNative(ThreadForEnv(env)); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3953 | } |
| 3954 | |
| 3955 | // Register a native allocation with an explicit size. |
| 3956 | // This should only be done for large allocations of non-malloc memory, which we wouldn't |
| 3957 | // otherwise see. |
| 3958 | void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) { |
Hans Boehm | 13e951d | 2019-11-01 16:48:28 -0700 | [diff] [blame] | 3959 | // Cautiously check for a wrapped negative bytes argument. |
| 3960 | DCHECK(sizeof(size_t) < 8 || bytes < (std::numeric_limits<size_t>::max() / 2)); |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3961 | native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed); |
| 3962 | uint32_t objects_notified = |
| 3963 | native_objects_notified_.fetch_add(1, std::memory_order_relaxed); |
| 3964 | if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1 |
| 3965 | || bytes > kCheckImmediatelyThreshold) { |
Hans Boehm | 7c73dd1 | 2019-02-06 00:20:18 +0000 | [diff] [blame] | 3966 | CheckGCForNative(ThreadForEnv(env)); |
Richard Uhler | caaa2b0 | 2017-02-01 09:54:17 +0000 | [diff] [blame] | 3967 | } |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 3968 | } |
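| | |
| | // Illustrative behaviour of the check above: with kNotifyNativeInterval hypothetically 32, every |
| | // 32nd explicit registration triggers CheckGCForNative(), and any single registration whose size |
| | // exceeds kCheckImmediatelyThreshold is checked immediately regardless of the counter. |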
| 3969 | |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3970 | void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) { |
| 3971 | size_t allocated; |
| 3972 | size_t new_freed_bytes; |
| 3973 | do { |
| 3974 | allocated = native_bytes_registered_.load(std::memory_order_relaxed); |
| 3975 | new_freed_bytes = std::min(allocated, bytes); |
| 3976 | // We should not be registering more free than allocated bytes. |
| 3977 | // But correctly keep going in non-debug builds. |
| 3978 | DCHECK_EQ(new_freed_bytes, bytes); |
| 3979 | } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated, |
| 3980 | allocated - new_freed_bytes)); |
| 3981 | } |
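| | |
| | // Sketch of why the CAS loop above clamps: if only 1 MB is currently registered but a caller |
| | // (incorrectly) frees 2 MB, new_freed_bytes is clamped to 1 MB so native_bytes_registered_ never |
| | // wraps below zero; the DCHECK still flags the mismatch in debug builds. |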
| 3982 | |
Ian Rogers | ef7d42f | 2014-01-06 12:55:46 -0800 | [diff] [blame] | 3983 | size_t Heap::GetTotalMemory() const { |
Hans Boehm | c220f98 | 2018-10-12 16:15:45 -0700 | [diff] [blame] | 3984 | return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated()); |
Hiroshi Yamauchi | 09b07a9 | 2013-07-15 13:17:06 -0700 | [diff] [blame] | 3985 | } |
| 3986 | |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 3987 | void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) { |
| 3988 | DCHECK(mod_union_table != nullptr); |
| 3989 | mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table); |
| 3990 | } |
| 3991 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 3992 | void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) { |
Mathieu Chartier | df7f7f0 | 2017-10-05 09:47:58 -0700 | [diff] [blame] | 3993 | // Compare rounded sizes since the allocation may have been retried after rounding the size. |
| 3994 | // See b/37885600 |
Mathieu Chartier | a5eae69 | 2014-12-17 17:56:03 -0800 | [diff] [blame] | 3995 | CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) || |
Mathieu Chartier | aac9012 | 2017-10-04 14:58:34 -0700 | [diff] [blame] | 3996 | (c->IsVariableSize() || |
| 3997 | RoundUp(c->GetObjectSize(), kObjectAlignment) == |
| 3998 | RoundUp(byte_count, kObjectAlignment))) |
Mathieu Chartier | 8876fb7 | 2017-02-24 12:39:53 -0800 | [diff] [blame] | 3999 | << "ClassFlags=" << c->GetClassFlags() |
| 4000 | << " IsClassClass=" << c->IsClassClass() |
| 4001 | << " byte_count=" << byte_count |
| 4002 | << " IsVariableSize=" << c->IsVariableSize() |
| 4003 | << " ObjectSize=" << c->GetObjectSize() |
| 4004 | << " sizeof(Class)=" << sizeof(mirror::Class) |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 4005 | << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass"); |
Mathieu Chartier | c645f1d | 2014-03-06 18:11:53 -0800 | [diff] [blame] | 4006 | CHECK_GE(byte_count, sizeof(mirror::Object)); |
| 4007 | } |
| 4008 | |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 4009 | void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) { |
| 4010 | CHECK(remembered_set != nullptr); |
| 4011 | space::Space* space = remembered_set->GetSpace(); |
| 4012 | CHECK(space != nullptr); |
Mathieu Chartier | 8e4a96d | 2014-05-21 10:44:32 -0700 | [diff] [blame] | 4013 | CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space; |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 4014 | remembered_sets_.Put(space, remembered_set); |
Mathieu Chartier | 8e4a96d | 2014-05-21 10:44:32 -0700 | [diff] [blame] | 4015 | CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space; |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 4016 | } |
| 4017 | |
| 4018 | void Heap::RemoveRememberedSet(space::Space* space) { |
| 4019 | CHECK(space != nullptr); |
| 4020 | auto it = remembered_sets_.find(space); |
| 4021 | CHECK(it != remembered_sets_.end()); |
Mathieu Chartier | 5189e24 | 2014-07-24 11:11:05 -0700 | [diff] [blame] | 4022 | delete it->second; |
Hiroshi Yamauchi | 38e68e9 | 2014-03-07 13:59:08 -0800 | [diff] [blame] | 4023 | remembered_sets_.erase(it); |
| 4024 | CHECK(remembered_sets_.find(space) == remembered_sets_.end()); |
| 4025 | } |
| 4026 | |
Mathieu Chartier | 4aeec17 | 2014-03-27 16:09:46 -0700 | [diff] [blame] | 4027 | void Heap::ClearMarkedObjects() { |
| 4028 | // Clear all of the spaces' mark bitmaps. |
| 4029 | for (const auto& space : GetContinuousSpaces()) { |
Mathieu Chartier | 6f38201 | 2019-07-30 09:47:35 -0700 | [diff] [blame] | 4030 | if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) { |
| 4031 | space->GetMarkBitmap()->Clear(); |
Mathieu Chartier | 4aeec17 | 2014-03-27 16:09:46 -0700 | [diff] [blame] | 4032 | } |
| 4033 | } |
| 4034 | // Clear the marked objects in the discontinuous space object sets. |
| 4035 | for (const auto& space : GetDiscontinuousSpaces()) { |
Mathieu Chartier | bbd695c | 2014-04-16 09:48:48 -0700 | [diff] [blame] | 4036 | space->GetMarkBitmap()->Clear(); |
Mathieu Chartier | 4aeec17 | 2014-03-27 16:09:46 -0700 | [diff] [blame] | 4037 | } |
| 4038 | } |
| 4039 | |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 4040 | void Heap::SetAllocationRecords(AllocRecordObjectMap* records) { |
| 4041 | allocation_records_.reset(records); |
| 4042 | } |
| 4043 | |
Man Cao | 1ed11b9 | 2015-06-11 22:47:35 -0700 | [diff] [blame] | 4044 | void Heap::VisitAllocationRecords(RootVisitor* visitor) const { |
| 4045 | if (IsAllocTrackingEnabled()) { |
| 4046 | MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); |
| 4047 | if (IsAllocTrackingEnabled()) { |
| 4048 | GetAllocationRecords()->VisitRoots(visitor); |
| 4049 | } |
| 4050 | } |
| 4051 | } |
| 4052 | |
Mathieu Chartier | 9750995 | 2015-07-13 14:35:43 -0700 | [diff] [blame] | 4053 | void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const { |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 4054 | if (IsAllocTrackingEnabled()) { |
| 4055 | MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); |
| 4056 | if (IsAllocTrackingEnabled()) { |
Mathieu Chartier | 9750995 | 2015-07-13 14:35:43 -0700 | [diff] [blame] | 4057 | GetAllocationRecords()->SweepAllocationRecords(visitor); |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 4058 | } |
| 4059 | } |
| 4060 | } |
| 4061 | |
Man Cao | 42c3c33 | 2015-06-23 16:38:25 -0700 | [diff] [blame] | 4062 | void Heap::AllowNewAllocationRecords() const { |
Hiroshi Yamauchi | fdbd13c | 2015-09-02 16:16:58 -0700 | [diff] [blame] | 4063 | CHECK(!kUseReadBarrier); |
Hiroshi Yamauchi | 6f0c6cd | 2016-03-18 17:17:52 -0700 | [diff] [blame] | 4064 | MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); |
| 4065 | AllocRecordObjectMap* allocation_records = GetAllocationRecords(); |
| 4066 | if (allocation_records != nullptr) { |
| 4067 | allocation_records->AllowNewAllocationRecords(); |
Man Cao | 42c3c33 | 2015-06-23 16:38:25 -0700 | [diff] [blame] | 4068 | } |
| 4069 | } |
| 4070 | |
| 4071 | void Heap::DisallowNewAllocationRecords() const { |
Hiroshi Yamauchi | fdbd13c | 2015-09-02 16:16:58 -0700 | [diff] [blame] | 4072 | CHECK(!kUseReadBarrier); |
Hiroshi Yamauchi | 6f0c6cd | 2016-03-18 17:17:52 -0700 | [diff] [blame] | 4073 | MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); |
| 4074 | AllocRecordObjectMap* allocation_records = GetAllocationRecords(); |
| 4075 | if (allocation_records != nullptr) { |
| 4076 | allocation_records->DisallowNewAllocationRecords(); |
Man Cao | 42c3c33 | 2015-06-23 16:38:25 -0700 | [diff] [blame] | 4077 | } |
| 4078 | } |
| 4079 | |
Hiroshi Yamauchi | fdbd13c | 2015-09-02 16:16:58 -0700 | [diff] [blame] | 4080 | void Heap::BroadcastForNewAllocationRecords() const { |
Hiroshi Yamauchi | 6f0c6cd | 2016-03-18 17:17:52 -0700 | [diff] [blame] | 4081 | // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may |
| 4082 | // be set to false while some threads are waiting for system weak access in |
| 4083 | // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554. |
| 4084 | MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); |
| 4085 | AllocRecordObjectMap* allocation_records = GetAllocationRecords(); |
| 4086 | if (allocation_records != nullptr) { |
| 4087 | allocation_records->BroadcastForNewAllocationRecords(); |
Hiroshi Yamauchi | fdbd13c | 2015-09-02 16:16:58 -0700 | [diff] [blame] | 4088 | } |
| 4089 | } |
| 4090 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 4091 | void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) { |
Vladimir Marko | 317892b | 2018-05-31 11:11:32 +0100 | [diff] [blame] | 4092 | DCHECK(gc_stress_mode_); |
Mathieu Chartier | 3100080 | 2015-06-14 14:14:37 -0700 | [diff] [blame] | 4093 | auto* const runtime = Runtime::Current(); |
Vladimir Marko | 317892b | 2018-05-31 11:11:32 +0100 | [diff] [blame] | 4094 | if (runtime->GetClassLinker()->IsInitialized() && !runtime->IsActiveTransaction()) { |
Mathieu Chartier | 3100080 | 2015-06-14 14:14:37 -0700 | [diff] [blame] | 4095 | // Check if we should GC. |
| 4096 | bool new_backtrace = false; |
| 4097 | { |
| 4098 | static constexpr size_t kMaxFrames = 16u; |
Mathieu Chartier | 409736f | 2019-10-22 18:13:29 -0700 | [diff] [blame] | 4099 | MutexLock mu(self, *backtrace_lock_); |
Mathieu Chartier | 3458359 | 2017-03-23 23:51:34 -0700 | [diff] [blame] | 4100 | FixedSizeBacktrace<kMaxFrames> backtrace; |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 4101 | backtrace.Collect(/* skip_count= */ 2); |
Mathieu Chartier | 3458359 | 2017-03-23 23:51:34 -0700 | [diff] [blame] | 4102 | uint64_t hash = backtrace.Hash(); |
Mathieu Chartier | 3100080 | 2015-06-14 14:14:37 -0700 | [diff] [blame] | 4103 | new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end(); |
| 4104 | if (new_backtrace) { |
| 4105 | seen_backtraces_.insert(hash); |
| 4106 | } |
| 4107 | } |
| 4108 | if (new_backtrace) { |
| 4109 | StackHandleScope<1> hs(self); |
| 4110 | auto h = hs.NewHandleWrapper(obj); |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 4111 | CollectGarbage(/* clear_soft_references= */ false); |
Hans Boehm | fb8b4e2 | 2018-09-05 16:45:42 -0700 | [diff] [blame] | 4112 | unique_backtrace_count_.fetch_add(1); |
Mathieu Chartier | 3100080 | 2015-06-14 14:14:37 -0700 | [diff] [blame] | 4113 | } else { |
Hans Boehm | fb8b4e2 | 2018-09-05 16:45:42 -0700 | [diff] [blame] | 4114 | seen_backtrace_count_.fetch_add(1); |
Mathieu Chartier | 3100080 | 2015-06-14 14:14:37 -0700 | [diff] [blame] | 4115 | } |
| 4116 | } |
| 4117 | } |
| 4118 | |
Mathieu Chartier | 5116837 | 2015-08-12 16:40:32 -0700 | [diff] [blame] | 4119 | void Heap::DisableGCForShutdown() { |
| 4120 | Thread* const self = Thread::Current(); |
| 4121 | CHECK(Runtime::Current()->IsShuttingDown(self)); |
| 4122 | MutexLock mu(self, *gc_complete_lock_); |
| 4123 | gc_disabled_for_shutdown_ = true; |
| 4124 | } |
| 4125 | |
Mathieu Chartier | 9d156d5 | 2016-10-06 17:44:26 -0700 | [diff] [blame] | 4126 | bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const { |
Vladimir Marko | 7cde458 | 2019-07-05 13:26:11 +0100 | [diff] [blame] | 4127 | DCHECK_EQ(IsBootImageAddress(obj.Ptr()), |
| 4128 | any_of(boot_image_spaces_.begin(), |
| 4129 | boot_image_spaces_.end(), |
| 4130 | [obj](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) { |
| 4131 | return space->HasAddress(obj.Ptr()); |
| 4132 | })); |
| 4133 | return IsBootImageAddress(obj.Ptr()); |
Mathieu Chartier | fbc3108 | 2016-01-24 11:59:56 -0800 | [diff] [blame] | 4134 | } |
| 4135 | |
Mingyao Yang | 6ea1a0e | 2016-01-29 12:12:49 -0800 | [diff] [blame] | 4136 | bool Heap::IsInBootImageOatFile(const void* p) const { |
Vladimir Marko | 7cde458 | 2019-07-05 13:26:11 +0100 | [diff] [blame] | 4137 | DCHECK_EQ(IsBootImageAddress(p), |
| 4138 | any_of(boot_image_spaces_.begin(), |
| 4139 | boot_image_spaces_.end(), |
| 4140 | [p](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) { |
| 4141 | return space->GetOatFile()->Contains(p); |
| 4142 | })); |
| 4143 | return IsBootImageAddress(p); |
Mathieu Chartier | fbc3108 | 2016-01-24 11:59:56 -0800 | [diff] [blame] | 4144 | } |
| 4145 | |
Andreas Gampe | 27fa96c | 2016-10-07 15:05:24 -0700 | [diff] [blame] | 4146 | void Heap::SetAllocationListener(AllocationListener* l) { |
| 4147 | AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l); |
| 4148 | |
| 4149 | if (old == nullptr) { |
| 4150 | Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(); |
| 4151 | } |
| 4152 | } |
| 4153 | |
| 4154 | void Heap::RemoveAllocationListener() { |
| 4155 | AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr); |
| 4156 | |
| 4157 | if (old != nullptr) { |
Andreas Gampe | 172ec8e | 2016-10-12 13:50:20 -0700 | [diff] [blame] | 4158 | Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints(); |
Andreas Gampe | 27fa96c | 2016-10-07 15:05:24 -0700 | [diff] [blame] | 4159 | } |
| 4160 | } |
| 4161 | |
void Heap::SetGcPauseListener(GcPauseListener* l) {
  gc_pause_listener_.store(l, std::memory_order_relaxed);
}

void Heap::RemoveGcPauseListener() {
  gc_pause_listener_.store(nullptr, std::memory_order_relaxed);
}

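// Slow path taken when the current TLAB cannot satisfy alloc_size. Three cases:
//  1. kUsePartialTlabs and the current TLAB still has unclaimed capacity: just expand it.
//  2. Bump-pointer allocator: carve out a fresh TLAB of alloc_size + kDefaultTLABSize.
//  3. Region allocator: refill a region-sized (or partial) TLAB for small requests, or fall
//     back to a direct non-TLAB region allocation for large ones or when no region is free.
// In the TLAB cases the object itself is then allocated out of the (new or grown) TLAB via
// Thread::AllocTlab() at the end of the function.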
mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
                                       size_t alloc_size,
                                       bool grow,
                                       size_t* bytes_allocated,
                                       size_t* usable_size,
                                       size_t* bytes_tl_bulk_allocated) {
  const AllocatorType allocator_type = GetCurrentAllocator();
  if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
    DCHECK_GT(alloc_size, self->TlabSize());
    // There is enough space if we grow the TLAB. Let's do that. This increases the
    // TLAB bytes.
    const size_t min_expand_size = alloc_size - self->TlabSize();
    const size_t expand_bytes = std::max(
        min_expand_size,
        std::min(self->TlabRemainingCapacity() - self->TlabSize(), kPartialTlabSize));
    if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
      return nullptr;
    }
    *bytes_tl_bulk_allocated = expand_bytes;
    self->ExpandTlab(expand_bytes);
    DCHECK_LE(alloc_size, self->TlabSize());
  } else if (allocator_type == kAllocatorTypeTLAB) {
    DCHECK(bump_pointer_space_ != nullptr);
    const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
    if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
      return nullptr;
    }
    // Try allocating a new thread-local buffer; if the allocation fails, the space must be
    // full, so return null.
    if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
      return nullptr;
    }
    *bytes_tl_bulk_allocated = new_tlab_size;
  } else {
    DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
    DCHECK(region_space_ != nullptr);
    if (space::RegionSpace::kRegionSize >= alloc_size) {
      // Non-large. Check OOME for a tlab.
      if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
                                            space::RegionSpace::kRegionSize,
                                            grow))) {
        const size_t new_tlab_size = kUsePartialTlabs
            ? std::max(alloc_size, kPartialTlabSize)
            : gc::space::RegionSpace::kRegionSize;
        // Try to allocate a tlab.
        if (!region_space_->AllocNewTlab(self, new_tlab_size, bytes_tl_bulk_allocated)) {
          // Failed to allocate a tlab. Try non-tlab.
          return region_space_->AllocNonvirtual<false>(alloc_size,
                                                       bytes_allocated,
                                                       usable_size,
                                                       bytes_tl_bulk_allocated);
        }
        // Fall through to using the TLAB below.
      } else {
        // Check OOME for a non-tlab allocation.
        if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
          return region_space_->AllocNonvirtual<false>(alloc_size,
                                                       bytes_allocated,
                                                       usable_size,
                                                       bytes_tl_bulk_allocated);
        }
        // Neither tlab nor non-tlab works. Give up.
        return nullptr;
      }
    } else {
      // Large. Check OOME.
      if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
        return region_space_->AllocNonvirtual<false>(alloc_size,
                                                     bytes_allocated,
                                                     usable_size,
                                                     bytes_tl_bulk_allocated);
      }
      return nullptr;
    }
  }
  // Refilled TLAB, return.
  mirror::Object* ret = self->AllocTlab(alloc_size);
  DCHECK(ret != nullptr);
  *bytes_allocated = alloc_size;
  *usable_size = alloc_size;
  return ret;
}

const Verification* Heap::GetVerification() const {
  return verification_.get();
}

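// Emits a line such as (illustrative): "Growing heap from 12MB to 16MB for a 4MB allocation"
// under the 'heap' VLOG tag.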
void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
  VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
             << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
}

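// Deferred task scheduled by PostForkChildAction(): if, by the time it runs, nothing has
// lowered target_footprint_ from the launch-time maximum (growth_limit_), it requests a
// background GC so the first real collection brings the thresholds back to normal.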
class Heap::TriggerPostForkCCGcTask : public HeapTask {
 public:
  explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
  void Run(Thread* self) override {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    // Trigger a GC, if not already done. The first GC after fork, whenever it
    // takes place, will adjust the thresholds to normal levels.
    if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) {
      heap->RequestConcurrentGC(self, kGcCauseBackground, false);
    }
  }
};

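// Called in the child after a fork (from the zygote, per the app-launch comment below). The
// task above is scheduled kPostForkMaxHeapDurationMS later, bounding how long the inflated
// launch-time thresholds can stay in effect if the app never allocates enough to trigger a GC
// on its own.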
void Heap::PostForkChildAction(Thread* self) {
  // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
  // max values to avoid GC during app launch.
  if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) {
    // Set target_footprint_ to the largest allowed value.
    SetIdealFootprint(growth_limit_);
    // Set concurrent_start_bytes_ to half of the heap size.
    size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
    concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated());

    GetTaskProcessor()->AddTask(
        self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS)));
  }
}

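// Walks every heap object that can hold a reflective reference to an ArtMethod or ArtField
// (Method, Constructor, Field, MethodHandle, FieldVarHandle, DexCache) and lets the visitor
// update those targets; used by callers that need to repoint ArtMethod*/ArtField* values,
// e.g. (presumably) structural class redefinition.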
void Heap::VisitReflectiveTargets(ReflectiveValueVisitor* visit) {
  VisitObjectsPaused([&visit](mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    art::ObjPtr<mirror::Class> klass(ref->GetClass());
    // All these classes are in the BootstrapClassLoader.
    if (!klass->IsBootStrapClassLoaded()) {
      return;
    }
    if (GetClassRoot<mirror::Method>()->IsAssignableFrom(klass) ||
        GetClassRoot<mirror::Constructor>()->IsAssignableFrom(klass)) {
      down_cast<mirror::Executable*>(ref)->VisitTarget(visit);
    } else if (art::GetClassRoot<art::mirror::Field>() == klass) {
      down_cast<mirror::Field*>(ref)->VisitTarget(visit);
    } else if (art::GetClassRoot<art::mirror::MethodHandle>()->IsAssignableFrom(klass)) {
      down_cast<mirror::MethodHandle*>(ref)->VisitTarget(visit);
    } else if (art::GetClassRoot<art::mirror::FieldVarHandle>()->IsAssignableFrom(klass)) {
      down_cast<mirror::FieldVarHandle*>(ref)->VisitTarget(visit);
    } else if (art::GetClassRoot<art::mirror::DexCache>()->IsAssignableFrom(klass)) {
      down_cast<mirror::DexCache*>(ref)->VisitReflectiveTargets(visit);
    }
  });
}

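// Convenience wrapper around the TaskProcessor: returns false when heap tasks cannot currently
// be added (CanAddHeapTask() fails, e.g. during shutdown), in which case the task is not
// scheduled and the caller presumably remains responsible for it.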
bool Heap::AddHeapTask(gc::HeapTask* task) {
  Thread* const self = Thread::Current();
  if (!CanAddHeapTask(self)) {
    return false;
  }
  GetTaskProcessor()->AddTask(self, task);
  return true;
}

}  // namespace gc
}  // namespace art