/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/gc_pause_listener.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "gc/verification.h"
#include "image-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
// If kFilterModUnionCards is true, we attempt to filter out cards that do not need to be dirty in
// the mod union table. Disabled since it does not seem to help the pause much.
static constexpr bool kFilterModUnionCards = kIsDebugBuild;
// If kDisallowReadBarrierDuringScan is true, the GC aborts if any read barriers occur during
// ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
// Only enabled for kIsDebugBuild to avoid performance hit.
static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
// Slow path mark stack size; increase this if the stack is getting full and it is causing
// performance problems.
static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
// Verify that there are no missing card marks.
static constexpr bool kVerifyNoMissingCardMarks = kIsDebugBuild;

ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                     const std::string& name_prefix,
                                     bool measure_read_barrier_slow_path)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     kDefaultGcMarkStackSize,
                                                     kDefaultGcMarkStackSize)),
      rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
                                                         kReadBarrierMarkStackSize,
                                                         kReadBarrierMarkStackSize)),
      rb_mark_bit_stack_full_(false),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false),
      is_using_read_barrier_entrypoints_(false),
      is_active_(false),
      is_asserting_to_space_invariant_(false),
      region_space_bitmap_(nullptr),
      heap_mark_bitmap_(nullptr),
      live_stack_freeze_size_(0),
      from_space_num_objects_at_first_pause_(0),
      from_space_num_bytes_at_first_pause_(0),
      mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
      mark_from_read_barrier_measurements_(false),
      rb_slow_path_ns_(0),
      rb_slow_path_count_(0),
      rb_slow_path_count_gc_(0),
      rb_slow_path_histogram_lock_("Read barrier histogram lock"),
      rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
      rb_slow_path_count_total_(0),
      rb_slow_path_count_gc_total_(0),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false),
      gc_grays_immune_objects_(false),
      immune_gray_stack_lock_("concurrent copying immune gray stack lock",
                              kMarkSweepMarkStackLock) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark(), which could cause a nested lock on heap_bitmap_lock_
    // when the GC itself triggers a read barrier while running, or a
    // lock order violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* field,
                                          bool do_atomic_update) {
  if (UNLIKELY(do_atomic_update)) {
    // Used to mark the referent in DelayReferenceReferent in transaction mode.
    mirror::Object* from_ref = field->AsMirrorPtr();
    if (from_ref == nullptr) {
      return;
    }
    mirror::Object* to_ref = Mark(from_ref);
    if (from_ref != to_ref) {
      do {
        if (field->AsMirrorPtr() != from_ref) {
          // Concurrently overwritten by a mutator.
          break;
        }
      } while (!field->CasWeakRelaxed(from_ref, to_ref));
    }
  } else {
    // Used for preserving soft references. It should be OK not to have a CAS here since there
    // should be no other threads which can trigger read barriers on the same referent during
    // reference processing.
    field->Assign(Mark(field->AsMirrorPtr()));
  }
}

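// Frees the pooled thread-local mark stacks created in the constructor.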
ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
    // Switch to read barrier mark entrypoints before we gray the objects. This is required in
    // case a mutator sees a gray bit and dispatches on the entrypoint. (b/37876887).
    ActivateReadBarrierEntrypoints();
    // Gray dirty immune objects concurrently to reduce GC pause times. We re-process gray cards
    // in the pause.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    GrayAllDirtyImmuneObjects();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this, false);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

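// Checkpoint closure that switches a thread over to the read barrier mark entrypoints.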
class ConcurrentCopying::ActivateReadBarrierEntrypointsCheckpoint : public Closure {
 public:
  explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {}

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Switch to the read barrier entrypoints.
    thread->SetReadBarrierEntrypoints();
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

class ConcurrentCopying::ActivateReadBarrierEntrypointsCallback : public Closure {
 public:
  explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {}

  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
    // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
    // to avoid a race with ThreadList::Register().
    CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
    concurrent_copying_->is_using_read_barrier_entrypoints_ = true;
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::ActivateReadBarrierEntrypoints() {
  Thread* const self = Thread::Current();
  ActivateReadBarrierEntrypointsCheckpoint checkpoint(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  ActivateReadBarrierEntrypointsCallback callback(this);
  const size_t barrier_count = thread_list->RunCheckpoint(&checkpoint, &callback);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
  gc_barrier_->Increment(self, barrier_count);
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      immune_spaces_.AddSpace(space);
    } else if (space == region_space_) {
      // It is OK to clear the bitmap with mutators running since the only place it is read is
      // VisitObjects, which has exclusion with CC.
      region_space_bitmap_ = region_space_->GetMarkBitmap();
      region_space_bitmap_->Clear();
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(false_gray_stack_.empty());
  }

  rb_mark_bit_stack_full_ = false;
  mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.StoreRelaxed(0);
    rb_slow_path_count_.StoreRelaxed(0);
    rb_slow_path_count_gc_.StoreRelaxed(0);
  }

  immune_spaces_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  GcCause gc_cause = GetCurrentIteration()->GetGcCause();
  if (gc_cause == kGcCauseExplicit ||
      gc_cause == kGcCauseForNativeAlloc ||
      gc_cause == kGcCauseCollectorTransition ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  if (kUseBakerReadBarrier) {
    updated_all_immune_objects_.StoreRelaxed(false);
    // GC may gray immune objects in the thread flip.
    gc_grays_immune_objects_ = true;
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      DCHECK(immune_gray_stack_.empty());
    }
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
              << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
      LOG(INFO) << "Immune space: " << *space;
    }
    LOG(INFO) << "GC end of InitializePhase";
  }
  // Mark all of the zygote large objects without graying them.
  MarkZygoteLargeObjects();
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarkingAndUpdateEntrypoints(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
    // only.
    thread->VisitRoots(this);
    concurrent_copying_->GetBarrier().Pass(self);
  }

  void VisitRoots(mirror::Object*** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::Object** root = roots[i];
      mirror::Object* ref = *root;
      if (ref != nullptr) {
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          *root = to_ref;
        }
      }
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::CompressedReference<mirror::Object>* const root = roots[i];
      if (!root->IsNull()) {
        mirror::Object* ref = root->AsMirrorPtr();
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          root->Assign(to_ref);
        }
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class ConcurrentCopying::FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    if (kVerifyNoMissingCardMarks) {
      cc->VerifyNoMissingCardMarks();
    }
    CHECK_EQ(thread, self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    {
      TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
      cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    }
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (kIsDebugBuild) {
      cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
    }
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split3("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      cc->GrayAllNewlyDirtyImmuneObjects();
      if (kIsDebugBuild) {
        // Check that all non-gray immune objects only reference immune objects.
        cc->VerifyGrayImmuneObjects();
      }
    }
    // May be null during runtime creation, in which case we leave java_lang_Object_ null.
    // This is safe since single threaded behavior should mean FillDummyObject does not
    // happen when java_lang_Object_ is null.
    if (WellKnownClasses::java_lang_Object != nullptr) {
      cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(
          WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object).Ptr()));
    } else {
      cc->java_lang_Object_ = nullptr;
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

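// Visitor used by VerifyGrayImmuneObjects() to check that a non-gray immune object only
// references objects in the immune spaces (or zygote large objects).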
class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
 public:
  explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
                   obj, offset);
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
                   ref,
                   mirror::Reference::ReferentOffset());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
  }

 private:
  ConcurrentCopying* const collector_;

  void CheckReference(ObjPtr<mirror::Object> ref,
                      ObjPtr<mirror::Object> holder,
                      MemberOffset offset) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (ref != nullptr) {
      if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
        // Not immune, must be a zygote large object.
        CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
            Thread::Current(), ref.Ptr()))
            << "Non gray object references non immune, non zygote large object " << ref << " "
            << mirror::Object::PrettyTypeOf(ref) << " in holder " << holder << " "
            << mirror::Object::PrettyTypeOf(holder) << " offset=" << offset.Uint32Value();
      } else {
        // Make sure the large object class is immune since we will never scan the large object.
        CHECK(collector_->immune_spaces_.ContainsObject(
            ref->GetClass<kVerifyNone, kWithoutReadBarrier>()));
      }
    }
  }
};

void ConcurrentCopying::VerifyGrayImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  for (auto& space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    VerifyGrayImmuneObjectsVisitor visitor(this);
    live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  [&visitor](mirror::Object* obj)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      // If an object is not gray, it should only have references to things in the immune spaces.
      if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
        obj->VisitReferences</*kVisitNativeRoots*/true,
                             kDefaultVerifyFlags,
                             kWithoutReadBarrier>(visitor, visitor);
      }
    });
  }
}

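// Visitor used by VerifyNoMissingCardMarks() to check that an object whose card is clean does not
// reference any object in a newly allocated region.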
class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
 public:
  VerifyNoMissingCardMarkVisitor(ConcurrentCopying* cc, ObjPtr<mirror::Object> holder)
      : cc_(cc),
        holder_(holder) {}

  void operator()(ObjPtr<mirror::Object> obj,
                  MemberOffset offset,
                  bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
      CheckReference(obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(
          offset), offset.Uint32Value());
    }
  }
  void operator()(ObjPtr<mirror::Class> klass,
                  ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CheckReference(root->AsMirrorPtr());
  }

  void CheckReference(mirror::Object* ref, int32_t offset = -1) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK(ref == nullptr || !cc_->region_space_->IsInNewlyAllocatedRegion(ref))
        << holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
        << ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
  }

 private:
  ConcurrentCopying* const cc_;
  ObjPtr<mirror::Object> const holder_;
};

void ConcurrentCopying::VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg) {
  auto* collector = reinterpret_cast<ConcurrentCopying*>(arg);
  // Objects not on dirty or aged cards should never have references to newly allocated regions.
  if (collector->heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
    VerifyNoMissingCardMarkVisitor visitor(collector, /*holder*/ obj);
    obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
        visitor,
        visitor);
  }
}

void ConcurrentCopying::VerifyNoMissingCardMarks() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  region_space_->Walk(&VerifyNoMissingCardMarkCallback, this);
  {
    ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
    heap_->GetLiveBitmap()->Walk(&VerifyNoMissingCardMarkCallback, this);
  }
}

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);

  // This is the point where Concurrent-Copying will pause all threads. We report a pause here, if
  // necessary. This is slightly over-reporting, as this includes the time to actually suspend
  // threads.
  {
    GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->StartPause();
    }
  }

  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);

  {
    GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->EndPause();
    }
  }

  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

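// Grays (white -> gray) objects in the immune spaces; used concurrently before the pause
// (kConcurrent == true) and with the mutator lock exclusively held during the pause
// (kConcurrent == false).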
template <bool kConcurrent>
class ConcurrentCopying::GrayImmuneObjectVisitor {
 public:
  explicit GrayImmuneObjectVisitor(Thread* self) : self_(self) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier && obj->GetReadBarrierState() == ReadBarrier::WhiteState()) {
      if (kConcurrent) {
        Locks::mutator_lock_->AssertSharedHeld(self_);
        obj->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
        // Mod union table VisitObjects may visit the same object multiple times so we can't check
        // the result of the atomic set.
      } else {
        Locks::mutator_lock_->AssertExclusiveHeld(self_);
        obj->SetReadBarrierState(ReadBarrier::GrayState());
      }
    }
  }

  static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<GrayImmuneObjectVisitor<kConcurrent>*>(arg)->operator()(obj);
  }

 private:
  Thread* const self_;
};

void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
  TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
  accounting::CardTable* const card_table = heap_->GetCardTable();
  Thread* const self = Thread::Current();
  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ true>;
  VisitorType visitor(self);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    // Mark all the objects on dirty cards since these may point to objects in other spaces.
    // Once these are marked, the GC will eventually clear them later.
    // Table is non null for boot image and zygote spaces. It is only null for application image
    // spaces.
    if (table != nullptr) {
      table->ProcessCards();
      table->VisitObjects(&VisitorType::Callback, &visitor);
      // Don't clear cards here since we need to rescan in the pause. If we cleared the cards here,
      // there would be races with the mutator marking new cards.
    } else {
      // Keep cards aged if we don't have a mod-union table since we may need to scan them in
      // future GCs. This case is for app images.
      card_table->ModifyCardsAtomic(
          space->Begin(),
          space->End(),
          [](uint8_t card) {
            return (card != gc::accounting::CardTable::kCardClean)
                ? gc::accounting::CardTable::kCardAged
                : card;
          },
          /* card modified visitor */ VoidFunctor());
      card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
                                               space->Begin(),
                                               space->End(),
                                               visitor,
                                               gc::accounting::CardTable::kCardAged);
    }
  }
}

void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
  TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
  accounting::CardTable* const card_table = heap_->GetCardTable();
  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ false>;
  Thread* const self = Thread::Current();
  VisitorType visitor(self);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);

    // Don't need to scan aged cards since we did these before the pause. Note that scanning cards
    // also handles the mod-union table cards.
    card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
                                             space->Begin(),
                                             space->End(),
                                             visitor,
                                             gc::accounting::CardTable::kCardDirty);
    if (table != nullptr) {
      // Add the cards to the mod-union table so that we can clear cards to save RAM.
      table->ProcessCards();
      TimingLogger::ScopedTiming split2("(Paused)ClearCards", GetTimings());
      card_table->ClearCardRange(space->Begin(),
                                 AlignDown(space->End(), accounting::CardTable::kCardSize));
    }
  }
  // Since all of the objects that may point to other spaces are gray, we can avoid all the read
  // barriers in the immune spaces.
  updated_all_immune_objects_.StoreRelaxed(true);
}

void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(immune_spaces_.ContainsObject(obj));
  // Update the fields without graying it or pushing it onto the mark stack.
  Scan(obj);
}

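// Visitor that scans objects in the immune spaces. With Baker read barriers and
// kGrayDirtyImmuneObjects, only gray (dirty) objects are scanned and they are set back to white
// afterwards.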
class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
 public:
  explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      // Only need to scan gray objects.
      if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
        collector_->ScanImmuneObject(obj);
        // Done scanning the object, go back to white.
        bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
                                                      ReadBarrier::WhiteState());
        CHECK(success);
      }
    } else {
      collector_->ScanImmuneObject(obj);
    }
  }

  static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  Thread* self = Thread::Current();
  if (kIsDebugBuild) {
    MutexLock mu(self, *Locks::thread_list_lock_);
    CHECK(weak_ref_access_enabled_);
  }

  // Scan immune spaces.
  // Update all the fields in the immune spaces first without graying the objects so that we
  // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray some
  // of the objects.
  if (kUseBakerReadBarrier) {
    gc_grays_immune_objects_ = false;
  }
  {
    TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
    for (auto& space : immune_spaces_.GetSpaces()) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      ImmuneSpaceScanObjVisitor visitor(this);
      if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
        table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
      } else {
        // TODO: Scan only the aged cards.
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->Limit()),
                                      visitor);
      }
    }
  }
  if (kUseBakerReadBarrier) {
    // This release fence makes the field updates in the above loop visible before allowing
    // mutators to access immune objects without graying them first.
    updated_all_immune_objects_.StoreRelease(true);
    // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
    // the above loop because we would incorrectly disable the read barrier by whitening an object
    // which may point to an unscanned, white object, breaking the to-space invariant.
    //
    // Make sure no mutators are in the middle of marking an immune object before whitening immune
    // objects.
    IssueEmptyCheckpoint();
    MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
    if (kVerboseMode) {
      LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
    }
    for (mirror::Object* obj : immune_gray_stack_) {
      DCHECK(obj->GetReadBarrierState() == ReadBarrier::GrayState());
      bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
                                                    ReadBarrier::WhiteState());
      DCHECK(success);
    }
    immune_gray_stack_.clear();
  }

  {
    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but after we disable weak ref accesses, we can't use a checkpoint due to a deadlock issue
    // (running threads may be blocking at WaitHoldingLocks), and that once we reach the point
    // where we process weak references, we can avoid using a lock when accessing the GC mark
    // stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process new
    // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
    // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
    // important to do these together in a single checkpoint so that we can ensure that mutators
    // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
    // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
    // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
    // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the above
    // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
    // (via read barriers) have no way to produce any more refs to process. Marking converges once
    // before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may
    // have marked some objects (kept strings alive) as hash_set::Erase() can call the hash
    // function for arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    if (kUseBakerReadBarrier) {
      ProcessFalseGrayStack();
    }
    CheckEmptyMarkStack();
  }

  if (kIsDebugBuild) {
    MutexLock mu(self, *Locks::thread_list_lock_);
    CHECK(weak_ref_access_enabled_);
  }
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    weak_ref_access_enabled_ = true;  // This is for new threads.
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

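// Checkpoint closure that clears the thread-local is_gc_marking flag and switches the thread back
// to the non-marking entrypoints.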
class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may have already set this
    // flag to false, which is ok.
    thread->SetIsGcMarkingAndUpdateEntrypoints(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

class ConcurrentCopying::DisableMarkingCallback : public Closure {
 public:
  explicit DisableMarkingCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
    // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
    // to avoid a race with ThreadList::Register().
    CHECK(concurrent_copying_->is_marking_);
    concurrent_copying_->is_marking_ = false;
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      CHECK(concurrent_copying_->is_using_read_barrier_entrypoints_);
      concurrent_copying_->is_using_read_barrier_entrypoints_ = false;
    } else {
      CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  DisableMarkingCallback dmc(this);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point, &dmc);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Use a checkpoint to turn off the global is_marking and the thread-local is_gc_marking flags and
  // to ensure no threads are still in the middle of a read barrier which may have a from-space ref
  // cached in a local variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

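// The false gray stack records objects that were grayed unnecessarily; ProcessFalseGrayStack()
// changes them back from gray to white after marking is disabled.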
void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
  CHECK(kUseBakerReadBarrier);
  DCHECK(ref != nullptr);
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  false_gray_stack_.push_back(ref);
}

void ConcurrentCopying::ProcessFalseGrayStack() {
  CHECK(kUseBakerReadBarrier);
  // Change the objects on the false gray stack from gray to white.
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  for (mirror::Object* obj : false_gray_stack_) {
    DCHECK(IsMarked(obj));
    // The object could be white here if a thread got preempted after a success at the
    // AtomicSetReadBarrierState in Mark(), the GC started marking through it (but did not finish,
    // so it is still gray), and the thread then registered it onto the false gray stack.
    if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
      bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
                                                    ReadBarrier::WhiteState());
      DCHECK(success);
    }
  }
  false_gray_stack_.clear();
}

Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001065void ConcurrentCopying::IssueEmptyCheckpoint() {
1066 Thread* self = Thread::Current();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001067 ThreadList* thread_list = Runtime::Current()->GetThreadList();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001068 // Release locks then wait for all mutator threads to pass the barrier.
1069 Locks::mutator_lock_->SharedUnlock(self);
Hiroshi Yamauchia2224042017-02-08 16:35:45 -08001070 thread_list->RunEmptyCheckpoint();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001071 Locks::mutator_lock_->SharedLock(self);
1072}
1073
Hiroshi Yamauchi19eab402015-10-23 19:59:58 -07001074void ConcurrentCopying::ExpandGcMarkStack() {
1075 DCHECK(gc_mark_stack_->IsFull());
1076 const size_t new_size = gc_mark_stack_->Capacity() * 2;
1077 std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
1078 gc_mark_stack_->End());
1079 gc_mark_stack_->Resize(new_size);
1080 for (auto& ref : temp) {
1081 gc_mark_stack_->PushBack(ref.AsMirrorPtr());
1082 }
1083 DCHECK(!gc_mark_stack_->IsFull());
1084}
1085
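// PushOntoMarkStack() below dispatches on mark_stack_mode_:
//  - kMarkStackModeThreadLocal: the GC thread pushes onto gc_mark_stack_; mutators push onto
//    per-thread mark stacks obtained from (and returned to) pooled_mark_stacks_ under
//    mark_stack_lock_.
//  - kMarkStackModeShared: all threads push onto gc_mark_stack_ under mark_stack_lock_.
//  - kMarkStackModeGcExclusive: only the GC-running thread may push, so no lock is taken.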
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001086void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001087 CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
David Sehr709b0702016-10-13 09:12:37 -07001088 << " " << to_ref << " " << mirror::Object::PrettyTypeOf(to_ref);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001089 Thread* self = Thread::Current(); // TODO: pass self as an argument from call sites?
1090 CHECK(thread_running_gc_ != nullptr);
1091 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001092 if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
1093 if (LIKELY(self == thread_running_gc_)) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001094 // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
1095 CHECK(self->GetThreadLocalMarkStack() == nullptr);
Hiroshi Yamauchi19eab402015-10-23 19:59:58 -07001096 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1097 ExpandGcMarkStack();
1098 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001099 gc_mark_stack_->PushBack(to_ref);
1100 } else {
1101 // Otherwise, use a thread-local mark stack.
1102 accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
1103 if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
1104 MutexLock mu(self, mark_stack_lock_);
1105 // Get a new thread local mark stack.
1106 accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
1107 if (!pooled_mark_stacks_.empty()) {
1108 // Use a pooled mark stack.
1109 new_tl_mark_stack = pooled_mark_stacks_.back();
1110 pooled_mark_stacks_.pop_back();
1111 } else {
1112 // None pooled. Create a new one.
1113 new_tl_mark_stack =
1114 accounting::AtomicStack<mirror::Object>::Create(
1115 "thread local mark stack", 4 * KB, 4 * KB);
1116 }
1117 DCHECK(new_tl_mark_stack != nullptr);
1118 DCHECK(new_tl_mark_stack->IsEmpty());
1119 new_tl_mark_stack->PushBack(to_ref);
1120 self->SetThreadLocalMarkStack(new_tl_mark_stack);
1121 if (tl_mark_stack != nullptr) {
1122 // Store the old full stack into a vector.
1123 revoked_mark_stacks_.push_back(tl_mark_stack);
1124 }
1125 } else {
1126 tl_mark_stack->PushBack(to_ref);
1127 }
1128 }
1129 } else if (mark_stack_mode == kMarkStackModeShared) {
1130 // Access the shared GC mark stack with a lock.
1131 MutexLock mu(self, mark_stack_lock_);
Hiroshi Yamauchi19eab402015-10-23 19:59:58 -07001132 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1133 ExpandGcMarkStack();
1134 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001135 gc_mark_stack_->PushBack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001136 } else {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001137 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
Hiroshi Yamauchifa755182015-09-30 20:12:11 -07001138 static_cast<uint32_t>(kMarkStackModeGcExclusive))
1139 << "ref=" << to_ref
1140 << " self->gc_marking=" << self->GetIsGcMarking()
1141 << " cc->is_marking=" << is_marking_;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001142 CHECK(self == thread_running_gc_)
1143 << "Only GC-running thread should access the mark stack "
1144 << "in the GC exclusive mark stack mode";
1145 // Access the GC mark stack without a lock.
Hiroshi Yamauchi19eab402015-10-23 19:59:58 -07001146 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1147 ExpandGcMarkStack();
1148 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001149 gc_mark_stack_->PushBack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001150 }
1151}
1152
1153accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
1154 return heap_->allocation_stack_.get();
1155}
1156
1157accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
1158 return heap_->live_stack_.get();
1159}
1160
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001161// The following visitors are used to verify that there are no references to the from-space left
1162// after marking.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001163class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001164 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001165 explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001166 : collector_(collector) {}
1167
Mathieu Chartierbc632f02017-04-20 13:31:39 -07001168 void operator()(mirror::Object* ref,
1169 MemberOffset offset = MemberOffset(0),
1170 mirror::Object* holder = nullptr) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001171 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001172 if (ref == nullptr) {
1173 // OK.
1174 return;
1175 }
Mathieu Chartierbc632f02017-04-20 13:31:39 -07001176 collector_->AssertToSpaceInvariant(holder, offset, ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001177 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001178 CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState())
David Sehr709b0702016-10-13 09:12:37 -07001179 << "Ref " << ref << " " << ref->PrettyTypeOf()
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001180 << " has non-white rb_state ";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001181 }
1182 }
1183
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001184 void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001185 OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001186 DCHECK(root != nullptr);
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001187 operator()(root);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001188 }
1189
1190 private:
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001191 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001192};
1193
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001194class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001195 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001196 explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001197 : collector_(collector) {}
1198
Mathieu Chartier31e88222016-10-14 18:43:19 -07001199 void operator()(ObjPtr<mirror::Object> obj,
1200 MemberOffset offset,
1201 bool is_static ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001202 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001203 mirror::Object* ref =
1204 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001205 VerifyNoFromSpaceRefsVisitor visitor(collector_);
Mathieu Chartierbc632f02017-04-20 13:31:39 -07001206 visitor(ref, offset, obj.Ptr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001207 }
Mathieu Chartier31e88222016-10-14 18:43:19 -07001208 void operator()(ObjPtr<mirror::Class> klass,
1209 ObjPtr<mirror::Reference> ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001210 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001211 CHECK(klass->IsTypeOfReferenceClass());
1212 this->operator()(ref, mirror::Reference::ReferentOffset(), false);
1213 }
1214
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001215 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001216 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001217 if (!root->IsNull()) {
1218 VisitRoot(root);
1219 }
1220 }
1221
1222 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001223 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001224 VerifyNoFromSpaceRefsVisitor visitor(collector_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001225 visitor(root->AsMirrorPtr());
1226 }
1227
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001228 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001229 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001230};
1231
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001232class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001233 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001234 explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001235 : collector_(collector) {}
1236 void operator()(mirror::Object* obj) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001237 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001238 ObjectCallback(obj, collector_);
1239 }
1240 static void ObjectCallback(mirror::Object* obj, void *arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001241 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001242 CHECK(obj != nullptr);
1243 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
1244 space::RegionSpace* region_space = collector->RegionSpace();
1245 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001246 VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001247 obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1248 visitor,
1249 visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001250 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001251 CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
1252 << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001253 }
1254 }
1255
1256 private:
1257 ConcurrentCopying* const collector_;
1258};
1259
1260// Verify there's no from-space references left after the marking phase.
1261void ConcurrentCopying::VerifyNoFromSpaceReferences() {
1262 Thread* self = Thread::Current();
1263 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
Hiroshi Yamauchi00370822015-08-18 14:47:25 -07001264 // Verify that all threads have is_gc_marking set to false.
1265 {
1266 MutexLock mu(self, *Locks::thread_list_lock_);
1267 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1268 for (Thread* thread : thread_list) {
1269 CHECK(!thread->GetIsGcMarking());
1270 }
1271 }
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001272 VerifyNoFromSpaceRefsObjectVisitor visitor(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001273 // Roots.
1274 {
1275 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001276 VerifyNoFromSpaceRefsVisitor ref_visitor(this);
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001277 Runtime::Current()->VisitRoots(&ref_visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001278 }
1279 // The to-space.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001280 region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001281 // Non-moving spaces.
1282 {
1283 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1284 heap_->GetMarkBitmap()->Visit(visitor);
1285 }
1286 // The alloc stack.
1287 {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001288 VerifyNoFromSpaceRefsVisitor ref_visitor(this);
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001289 for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
1290 it < end; ++it) {
1291 mirror::Object* const obj = it->AsMirrorPtr();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001292 if (obj != nullptr && obj->GetClass() != nullptr) {
1293 // TODO: need to call this only if obj is alive?
1294 ref_visitor(obj);
1295 visitor(obj);
1296 }
1297 }
1298 }
1299 // TODO: LOS. But only refs in LOS are classes.
1300}
1301
1302// The following visitors are used to assert the to-space invariant.
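// Informally, the invariant checked here is that a reference must point to the to-space, to a
// marked object in the unevac from-space, or to a marked/immune/alloc-stack object in a
// non-moving space -- never into the from-space (see AssertToSpaceInvariant() further below).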
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001303class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001304 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001305 explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001306 : collector_(collector) {}
1307
1308 void operator()(mirror::Object* ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001309 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001310 if (ref == nullptr) {
1311 // OK.
1312 return;
1313 }
1314 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
1315 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001316
1317 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001318 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001319};
1320
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001321class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001322 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001323 explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001324 : collector_(collector) {}
1325
Mathieu Chartier31e88222016-10-14 18:43:19 -07001326 void operator()(ObjPtr<mirror::Object> obj,
1327 MemberOffset offset,
1328 bool is_static ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001329 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001330 mirror::Object* ref =
1331 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001332 AssertToSpaceInvariantRefsVisitor visitor(collector_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001333 visitor(ref);
1334 }
Mathieu Chartier31e88222016-10-14 18:43:19 -07001335 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001336 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001337 CHECK(klass->IsTypeOfReferenceClass());
1338 }
1339
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001340 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001341 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001342 if (!root->IsNull()) {
1343 VisitRoot(root);
1344 }
1345 }
1346
1347 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001348 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001349 AssertToSpaceInvariantRefsVisitor visitor(collector_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001350 visitor(root->AsMirrorPtr());
1351 }
1352
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001353 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001354 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001355};
1356
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001357class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001358 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001359 explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001360 : collector_(collector) {}
1361 void operator()(mirror::Object* obj) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001362 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001363 ObjectCallback(obj, collector_);
1364 }
1365 static void ObjectCallback(mirror::Object* obj, void *arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001366 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001367 CHECK(obj != nullptr);
1368 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
1369 space::RegionSpace* region_space = collector->RegionSpace();
1370 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
1371 collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001372 AssertToSpaceInvariantFieldVisitor visitor(collector);
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001373 obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1374 visitor,
1375 visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001376 }
1377
1378 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001379 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001380};
1381
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001382class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001383 public:
Roland Levillain3887c462015-08-12 18:15:42 +01001384 RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
1385 bool disable_weak_ref_access)
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001386 : concurrent_copying_(concurrent_copying),
1387 disable_weak_ref_access_(disable_weak_ref_access) {
1388 }
1389
1390 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1391 // Note: self is not necessarily equal to thread since thread may be suspended.
1392 Thread* self = Thread::Current();
1393 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1394 << thread->GetState() << " thread " << thread << " self " << self;
1395 // Revoke thread local mark stacks.
1396 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1397 if (tl_mark_stack != nullptr) {
1398 MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
1399 concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
1400 thread->SetThreadLocalMarkStack(nullptr);
1401 }
1402 // Disable weak ref access.
1403 if (disable_weak_ref_access_) {
1404 thread->SetWeakRefAccessEnabled(false);
1405 }
1406 // If thread is a running mutator, then act on behalf of the garbage collector.
1407 // See the code in ThreadList::RunCheckpoint.
Mathieu Chartier10d25082015-10-28 18:36:09 -07001408 concurrent_copying_->GetBarrier().Pass(self);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001409 }
1410
1411 private:
1412 ConcurrentCopying* const concurrent_copying_;
1413 const bool disable_weak_ref_access_;
1414};
1415
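// Rough outline of the revocation flow: the checkpoint above moves each thread's local mark
// stack into revoked_mark_stacks_ (optionally disabling that thread's weak ref access), and the
// caller waits on gc_barrier_ for every mutator to pass before the revoked stacks are processed.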
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001416void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
1417 Closure* checkpoint_callback) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001418 Thread* self = Thread::Current();
1419 RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
1420 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1421 gc_barrier_->Init(self, 0);
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001422 size_t barrier_count = thread_list->RunCheckpoint(&check_point, checkpoint_callback);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001423 // If there are no threads to wait for, which implies that all the checkpoint functions have
1424 // finished, then there is no need to release the mutator lock.
1425 if (barrier_count == 0) {
1426 return;
1427 }
1428 Locks::mutator_lock_->SharedUnlock(self);
1429 {
1430 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1431 gc_barrier_->Increment(self, barrier_count);
1432 }
1433 Locks::mutator_lock_->SharedLock(self);
1434}
1435
1436void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
1437 Thread* self = Thread::Current();
1438 CHECK_EQ(self, thread);
1439 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1440 if (tl_mark_stack != nullptr) {
1441 CHECK(is_marking_);
1442 MutexLock mu(self, mark_stack_lock_);
1443 revoked_mark_stacks_.push_back(tl_mark_stack);
1444 thread->SetThreadLocalMarkStack(nullptr);
1445 }
1446}
1447
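// Keep draining until ProcessMarkStackOnce() reports an empty mark stack twice in a row; a
// single empty observation is not treated as sufficient, presumably because new references may
// be pushed concurrently between passes.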
1448void ConcurrentCopying::ProcessMarkStack() {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001449 if (kVerboseMode) {
1450 LOG(INFO) << "ProcessMarkStack. ";
1451 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001452 bool empty_prev = false;
1453 while (true) {
1454 bool empty = ProcessMarkStackOnce();
1455 if (empty_prev && empty) {
1456 // Saw empty mark stack for a second time, done.
1457 break;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001458 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001459 empty_prev = empty;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001460 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001461}
1462
1463bool ConcurrentCopying::ProcessMarkStackOnce() {
1464 Thread* self = Thread::Current();
1465 CHECK(thread_running_gc_ != nullptr);
1466 CHECK(self == thread_running_gc_);
1467 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1468 size_t count = 0;
1469 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1470 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1471 // Process the thread-local mark stacks and the GC mark stack.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001472 count += ProcessThreadLocalMarkStacks(false, nullptr);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001473 while (!gc_mark_stack_->IsEmpty()) {
1474 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1475 ProcessMarkStackRef(to_ref);
1476 ++count;
1477 }
1478 gc_mark_stack_->Reset();
1479 } else if (mark_stack_mode == kMarkStackModeShared) {
Hiroshi Yamauchi30493242016-11-03 13:06:52 -07001480 // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
1481 // barrier but before pushing onto the mark stack. b/32508093. Note the weak ref access is
1482 // disabled at this point.
1483 IssueEmptyCheckpoint();
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001484 // Process the shared GC mark stack with a lock.
1485 {
1486 MutexLock mu(self, mark_stack_lock_);
1487 CHECK(revoked_mark_stacks_.empty());
1488 }
1489 while (true) {
1490 std::vector<mirror::Object*> refs;
1491 {
1492 // Copy refs with lock. Note the number of refs should be small.
1493 MutexLock mu(self, mark_stack_lock_);
1494 if (gc_mark_stack_->IsEmpty()) {
1495 break;
1496 }
1497 for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
1498 p != gc_mark_stack_->End(); ++p) {
1499 refs.push_back(p->AsMirrorPtr());
1500 }
1501 gc_mark_stack_->Reset();
1502 }
1503 for (mirror::Object* ref : refs) {
1504 ProcessMarkStackRef(ref);
1505 ++count;
1506 }
1507 }
1508 } else {
1509 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
1510 static_cast<uint32_t>(kMarkStackModeGcExclusive));
1511 {
1512 MutexLock mu(self, mark_stack_lock_);
1513 CHECK(revoked_mark_stacks_.empty());
1514 }
1515 // Process the GC mark stack in the exclusive mode. No need to take the lock.
1516 while (!gc_mark_stack_->IsEmpty()) {
1517 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1518 ProcessMarkStackRef(to_ref);
1519 ++count;
1520 }
1521 gc_mark_stack_->Reset();
1522 }
1523
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001524 // Return true if the stack was empty.
1525 return count == 0;
1526}
1527
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001528size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
1529 Closure* checkpoint_callback) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001530 // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001531 RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001532 size_t count = 0;
1533 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
1534 {
1535 MutexLock mu(Thread::Current(), mark_stack_lock_);
1536 // Make a copy of the mark stack vector.
1537 mark_stacks = revoked_mark_stacks_;
1538 revoked_mark_stacks_.clear();
1539 }
1540 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
1541 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
1542 mirror::Object* to_ref = p->AsMirrorPtr();
1543 ProcessMarkStackRef(to_ref);
1544 ++count;
1545 }
1546 {
1547 MutexLock mu(Thread::Current(), mark_stack_lock_);
1548 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
1549 // The pool has enough. Delete it.
1550 delete mark_stack;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001551 } else {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001552 // Otherwise, put it into the pool for later reuse.
1553 mark_stack->Reset();
1554 pooled_mark_stacks_.push_back(mark_stack);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001555 }
1556 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001557 }
1558 return count;
1559}
1560
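// For each popped reference: scan its fields (for unevac from-space objects, also set the region
// space bitmap and account live bytes if not already marked), then, with Baker read barriers,
// flip the object from gray back to white -- unless it is a reference object whose referent is
// not yet in the to-space, which is left gray for ReferenceQueue::DequeuePendingReference().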
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001561inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001562 DCHECK(!region_space_->IsInFromSpace(to_ref));
1563 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001564 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
1565 << " " << to_ref << " " << to_ref->GetReadBarrierState()
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001566 << " is_marked=" << IsMarked(to_ref);
1567 }
Mathieu Chartierc381c362016-08-23 13:27:53 -07001568 bool add_to_live_bytes = false;
1569 if (region_space_->IsInUnevacFromSpace(to_ref)) {
1570 // Mark the bitmap only in the GC thread here so that we don't need a CAS.
1571 if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
1572 // It may already be marked if we accidentally pushed the same object twice due to the racy
1573 // bitmap read in MarkUnevacFromSpaceRegion.
1574 Scan(to_ref);
1575 // Only add to the live bytes if the object was not already marked.
1576 add_to_live_bytes = true;
1577 }
1578 } else {
1579 Scan(to_ref);
1580 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001581 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001582 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
1583 << " " << to_ref << " " << to_ref->GetReadBarrierState()
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001584 << " is_marked=" << IsMarked(to_ref);
1585 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001586#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
Hiroshi Yamauchi39c12d42016-12-06 16:46:37 -08001587 mirror::Object* referent = nullptr;
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001588 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
Hiroshi Yamauchi39c12d42016-12-06 16:46:37 -08001589 (referent = to_ref->AsReference()->GetReferent<kWithoutReadBarrier>()) != nullptr &&
1590 !IsInToSpace(referent)))) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001591 // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
1592 // will change it to white later in ReferenceQueue::DequeuePendingReference().
Richard Uhlere3627402016-02-02 13:36:55 -08001593 DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001594 } else {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001595 // We may occasionally leave a reference white in the queue if its referent happens to be
1596 // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
1597 // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
1598 // else block.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001599 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001600 bool success = to_ref->AtomicSetReadBarrierState</*kCasRelease*/true>(
1601 ReadBarrier::GrayState(),
1602 ReadBarrier::WhiteState());
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001603 DCHECK(success) << "Must succeed as we won the race.";
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001604 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001605 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001606#else
1607 DCHECK(!kUseBakerReadBarrier);
1608#endif
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001609
Mathieu Chartierc381c362016-08-23 13:27:53 -07001610 if (add_to_live_bytes) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001611 // Add to the live bytes of the unevac from-space region. Note this code is always run by the
1612 // GC-running thread (no synchronization required).
1613 DCHECK(region_space_bitmap_->Test(to_ref));
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001614 size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags>();
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001615 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1616 region_space_->AddLiveBytes(to_ref, alloc_size);
1617 }
Andreas Gampee3ce7872017-02-22 13:36:21 -08001618 if (ReadBarrier::kEnableToSpaceInvariantChecks) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001619 AssertToSpaceInvariantObjectVisitor visitor(this);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001620 visitor(to_ref);
1621 }
1622}
1623
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001624class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
1625 public:
1626 explicit DisableWeakRefAccessCallback(ConcurrentCopying* concurrent_copying)
1627 : concurrent_copying_(concurrent_copying) {
1628 }
1629
1630 void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
1631 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
1632 // to avoid a deadlock b/31500969.
1633 CHECK(concurrent_copying_->weak_ref_access_enabled_);
1634 concurrent_copying_->weak_ref_access_enabled_ = false;
1635 }
1636
1637 private:
1638 ConcurrentCopying* const concurrent_copying_;
1639};
1640
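// Mark stack modes only move forward within a GC cycle: thread-local -> shared (weak ref access
// is disabled at the same checkpoint) -> GC-exclusive -> off (set in DisableMarking()).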
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001641void ConcurrentCopying::SwitchToSharedMarkStackMode() {
1642 Thread* self = Thread::Current();
1643 CHECK(thread_running_gc_ != nullptr);
1644 CHECK_EQ(self, thread_running_gc_);
1645 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1646 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1647 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1648 static_cast<uint32_t>(kMarkStackModeThreadLocal));
1649 mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001650 DisableWeakRefAccessCallback dwrac(this);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001651 // Process the thread local mark stacks one last time after switching to the shared mark stack
1652 // mode and disable weak ref accesses.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001653 ProcessThreadLocalMarkStacks(true, &dwrac);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001654 if (kVerboseMode) {
1655 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
1656 }
1657}
1658
1659void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
1660 Thread* self = Thread::Current();
1661 CHECK(thread_running_gc_ != nullptr);
1662 CHECK_EQ(self, thread_running_gc_);
1663 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1664 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1665 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1666 static_cast<uint32_t>(kMarkStackModeShared));
1667 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
1668 QuasiAtomic::ThreadFenceForConstructor();
1669 if (kVerboseMode) {
1670 LOG(INFO) << "Switched to GC exclusive mark stack mode";
1671 }
1672}
1673
1674void ConcurrentCopying::CheckEmptyMarkStack() {
1675 Thread* self = Thread::Current();
1676 CHECK(thread_running_gc_ != nullptr);
1677 CHECK_EQ(self, thread_running_gc_);
1678 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1679 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1680 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1681 // Thread-local mark stack mode.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001682 RevokeThreadLocalMarkStacks(false, nullptr);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001683 MutexLock mu(Thread::Current(), mark_stack_lock_);
1684 if (!revoked_mark_stacks_.empty()) {
1685 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
1686 while (!mark_stack->IsEmpty()) {
1687 mirror::Object* obj = mark_stack->PopBack();
1688 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001689 uint32_t rb_state = obj->GetReadBarrierState();
1690 LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() << " rb_state="
1691 << rb_state << " is_marked=" << IsMarked(obj);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001692 } else {
David Sehr709b0702016-10-13 09:12:37 -07001693 LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf()
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001694 << " is_marked=" << IsMarked(obj);
1695 }
1696 }
1697 }
1698 LOG(FATAL) << "mark stack is not empty";
1699 }
1700 } else {
1701 // Shared, GC-exclusive, or off.
1702 MutexLock mu(Thread::Current(), mark_stack_lock_);
1703 CHECK(gc_mark_stack_->IsEmpty());
1704 CHECK(revoked_mark_stacks_.empty());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001705 }
1706}
1707
1708void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
1709 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
1710 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -07001711 Runtime::Current()->SweepSystemWeaks(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001712}
1713
1714void ConcurrentCopying::Sweep(bool swap_bitmaps) {
1715 {
1716 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
1717 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1718 if (kEnableFromSpaceAccountingCheck) {
1719 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
1720 }
1721 heap_->MarkAllocStackAsLive(live_stack);
1722 live_stack->Reset();
1723 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001724 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001725 TimingLogger::ScopedTiming split("Sweep", GetTimings());
1726 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1727 if (space->IsContinuousMemMapAllocSpace()) {
1728 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001729 if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001730 continue;
1731 }
1732 TimingLogger::ScopedTiming split2(
1733 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
1734 RecordFree(alloc_space->Sweep(swap_bitmaps));
1735 }
1736 }
1737 SweepLargeObjects(swap_bitmaps);
1738}
1739
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07001740void ConcurrentCopying::MarkZygoteLargeObjects() {
1741 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
1742 Thread* const self = Thread::Current();
1743 WriterMutexLock rmu(self, *Locks::heap_bitmap_lock_);
1744 space::LargeObjectSpace* const los = heap_->GetLargeObjectsSpace();
1745 // Pick the current live bitmap (mark bitmap if swapped).
1746 accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap();
1747 accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap();
1748 // Walk through all of the objects and explicitly mark the zygote ones so they don't get swept.
Mathieu Chartier208aaf02016-10-25 10:45:08 -07001749 std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
1750 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
1751 reinterpret_cast<uintptr_t>(range.second),
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07001752 [mark_bitmap, los, self](mirror::Object* obj)
1753 REQUIRES(Locks::heap_bitmap_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001754 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07001755 if (los->IsZygoteLargeObject(self, obj)) {
1756 mark_bitmap->Set(obj);
1757 }
1758 });
1759}
1760
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001761void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
1762 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
1763 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
1764}
1765
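// The freed counts recorded below are derived as cleared - moved. For example (hypothetical
// numbers): if ClearFromSpace() reports cleared_bytes of 8MB while 3MB of objects were copied
// into the to-space, RecordFree() is credited with 5MB. cleared_* may exceed the from-space
// totals because empty unevac regions can also be cleared.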
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001766void ConcurrentCopying::ReclaimPhase() {
1767 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1768 if (kVerboseMode) {
1769 LOG(INFO) << "GC ReclaimPhase";
1770 }
1771 Thread* self = Thread::Current();
1772
1773 {
1774 // Double-check that the mark stack is empty.
1775 // Note: this needs to be set after VerifyNoFromSpaceReferences().
1776 is_asserting_to_space_invariant_ = false;
1777 QuasiAtomic::ThreadFenceForConstructor();
1778 if (kVerboseMode) {
1779 LOG(INFO) << "Issue an empty checkpoint.";
1780 }
1781 IssueEmptyCheckpoint();
1782 // Disable the check.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001783 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001784 if (kUseBakerReadBarrier) {
1785 updated_all_immune_objects_.StoreSequentiallyConsistent(false);
1786 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001787 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001788 }
1789
1790 {
1791 // Record freed objects.
1792 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1793 // Don't include thread-locals that are in the to-space.
Mathieu Chartier371b0472017-02-27 16:37:21 -08001794 const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1795 const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1796 const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1797 const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001798 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
Mathieu Chartiercca44a02016-08-17 10:07:29 -07001799 cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001800 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
Mathieu Chartiercca44a02016-08-17 10:07:29 -07001801 cumulative_objects_moved_.FetchAndAddRelaxed(to_objects);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001802 if (kEnableFromSpaceAccountingCheck) {
1803 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1804 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1805 }
1806 CHECK_LE(to_objects, from_objects);
1807 CHECK_LE(to_bytes, from_bytes);
Mathieu Chartier371b0472017-02-27 16:37:21 -08001808 // cleared_bytes and cleared_objects may be greater than the from space equivalents since
1809 // ClearFromSpace may clear empty unevac regions.
1810 uint64_t cleared_bytes;
1811 uint64_t cleared_objects;
1812 {
1813 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1814 region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
1815 CHECK_GE(cleared_bytes, from_bytes);
1816 CHECK_GE(cleared_objects, from_objects);
1817 }
1818 int64_t freed_bytes = cleared_bytes - to_bytes;
1819 int64_t freed_objects = cleared_objects - to_objects;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001820 if (kVerboseMode) {
1821 LOG(INFO) << "RecordFree:"
1822 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1823 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1824 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1825 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1826 << " from_space size=" << region_space_->FromSpaceSize()
1827 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1828 << " to_space size=" << region_space_->ToSpaceSize();
1829 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1830 }
1831 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1832 if (kVerboseMode) {
1833 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1834 }
1835 }
1836
1837 {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001838 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001839 Sweep(false);
1840 SwapBitmaps();
1841 heap_->UnBindBitmaps();
1842
Mathieu Chartier7ec38dc2016-10-07 15:24:46 -07001843 // The bitmap was cleared at the start of the GC, so there is nothing we need to do here.
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001844 DCHECK(region_space_bitmap_ != nullptr);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001845 region_space_bitmap_ = nullptr;
1846 }
1847
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001848 CheckEmptyMarkStack();
1849
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001850 if (kVerboseMode) {
1851 LOG(INFO) << "GC end of ReclaimPhase";
1852 }
1853}
1854
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001855// Assert the to-space invariant.
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001856void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj,
1857 MemberOffset offset,
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001858 mirror::Object* ref) {
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001859 CHECK_EQ(heap_->collector_type_, kCollectorTypeCC);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001860 if (is_asserting_to_space_invariant_) {
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001861 using RegionType = space::RegionSpace::RegionType;
1862 space::RegionSpace::RegionType type = region_space_->GetRegionType(ref);
1863 if (type == RegionType::kRegionTypeToSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001864 // OK.
1865 return;
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001866 } else if (type == RegionType::kRegionTypeUnevacFromSpace) {
Mathieu Chartierc381c362016-08-23 13:27:53 -07001867 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001868 } else if (UNLIKELY(type == RegionType::kRegionTypeFromSpace)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001869 // Not OK. Do extra logging.
1870 if (obj != nullptr) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001871 LogFromSpaceRefHolder(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001872 }
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001873 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
David Sehr709b0702016-10-13 09:12:37 -07001874 CHECK(false) << "Found from-space ref " << ref << " " << ref->PrettyTypeOf();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001875 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001876 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1877 }
1878 }
1879}
1880
1881class RootPrinter {
1882 public:
1883 RootPrinter() { }
1884
1885 template <class MirrorType>
1886 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001887 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001888 if (!root->IsNull()) {
1889 VisitRoot(root);
1890 }
1891 }
1892
1893 template <class MirrorType>
1894 void VisitRoot(mirror::Object** root)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001895 REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001896 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << *root;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001897 }
1898
1899 template <class MirrorType>
1900 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001901 REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001902 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << root->AsMirrorPtr();
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001903 }
1904};
1905
1906void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1907 mirror::Object* ref) {
1908 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1909 if (is_asserting_to_space_invariant_) {
1910 if (region_space_->IsInToSpace(ref)) {
1911 // OK.
1912 return;
1913 } else if (region_space_->IsInUnevacFromSpace(ref)) {
Mathieu Chartierc381c362016-08-23 13:27:53 -07001914 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001915 } else if (region_space_->IsInFromSpace(ref)) {
1916 // Not OK. Do extra logging.
1917 if (gc_root_source == nullptr) {
1918 // No info.
1919 } else if (gc_root_source->HasArtField()) {
1920 ArtField* field = gc_root_source->GetArtField();
David Sehr709b0702016-10-13 09:12:37 -07001921 LOG(FATAL_WITHOUT_ABORT) << "gc root in field " << field << " "
1922 << ArtField::PrettyField(field);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001923 RootPrinter root_printer;
1924 field->VisitRoots(root_printer);
1925 } else if (gc_root_source->HasArtMethod()) {
1926 ArtMethod* method = gc_root_source->GetArtMethod();
David Sehr709b0702016-10-13 09:12:37 -07001927 LOG(FATAL_WITHOUT_ABORT) << "gc root in method " << method << " "
1928 << ArtMethod::PrettyMethod(method);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001929 RootPrinter root_printer;
Andreas Gampe542451c2016-07-26 09:02:02 -07001930 method->VisitRoots(root_printer, kRuntimePointerSize);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001931 }
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001932 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
1933 region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
1934 PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
1935 MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
David Sehr709b0702016-10-13 09:12:37 -07001936 CHECK(false) << "Found from-space ref " << ref << " " << ref->PrettyTypeOf();
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001937 } else {
1938 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1939 }
1940 }
1941}
1942
1943void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1944 if (kUseBakerReadBarrier) {
David Sehr709b0702016-10-13 09:12:37 -07001945 LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf()
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001946 << " holder rb_state=" << obj->GetReadBarrierState();
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001947 } else {
David Sehr709b0702016-10-13 09:12:37 -07001948 LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf();
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001949 }
1950 if (region_space_->IsInFromSpace(obj)) {
1951 LOG(INFO) << "holder is in the from-space.";
1952 } else if (region_space_->IsInToSpace(obj)) {
1953 LOG(INFO) << "holder is in the to-space.";
1954 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1955 LOG(INFO) << "holder is in the unevac from-space.";
Mathieu Chartierc381c362016-08-23 13:27:53 -07001956 if (IsMarkedInUnevacFromSpace(obj)) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001957 LOG(INFO) << "holder is marked in the region space bitmap.";
1958 } else {
1959 LOG(INFO) << "holder is not marked in the region space bitmap.";
1960 }
1961 } else {
1962 // In a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001963 if (immune_spaces_.ContainsObject(obj)) {
1964 LOG(INFO) << "holder is in an immune image or the zygote space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001965 } else {
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001966 LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001967 accounting::ContinuousSpaceBitmap* mark_bitmap =
1968 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1969 accounting::LargeObjectBitmap* los_bitmap =
1970 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1971 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1972 bool is_los = mark_bitmap == nullptr;
1973 if (!is_los && mark_bitmap->Test(obj)) {
1974 LOG(INFO) << "holder is marked in the mark bitmap.";
1975 } else if (is_los && los_bitmap->Test(obj)) {
1976 LOG(INFO) << "holder is marked in the LOS bitmap.";
1977 } else {
1978 // If ref is on the allocation stack, then it is considered
1979 // marked/alive (but not necessarily on the live stack).
1980 if (IsOnAllocStack(obj)) {
1981 LOG(INFO) << "holder is on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001982 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001983 LOG(INFO) << "holder is not marked or on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001984 }
1985 }
1986 }
1987 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001988 LOG(INFO) << "offset=" << offset.SizeValue();
1989}
1990
1991void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1992 mirror::Object* ref) {
1993 // In a non-moving space. Check that the ref is marked.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001994 if (immune_spaces_.ContainsObject(ref)) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001995 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001996 // Immune object may not be gray if called from the GC.
1997 if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
1998 return;
1999 }
2000 bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002001 CHECK(updated_all_immune_objects || ref->GetReadBarrierState() == ReadBarrier::GrayState())
2002 << "Unmarked immune space ref. obj=" << obj << " rb_state="
2003 << (obj != nullptr ? obj->GetReadBarrierState() : 0U)
2004 << " ref=" << ref << " ref rb_state=" << ref->GetReadBarrierState()
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002005 << " updated_all_immune_objects=" << updated_all_immune_objects;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07002006 }
2007 } else {
2008 accounting::ContinuousSpaceBitmap* mark_bitmap =
2009 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
2010 accounting::LargeObjectBitmap* los_bitmap =
2011 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
2012 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2013 bool is_los = mark_bitmap == nullptr;
2014 if ((!is_los && mark_bitmap->Test(ref)) ||
2015 (is_los && los_bitmap->Test(ref))) {
2016 // OK.
2017 } else {
2018 // If ref is on the allocation stack, then it may not be
2019 // marked live, but considered marked/alive (but not
2020 // necessarily on the live stack).
2021 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
2022 << "obj=" << obj << " ref=" << ref;
2023 }
2024 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002025}
2026
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002027// Used to scan ref fields of an object.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07002028class ConcurrentCopying::RefFieldsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002029 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07002030 explicit RefFieldsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002031 : collector_(collector) {}
2032
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07002033 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002034 const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
2035 REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07002036 collector_->Process(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002037 }
2038
Mathieu Chartier31e88222016-10-14 18:43:19 -07002039 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002040 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002041 CHECK(klass->IsTypeOfReferenceClass());
2042 collector_->DelayReferenceReferent(klass, ref);
2043 }
2044
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002045 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002046 ALWAYS_INLINE
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002047 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002048 if (!root->IsNull()) {
2049 VisitRoot(root);
2050 }
2051 }
2052
2053 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002054 ALWAYS_INLINE
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002055 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002056 collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002057 }
2058
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002059 private:
2060 ConcurrentCopying* const collector_;
2061};
2062
2063// Scan ref fields of an object.
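// Scan() visits every reference field of to_ref with RefFieldsVisitor (without read barriers,
// and with read barriers debug-disallowed outside of transaction mode): Process() is called for
// ordinary fields, DelayReferenceReferent() for reference classes, and MarkRoot() for native
// roots.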
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002064inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
Hiroshi Yamauchi9b60d502017-02-03 15:09:26 -08002065 if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07002066 // Avoid all read barriers while visiting references to help performance.
Hiroshi Yamauchi9b60d502017-02-03 15:09:26 -08002067 // Don't do this in transaction mode because we may read the old value of a field, which may
2068 // trigger read barriers.
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07002069 Thread::Current()->ModifyDebugDisallowReadBarrier(1);
2070 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002071 DCHECK(!region_space_->IsInFromSpace(to_ref));
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002072 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07002073 RefFieldsVisitor visitor(this);
Hiroshi Yamauchi5496f692016-02-17 13:29:59 -08002074 // Disable the read barrier for performance reasons.
2075 to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
2076 visitor, visitor);
Hiroshi Yamauchi9b60d502017-02-03 15:09:26 -08002077 if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07002078 Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
2079 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002080}
2081
2082// Process a field.
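// If Mark() returns a different (to-space) pointer, the field is updated with a weak relaxed
// CAS loop that gives up as soon as the mutator has stored something else in the field; the
// mutator's newer value is simply kept in that case.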
2083inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002084 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002085 mirror::Object* ref = obj->GetFieldObject<
2086 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
Mathieu Chartierc381c362016-08-23 13:27:53 -07002087 mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, /*kFromGCThread*/true>(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002088 if (to_ref == ref) {
2089 return;
2090 }
2091 // This may fail if the mutator writes to the field at the same time. But it's ok.
2092 mirror::Object* expected_ref = ref;
2093 mirror::Object* new_ref = to_ref;
2094 do {
2095 if (expected_ref !=
2096 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
2097 // It was updated by the mutator.
2098 break;
2099 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07002100 } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002101 false, false, kVerifyNone>(offset, expected_ref, new_ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002102}
2103
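// Sketch of the field-update protocol in Process() above, for a field of obj that initially holds
// a from-space ref A whose to-space copy is A': the GC loads A without a read barrier, Mark()
// returns A', and the weak relaxed CAS installs A' only while the field still holds A. If a
// mutator has stored some other value in the meantime, the re-read (or the CAS itself) observes
// the change and the GC backs off, leaving the mutator's value in place. The store intentionally
// skips the write barrier (CasFieldWeakRelaxedObjectWithoutWriteBarrier) since this is the GC's
// own reference fixup.
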
// Process some roots.
inline void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
  }
}

template<bool kGrayImmuneObject>
inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
  DCHECK(!root->IsNull());
  mirror::Object* const ref = root->AsMirrorPtr();
  mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
  if (to_ref != ref) {
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    // If the CAS fails, then it was updated by the mutator.
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
  }
}

inline void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* const root = roots[i];
    if (!root->IsNull()) {
      // kGrayImmuneObject is true because this is used for the thread flip.
      MarkRoot</*kGrayImmuneObject*/true>(root);
    }
  }
}

// Temporarily set gc_grays_immune_objects_ to true in a scope if the current thread is the GC
// thread.
class ConcurrentCopying::ScopedGcGraysImmuneObjects {
 public:
  explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
      : collector_(collector), enabled_(false) {
    if (kUseBakerReadBarrier &&
        collector_->thread_running_gc_ == Thread::Current() &&
        !collector_->gc_grays_immune_objects_) {
      collector_->gc_grays_immune_objects_ = true;
      enabled_ = true;
    }
  }

  ~ScopedGcGraysImmuneObjects() {
    if (kUseBakerReadBarrier &&
        collector_->thread_running_gc_ == Thread::Current() &&
        enabled_) {
      DCHECK(collector_->gc_grays_immune_objects_);
      collector_->gc_grays_immune_objects_ = false;
    }
  }

 private:
  ConcurrentCopying* const collector_;
  bool enabled_;
};

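// ScopedGcGraysImmuneObjects is an RAII guard: the constructor turns gc_grays_immune_objects_ on
// only if it was off on the GC thread, and the destructor restores it only if this instance was
// the one that flipped it (enabled_), so nested scopes leave the flag untouched.
// FillWithDummyObject() below relies on this to safely trigger the read barriers it needs.
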
// Fill the given memory block with a dummy object. Used to fill in copies of objects that were
// lost in the race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
  // barriers here because we need the updated reference to the int array class, etc. Temporarily
  // set gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in
  // MarkImmuneSpace().
  ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
  CHECK_ALIGNED(byte_size, kObjectAlignment);
  memset(dummy_obj, 0, byte_size);
  // Avoid going through the read barrier since kDisallowReadBarrierDuringScan may be enabled.
  // Explicitly mark to make sure to get an object in the to-space.
  mirror::Class* int_array_class = down_cast<mirror::Class*>(
      Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    CHECK(java_lang_Object_ != nullptr);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
    CHECK_EQ(byte_size, (java_lang_Object_->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
    dummy_obj->SetClass(java_lang_Object_);
    CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()));
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
    int32_t length = (byte_size - data_offset) / component_size;
    mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
    dummy_arr->SetLength(length);
    CHECK_EQ(dummy_arr->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()))
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

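// Example of the sizing logic in FillWithDummyObject() above, assuming a 12-byte int[] header
// (data_offset == 12) and component_size == 4: a 40-byte hole becomes an int array of length
// (40 - 12) / 4 == 7, while an 8-byte hole is smaller than data_offset and is filled with a plain
// java.lang.Object instead (whose object size must then equal the hole size exactly).
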
// Reuse the memory blocks that were copies of objects that were lost in the race.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  size_t byte_size;
  uint8_t* addr;
  {
    MutexLock mu(self, skipped_blocks_lock_);
    auto it = skipped_blocks_map_.lower_bound(alloc_size);
    if (it == skipped_blocks_map_.end()) {
      // Not found.
      return nullptr;
    }
    byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If the remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
    }
    // Found a block.
    CHECK(it != skipped_blocks_map_.end());
    byte_size = it->first;
    addr = it->second;
    CHECK_GE(byte_size, alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
    CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
    if (kVerboseMode) {
      LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
    }
    skipped_blocks_map_.erase(it);
  }
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
    CHECK_GE(byte_size - alloc_size, min_object_size);
    // FillWithDummyObject may mark an object, so avoid holding skipped_blocks_lock_ to prevent a
    // lock order violation and possible deadlock. The deadlock case is a recursive case:
    // FillWithDummyObject -> IntArray::GetArrayClass -> Mark -> Copy -> AllocateInSkippedBlock.
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    {
      MutexLock mu(self, skipped_blocks_lock_);
      skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
    }
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

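// Example of the reuse path in AllocateInSkippedBlock() above: a 24-byte request that finds a
// recorded 64-byte block removes the whole entry, uses the first 24 bytes, fills the 40-byte
// remainder with a dummy object, and re-inserts (40, addr + 24) into skipped_blocks_map_. If the
// remainder of the first candidate would be non-zero but smaller than min_object_size, the
// lower_bound lookup is retried with alloc_size + min_object_size so that the leftover can still
// hold a dummy object. The exact sizes are only illustrative; they must respect
// space::RegionSpace::kAlignment.
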
mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
                                        mirror::Object* holder,
                                        MemberOffset offset) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // If the class pointer is null, the object is invalid. This could occur for a dangling pointer
  // from a previous GC that is either inside or outside the allocated region.
  mirror::Class* klass = from_ref->GetClass<kVerifyNone, kWithoutReadBarrier>();
  if (UNLIKELY(klass == nullptr)) {
    heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
  }
  // There must not be a read barrier to avoid nested RB that might violate the to-space invariant.
  // Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
  // objects, but it's ok and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded in allocating from a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      if (UNLIKELY(to_ref == nullptr)) {
        LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
                                 << obj_size << " byte object in region type "
                                 << region_space_->GetRegionType(from_ref);
        LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
      }
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Copy the object excluding the lock word since that is handled in the loop.
  to_ref->SetClass(klass);
  const size_t kObjectHeaderSize = sizeof(mirror::Object);
  DCHECK_GE(obj_size, kObjectHeaderSize);
  static_assert(kObjectHeaderSize == sizeof(mirror::HeapReference<mirror::Class>) +
                sizeof(LockWord),
                "Object header size does not match");
  // Memcpy can tear for words since it may do byte copy. It is only safe to do this since the
  // object in the from space is immutable other than the lock word. b/31423258
  memcpy(reinterpret_cast<uint8_t*>(to_ref) + kObjectHeaderSize,
         reinterpret_cast<const uint8_t*>(from_ref) + kObjectHeaderSize,
         obj_size - kObjectHeaderSize);

  // Attempt to install the forward pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    LockWord old_lock_word = from_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
          << "to_ref=" << to_ref << " " << heap_->DumpSpaces();
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    // Copy the old lock word over since we did not copy it yet.
    to_ref->SetLockWord(old_lock_word, false);
    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierState(ReadBarrier::GrayState());
    }

    // Do a fence to prevent the field CAS in ConcurrentCopying::Process from possibly reordering
    // before the object copy.
    QuasiAtomic::ThreadFenceRelease();

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakRelaxed(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddRelaxed(1);
      bytes_moved_.FetchAndAddRelaxed(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}

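// Race resolution in Copy() above, in short: each competing thread allocates its own to-space
// copy, memcpy's the body, and then tries to CAS the from-space object's lock word to a
// forwarding address. Exactly one CAS wins. A loser turns its now-unreachable copy into a dummy
// object and either frees it (large region allocation), records it in skipped_blocks_map_ for
// reuse, or frees the non-moving-space chunk, and then adopts the winner's forwarding address.
// The ThreadFenceRelease() before the CAS orders the object copy before the publication of the
// forwarding pointer (see the comment at the fence).
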
mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (IsMarkedInUnevacFromSpace(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_spaces_.ContainsObject(from_ref)) {
      // An immune object is alive.
      to_ref = from_ref;
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

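// Summary of IsMarked() above by region type:
//   to-space              -> already marked; return from_ref.
//   from-space            -> return the forwarding address if one is installed, else nullptr.
//   unevac from-space     -> from_ref itself if IsMarkedInUnevacFromSpace(), else nullptr.
//   immune spaces         -> always treated as alive.
//   other non-moving/LOS  -> consult the space or LOS mark bitmap, then the allocation stack.
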
bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref,
                                                 mirror::Object* holder,
                                                 MemberOffset offset) {
  // ref is in a non-moving space (from_ref == to_ref).
  DCHECK(!region_space_->HasAddress(ref)) << ref;
  DCHECK(!immune_spaces_.ContainsObject(ref));
  // Use the mark bitmap.
  accounting::ContinuousSpaceBitmap* mark_bitmap =
      heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
  accounting::LargeObjectBitmap* los_bitmap =
      heap_mark_bitmap_->GetLargeObjectBitmap(ref);
  CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
  bool is_los = mark_bitmap == nullptr;
  if (!is_los && mark_bitmap->Test(ref)) {
    // Already marked.
    if (kUseBakerReadBarrier) {
      DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
             ref->GetReadBarrierState() == ReadBarrier::WhiteState());
    }
  } else if (is_los && los_bitmap->Test(ref)) {
    // Already marked in LOS.
    if (kUseBakerReadBarrier) {
      DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
             ref->GetReadBarrierState() == ReadBarrier::WhiteState());
    }
  } else {
    // Not marked.
    if (IsOnAllocStack(ref)) {
      // If it's on the allocation stack, it's considered marked. Keep it white.
      // Objects on the allocation stack need not be marked.
      if (!is_los) {
        DCHECK(!mark_bitmap->Test(ref));
      } else {
        DCHECK(!los_bitmap->Test(ref));
      }
      if (kUseBakerReadBarrier) {
        DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
      }
    } else {
      // For the baker-style RB, we need to handle 'false-gray' cases. See the
      // kRegionTypeUnevacFromSpace-case comment in Mark().
      if (kUseBakerReadBarrier) {
        // Test the bitmap first to reduce the chance of false gray cases.
        if ((!is_los && mark_bitmap->Test(ref)) ||
            (is_los && los_bitmap->Test(ref))) {
          return ref;
        }
      }
      if (is_los && !IsAligned<kPageSize>(ref)) {
        // Ref is a large object that is not aligned; it must be heap corruption. Dump data before
        // AtomicSetReadBarrierState since it will fault if the address is not valid.
        heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true);
      }
      // Not marked or on the allocation stack. Try to mark it.
      // This may or may not succeed, which is ok.
      bool cas_success = false;
      if (kUseBakerReadBarrier) {
        cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
                                                     ReadBarrier::GrayState());
      }
      if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
        // Already marked.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
          PushOntoFalseGrayStack(ref);
        }
      } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
        // Already marked in LOS.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
          PushOntoFalseGrayStack(ref);
        }
      } else {
        // Newly marked.
        if (kUseBakerReadBarrier) {
          DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
        }
        PushOntoMarkStack(ref);
      }
    }
  }
  return ref;
}

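// Marking order in MarkNonMoving() above for the Baker case: the read-barrier state is CASed
// white -> gray before the bitmap AtomicTestAndSet. If the bitmap bit turns out to be already set,
// another thread won the marking race and a successful gray transition here is a "false gray"
// that is queued on the false-gray stack for later correction; only the thread that newly sets
// the bitmap bit pushes the object onto the mark stack.
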
void ConcurrentCopying::FinishPhase() {
  Thread* const self = Thread::Current();
  {
    MutexLock mu(self, mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
  // positives.
  if (!kVerifyNoMissingCardMarks) {
    TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
    // We do not currently use the region space cards at all, so madvise them away to save RAM.
    heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
  }
  {
    MutexLock mu(self, skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    {
      WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      heap_->ClearMarkedObjects();
    }
    if (kUseBakerReadBarrier && kFilterModUnionCards) {
      TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
      ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
        DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
        accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
        // Filter out cards that don't need to be set.
        if (table != nullptr) {
          table->FilterCards();
        }
      }
    }
    if (kUseBakerReadBarrier) {
      TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings());
      DCHECK(rb_mark_bit_stack_ != nullptr);
      const auto* limit = rb_mark_bit_stack_->End();
      for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) {
        CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0));
      }
      rb_mark_bit_stack_->Reset();
    }
  }
  if (measure_read_barrier_slow_path_) {
    MutexLock mu(self, rb_slow_path_histogram_lock_);
    rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed());
    rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed();
    rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed();
  }
}

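// FinishPhase() above resets the per-GC bookkeeping: the skipped-block map is dropped, the
// region-space cards are cleared (unless kVerifyNoMissingCardMarks needs them), mark bitmaps are
// cleared, mod-union cards are filtered, the mark bits set via rb_mark_bit_stack_ are flipped
// back, and, when measuring, the read-barrier slow-path counters are folded into the cumulative
// totals and histogram.
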
bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                                    bool do_atomic_update) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  if (from_ref == nullptr) {
    return true;
  }
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    if (do_atomic_update) {
      do {
        if (field->AsMirrorPtr() != from_ref) {
          // Concurrently overwritten by a mutator.
          break;
        }
      } while (!field->CasWeakRelaxed(from_ref, to_ref));
    } else {
      QuasiAtomic::ThreadFenceRelease();
      field->Assign(to_ref);
      QuasiAtomic::ThreadFenceSequentiallyConsistent();
    }
  }
  return true;
}

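// IsNullOrMarkedHeapReference() above is the weak-reference variant of the field fixup: a null
// referent counts as marked, an unmarked referent reports false to the caller, and a moved
// referent is written back either with the same lost-update CAS loop used in Process()
// (do_atomic_update) or with a plain Assign() bracketed by fences.
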
mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                               ObjPtr<mirror::Reference> reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
  if (Thread::Current() != thread_running_gc_) {
    rb_slow_path_count_.FetchAndAddRelaxed(1u);
  } else {
    rb_slow_path_count_gc_.FetchAndAddRelaxed(1u);
  }
  ScopedTrace tr(__FUNCTION__);
  const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
  mirror::Object* ret = Mark(from_ref);
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time);
  }
  return ret;
}

void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
  GarbageCollector::DumpPerformanceInfo(os);
  MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
  if (rb_slow_path_time_histogram_.SampleSize() > 0) {
    Histogram<uint64_t>::CumulativeData cumulative_data;
    rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
    rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
  }
  if (rb_slow_path_count_total_ > 0) {
    os << "Slow path count " << rb_slow_path_count_total_ << "\n";
  }
  if (rb_slow_path_count_gc_total_ > 0) {
    os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
  }
  os << "Cumulative bytes moved " << cumulative_bytes_moved_.LoadRelaxed() << "\n";
  os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n";
}

}  // namespace collector
}  // namespace gc
}  // namespace art