/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/stl_util.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

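// Set up the collector: create the GC mark stack and a pool of thread-local mark stacks, cache
// the heap mark bitmap, and verify that the region space and the read barrier table agree on the
// region size.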
ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     2 * MB, 2 * MB)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark() which could cause a nested lock on heap_bitmap_lock_
    // when GC causes a RB while doing GC or a lock order violation
    // (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references, should be OK to not have a CAS here since there should be
  // no other threads which can trigger read barriers on the same referent during reference
  // processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

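// Top-level driver of a GC cycle: initialize, flip the thread roots in a pause, mark
// concurrently, optionally verify that no from-space refs remain (paused), then reclaim and
// finish.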
void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

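// Bind the mark bitmaps: treat the image and zygote spaces as immune and create dedicated
// bitmaps for them and for the region space.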
void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

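// Reset per-GC state: counters, the immune region, and the force_evacuate_all_ flag (explicit
// and native-alloc GCs, and clear-soft-references requests, evacuate all regions), then bind the
// bitmaps.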
void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks(self);
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks(Thread* self) {
  heap_->SwapStacks(self);
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack(obj);
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

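// A checkpoint closure that does no work; issuing it on all threads ensures that every running
// mutator has passed a synchronization point (see IssueEmptyCheckpoint()).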
class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots by relying on the card table,
    // but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  // TODO: Other garbage collectors use Runtime::VisitConcurrentRoots(); refactor this part
  // to use the same function as well.
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }
  Runtime::Current()->GetHeap()->VisitAllocationRecords(this);

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but after we disable weak ref accesses we can no longer use a checkpoint (running threads
    // could be blocked at WaitHoldingLocks, which would deadlock), and that once we reach the
    // point where we process weak references, we can avoid using a lock when accessing the GC
    // mark stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we are processing the mark stack and that
    // newly mark/gray objects and push refs on the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process new
    // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
    // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
    // important to do these together in a single checkpoint so that we can ensure that mutators
    // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
    // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
    // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
    // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the above
    // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
    // (via read barriers) have no way to produce any more refs to process. Marking converges once
    // before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
    // marked some objects (strings) alive, as hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Issue an empty checkpoint to ensure no threads are still in the middle of a read barrier
    // which may have a from-space ref cached in a local variable.
    IssueEmptyCheckpoint();
    // Marking is done. Disable marking.
    if (kUseTableLookupReadBarrier) {
      heap_->rb_table_->ClearAll();
      DCHECK(heap_->rb_table_->IsAllCleared());
    }
    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
    is_marking_ = false;  // This disables the read barrier/marking of weak roots.
    mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

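// Re-enable weak reference access for all threads and wake up any threads blocked waiting on it.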
void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

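// Run the empty checkpoint on all threads and wait on the barrier until every running mutator
// has passed it.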
void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // already finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

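// Push a reference onto the mark stack that matches the current mark stack mode: a thread-local
// stack (or the GC mark stack when called by the GC thread), the shared stack under a lock, or
// the GC-exclusive stack without a lock.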
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    if (self == thread_running_gc_) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      CHECK(!gc_mark_stack_->IsFull());
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

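// Return the forwarding pointer of a from-space object if it has already been copied, or null if
// its lock word does not hold a forwarding address yet.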
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// The following visitors are used to verify that there are no references to the from-space
// left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
         it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

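// Checkpoint closure that hands a thread's local mark stack over to the GC (into
// revoked_mark_stacks_) and optionally disables weak reference access for that thread.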
class RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  explicit RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                                bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

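// Run the revoke checkpoint on all threads so that every thread-local mark stack is collected,
// then wait for the running mutators to pass the barrier.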
void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // already finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

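// Drain the mark stack(s) repeatedly until they are observed empty twice in a row, since
// processing an entry can push new references.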
void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}

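// Process each mark stack once according to the current mark stack mode. Returns true if no
// references were processed, i.e. the stacks were empty.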
bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
             p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}

size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
  // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
  RevokeThreadLocalMarkStacks(disable_weak_ref_access);
  size_t count = 0;
  std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    // Make a copy of the mark stack vector.
    mark_stacks = revoked_mark_stacks_;
    revoked_mark_stacks_.clear();
  }
  for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
    for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
      mirror::Object* to_ref = p->AsMirrorPtr();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    {
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
        // The pool has enough. Delete it.
        delete mark_stack;
      } else {
        // Otherwise, put it into the pool for later reuse.
        mark_stack->Reset();
        pooled_mark_stacks_.push_back(mark_stack);
      }
    }
  }
  return count;
}

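// Scan a single to-space reference popped off a mark stack: visit its fields, then update its
// read barrier state (java.lang.ref.Reference instances whose referent is not yet in the
// to-space are left gray so that GetReferent() still triggers a read barrier).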
void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  // Scan ref fields.
  Scan(to_ref);
  // Mark the gray ref as white or black.
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
      to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
      !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
    // Leave References gray so that GetReferent() will trigger RB.
    CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
  } else {
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
    if (kUseBakerReadBarrier) {
      if (region_space_->IsInToSpace(to_ref)) {
        // If to-space, change from gray to white.
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::WhitePtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
      } else {
        // If non-moving space/unevac from space, change from gray
        // to black. We can't change gray to white because it's not
        // safe to use CAS if two threads change values in opposite
        // directions (A->B and B->A). So, we change it to black to
        // indicate non-moving objects that have been marked
        // through. Note we'd need to change from black to white
        // later (concurrently).
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::BlackPtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
      }
    }
#else
    DCHECK(!kUseBakerReadBarrier);
#endif
  }
  if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
    visitor(to_ref);
  }
}

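// Switch from the thread-local to the shared mark stack mode and disable weak reference access,
// then drain the thread-local mark stacks one last time.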
void ConcurrentCopying::SwitchToSharedMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeThreadLocal));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
  CHECK(weak_ref_access_enabled_.LoadRelaxed());
  weak_ref_access_enabled_.StoreRelaxed(false);
  QuasiAtomic::ThreadFenceForConstructor();
  // Process the thread local mark stacks one last time after switching to the shared mark stack
  // mode and disable weak ref accesses.
  ProcessThreadLocalMarkStacks(true);
  if (kVerboseMode) {
    LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
  }
}

void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeShared));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "Switched to GC exclusive mark stack mode";
  }
}

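// Verify that all mark stacks are empty for the current mode; in the thread-local mode this
// first revokes the thread-local stacks, and any leftover entries are dumped before aborting.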
void ConcurrentCopying::CheckEmptyMarkStack() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Thread-local mark stack mode.
    RevokeThreadLocalMarkStacks(false);
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (!revoked_mark_stacks_.empty()) {
      for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
        while (!mark_stack->IsEmpty()) {
          mirror::Object* obj = mark_stack->PopBack();
          if (kUseBakerReadBarrier) {
            mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
                      << " is_marked=" << IsMarked(obj);
          } else {
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                      << " is_marked=" << IsMarked(obj);
          }
        }
      }
      LOG(FATAL) << "mark stack is not empty";
    }
  } else {
    // Shared, GC-exclusive, or off.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(gc_mark_stack_->IsEmpty());
    CHECK(revoked_mark_stacks_.empty());
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

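// Sweep the non-moving, non-immune spaces: mark everything on the live stack as live, then free
// unmarked objects in each continuous alloc space and in the large object space.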
void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CheckEmptyMarkStack();
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

1152class ConcurrentCopyingClearBlackPtrsVisitor {
1153 public:
1154 explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
1155 : collector_(cc) {}
Andreas Gampe65b798e2015-04-06 09:35:22 -07001156#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
1157 NO_RETURN
1158#endif
Mathieu Chartier90443472015-07-16 20:32:27 -07001159 void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
1160 SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001161 DCHECK(obj != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001162 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
1163 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07001164 obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001165 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001166 }
1167
1168 private:
1169 ConcurrentCopying* const collector_;
1170};
1171
1172// Clear the black ptrs in non-moving objects back to white.
1173void ConcurrentCopying::ClearBlackPtrs() {
1174 CHECK(kUseBakerReadBarrier);
1175 TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
1176 ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
1177 for (auto& space : heap_->GetContinuousSpaces()) {
1178 if (space == region_space_) {
1179 continue;
1180 }
1181 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1182 if (kVerboseMode) {
1183 LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
1184 }
1185 mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
1186 reinterpret_cast<uintptr_t>(space->Limit()),
1187 visitor);
1188 }
1189 space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
1190 large_object_space->GetMarkBitmap()->VisitMarkedRange(
1191 reinterpret_cast<uintptr_t>(large_object_space->Begin()),
1192 reinterpret_cast<uintptr_t>(large_object_space->End()),
1193 visitor);
1194  // Objects on the allocation stack should already have white ptrs; verify.
1195 if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
1196 size_t count = GetAllocationStack()->Size();
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001197 auto* it = GetAllocationStack()->Begin();
1198 auto* end = GetAllocationStack()->End();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001199 for (size_t i = 0; i < count; ++i, ++it) {
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001200 CHECK_LT(it, end);
1201 mirror::Object* obj = it->AsMirrorPtr();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001202 if (obj != nullptr) {
1203 // Must have been cleared above.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001204 CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001205 }
1206 }
1207 }
1208}
1209
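// ReclaimPhase, roughly: (1) verify via an empty checkpoint that the mark
// stack stays empty, (2) record the bytes/objects freed by comparing
// from-space and to-space allocation counts, (3) compute per-region live
// bytes for the unevacuated from-space, (4) clear the from-space regions,
// and (5) with Baker barriers reset black ptrs to white, then sweep the
// non-moving spaces, swap bitmaps, and drop the per-cycle immune-space
// bitmaps.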
1210void ConcurrentCopying::ReclaimPhase() {
1211 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1212 if (kVerboseMode) {
1213 LOG(INFO) << "GC ReclaimPhase";
1214 }
1215 Thread* self = Thread::Current();
1216
1217 {
1218 // Double-check that the mark stack is empty.
1219 // Note: need to set this after VerifyNoFromSpaceRef().
1220 is_asserting_to_space_invariant_ = false;
1221 QuasiAtomic::ThreadFenceForConstructor();
1222 if (kVerboseMode) {
1223      LOG(INFO) << "Issue an empty checkpoint.";
1224 }
1225 IssueEmptyCheckpoint();
1226 // Disable the check.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001227 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
1228 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001229 }
1230
1231 {
1232 // Record freed objects.
1233 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1234 // Don't include thread-locals that are in the to-space.
1235 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1236 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1237 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1238 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
1239 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
1240 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
1241 if (kEnableFromSpaceAccountingCheck) {
1242 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1243 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1244 }
1245 CHECK_LE(to_objects, from_objects);
1246 CHECK_LE(to_bytes, from_bytes);
1247 int64_t freed_bytes = from_bytes - to_bytes;
1248 int64_t freed_objects = from_objects - to_objects;
1249 if (kVerboseMode) {
1250 LOG(INFO) << "RecordFree:"
1251 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1252 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1253 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1254 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1255 << " from_space size=" << region_space_->FromSpaceSize()
1256 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1257 << " to_space size=" << region_space_->ToSpaceSize();
1258 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1259 }
1260 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1261 if (kVerboseMode) {
1262 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1263 }
1264 }
1265
1266 {
1267 TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
1268 ComputeUnevacFromSpaceLiveRatio();
1269 }
1270
1271 {
1272 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1273 region_space_->ClearFromSpace();
1274 }
1275
1276 {
1277 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1278 if (kUseBakerReadBarrier) {
1279 ClearBlackPtrs();
1280 }
1281 Sweep(false);
1282 SwapBitmaps();
1283 heap_->UnBindBitmaps();
1284
1285 // Remove bitmaps for the immune spaces.
1286 while (!cc_bitmaps_.empty()) {
1287 accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
1288 cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
1289 delete cc_bitmap;
1290 cc_bitmaps_.pop_back();
1291 }
1292 region_space_bitmap_ = nullptr;
1293 }
1294
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001295 CheckEmptyMarkStack();
1296
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001297 if (kVerboseMode) {
1298 LOG(INFO) << "GC end of ReclaimPhase";
1299 }
1300}
1301
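// The visitor below runs over the unevac from-space mark bitmap. It resets
// the Baker black ptrs (when Baker read barriers are in use) and accumulates
// live bytes per region via AddLiveBytes(); the region space can then use the
// resulting live ratios when choosing which regions to evacuate in a later
// cycle.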
1302class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
1303 public:
1304 explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
1305 : collector_(cc) {}
Mathieu Chartier90443472015-07-16 20:32:27 -07001306 void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_)
1307 SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001308 DCHECK(ref != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001309 DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
1310 DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001311 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001312 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001313 // Clear the black ptr.
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07001314 ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
1315 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001316 }
1317 size_t obj_size = ref->SizeOf();
1318 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1319 collector_->region_space_->AddLiveBytes(ref, alloc_size);
1320 }
1321
1322 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001323 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001324};
1325
1326// Compute how many live bytes are left in the unevacuated from-space regions.
1327void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
1328 region_space_->AssertAllRegionLiveBytesZeroOrCleared();
1329 ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
1330 region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
1331 reinterpret_cast<uintptr_t>(region_space_->Limit()),
1332 visitor);
1333}
1334
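// The to-space invariant: while the collector is running, no mutator-visible
// reference may point into the evacuated from-space; every reference must
// point into the to-space, into the unevacuated from-space (and be marked in
// the region bitmap), or into a non-moving space (and be marked or sit on the
// allocation stack). The assertions below check exactly that and dump
// diagnostics on a violation.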
1335// Assert the to-space invariant.
1336void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
1337 mirror::Object* ref) {
1338 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1339 if (is_asserting_to_space_invariant_) {
1340 if (region_space_->IsInToSpace(ref)) {
1341 // OK.
1342 return;
1343 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1344 CHECK(region_space_bitmap_->Test(ref)) << ref;
1345 } else if (region_space_->IsInFromSpace(ref)) {
1346 // Not OK. Do extra logging.
1347 if (obj != nullptr) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001348 LogFromSpaceRefHolder(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001349 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001350 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001351 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1352 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001353 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1354 }
1355 }
1356}
1357
1358class RootPrinter {
1359 public:
1360 RootPrinter() { }
1361
1362 template <class MirrorType>
1363 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001364 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001365 if (!root->IsNull()) {
1366 VisitRoot(root);
1367 }
1368 }
1369
1370 template <class MirrorType>
1371 void VisitRoot(mirror::Object** root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001372 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001373 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
1374 }
1375
1376 template <class MirrorType>
1377 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001378 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001379 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
1380 }
1381};
1382
1383void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1384 mirror::Object* ref) {
1385 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1386 if (is_asserting_to_space_invariant_) {
1387 if (region_space_->IsInToSpace(ref)) {
1388 // OK.
1389 return;
1390 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1391 CHECK(region_space_bitmap_->Test(ref)) << ref;
1392 } else if (region_space_->IsInFromSpace(ref)) {
1393 // Not OK. Do extra logging.
1394 if (gc_root_source == nullptr) {
1395 // No info.
1396 } else if (gc_root_source->HasArtField()) {
1397 ArtField* field = gc_root_source->GetArtField();
1398 LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
1399 RootPrinter root_printer;
1400 field->VisitRoots(root_printer);
1401 } else if (gc_root_source->HasArtMethod()) {
1402 ArtMethod* method = gc_root_source->GetArtMethod();
1403 LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
1404 RootPrinter root_printer;
1405 method->VisitRoots(root_printer);
1406 }
1407 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1408 region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
1409 PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
1410 MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
1411 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1412 } else {
1413 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1414 }
1415 }
1416}
1417
1418void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1419 if (kUseBakerReadBarrier) {
1420 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1421 << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1422 } else {
1423 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1424 }
1425 if (region_space_->IsInFromSpace(obj)) {
1426 LOG(INFO) << "holder is in the from-space.";
1427 } else if (region_space_->IsInToSpace(obj)) {
1428 LOG(INFO) << "holder is in the to-space.";
1429 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1430 LOG(INFO) << "holder is in the unevac from-space.";
1431 if (region_space_bitmap_->Test(obj)) {
1432 LOG(INFO) << "holder is marked in the region space bitmap.";
1433 } else {
1434 LOG(INFO) << "holder is not marked in the region space bitmap.";
1435 }
1436 } else {
1437 // In a non-moving space.
1438 if (immune_region_.ContainsObject(obj)) {
1439 LOG(INFO) << "holder is in the image or the zygote space.";
1440 accounting::ContinuousSpaceBitmap* cc_bitmap =
1441 cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
1442 CHECK(cc_bitmap != nullptr)
1443 << "An immune space object must have a bitmap.";
1444 if (cc_bitmap->Test(obj)) {
1445 LOG(INFO) << "holder is marked in the bit map.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001446 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001447 LOG(INFO) << "holder is NOT marked in the bit map.";
1448 }
1449 } else {
1450 LOG(INFO) << "holder is in a non-moving (or main) space.";
1451 accounting::ContinuousSpaceBitmap* mark_bitmap =
1452 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1453 accounting::LargeObjectBitmap* los_bitmap =
1454 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1455 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1456 bool is_los = mark_bitmap == nullptr;
1457 if (!is_los && mark_bitmap->Test(obj)) {
1458 LOG(INFO) << "holder is marked in the mark bit map.";
1459 } else if (is_los && los_bitmap->Test(obj)) {
1460 LOG(INFO) << "holder is marked in the los bit map.";
1461 } else {
1462        // If the holder is on the allocation stack, then it is considered
1463        // marked/alive (but not necessarily on the live stack).
1464 if (IsOnAllocStack(obj)) {
1465 LOG(INFO) << "holder is on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001466 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001467 LOG(INFO) << "holder is not marked or on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001468 }
1469 }
1470 }
1471 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001472 LOG(INFO) << "offset=" << offset.SizeValue();
1473}
1474
1475void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1476 mirror::Object* ref) {
1477  // In a non-moving space. Check that the ref is marked.
1478 if (immune_region_.ContainsObject(ref)) {
1479 accounting::ContinuousSpaceBitmap* cc_bitmap =
1480 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
1481 CHECK(cc_bitmap != nullptr)
1482 << "An immune space ref must have a bitmap. " << ref;
1483 if (kUseBakerReadBarrier) {
1484 CHECK(cc_bitmap->Test(ref))
1485 << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
1486 << obj->GetReadBarrierPointer() << " ref=" << ref;
1487 } else {
1488 CHECK(cc_bitmap->Test(ref))
1489 << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
1490 }
1491 } else {
1492 accounting::ContinuousSpaceBitmap* mark_bitmap =
1493 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1494 accounting::LargeObjectBitmap* los_bitmap =
1495 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1496 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1497 bool is_los = mark_bitmap == nullptr;
1498 if ((!is_los && mark_bitmap->Test(ref)) ||
1499 (is_los && los_bitmap->Test(ref))) {
1500 // OK.
1501 } else {
1502      // If ref is on the allocation stack, then it may not be marked
1503      // in the bitmaps, but it is still considered marked/alive (though
1504      // not necessarily on the live stack).
1505 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1506 << "obj=" << obj << " ref=" << ref;
1507 }
1508 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001509}
1510
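// Object scanning pipeline: Scan() visits all reference fields of a to-space
// object with ConcurrentCopyingRefFieldsVisitor, which funnels plain fields
// into Process(), java.lang.ref referents into DelayReferenceReferent(), and
// native GC roots held by the object into MarkRoot().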
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001511// Used to scan ref fields of an object.
1512class ConcurrentCopyingRefFieldsVisitor {
1513 public:
1514 explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
1515 : collector_(collector) {}
1516
1517 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
Mathieu Chartier90443472015-07-16 20:32:27 -07001518 const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
1519 SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001520 collector_->Process(obj, offset);
1521 }
1522
1523 void operator()(mirror::Class* klass, mirror::Reference* ref) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001524 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001525 CHECK(klass->IsTypeOfReferenceClass());
1526 collector_->DelayReferenceReferent(klass, ref);
1527 }
1528
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001529 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1530 SHARED_REQUIRES(Locks::mutator_lock_) {
1531 if (!root->IsNull()) {
1532 VisitRoot(root);
1533 }
1534 }
1535
1536 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1537 SHARED_REQUIRES(Locks::mutator_lock_) {
1538 collector_->MarkRoot(root);
1539 }
1540
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001541 private:
1542 ConcurrentCopying* const collector_;
1543};
1544
1545// Scan ref fields of an object.
1546void ConcurrentCopying::Scan(mirror::Object* to_ref) {
1547 DCHECK(!region_space_->IsInFromSpace(to_ref));
1548 ConcurrentCopyingRefFieldsVisitor visitor(this);
1549 to_ref->VisitReferences<true>(visitor, visitor);
1550}
1551
1552// Process a field.
1553inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001554 mirror::Object* ref = obj->GetFieldObject<
1555 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001556 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1557 return;
1558 }
1559 mirror::Object* to_ref = Mark(ref);
1560 if (to_ref == ref) {
1561 return;
1562 }
1563 // This may fail if the mutator writes to the field at the same time. But it's ok.
1564 mirror::Object* expected_ref = ref;
1565 mirror::Object* new_ref = to_ref;
1566 do {
1567 if (expected_ref !=
1568 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1569 // It was updated by the mutator.
1570 break;
1571 }
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001572 } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
1573 false, false, kVerifyNone>(offset, expected_ref, new_ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001574}
1575
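// Process() above and the root visitors below share the same lock-free update
// pattern: mark the referent, then CAS the slot from the old value to the
// forwarded value, giving up if a mutator has already stored something else.
// A simplified sketch of that pattern (illustrative only):
//
//   mirror::Object* to_ref = Mark(ref);
//   do {
//     if (slot->LoadRelaxed() != ref) {
//       break;  // A mutator updated the slot; its value wins.
//     }
//   } while (!slot->CompareExchangeWeakSequentiallyConsistent(ref, to_ref));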
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001576// Process some roots.
1577void ConcurrentCopying::VisitRoots(
1578 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1579 for (size_t i = 0; i < count; ++i) {
1580 mirror::Object** root = roots[i];
1581 mirror::Object* ref = *root;
1582 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -07001583 continue;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001584 }
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001585 mirror::Object* to_ref = Mark(ref);
1586 if (to_ref == ref) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -07001587 continue;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001588 }
1589 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1590 mirror::Object* expected_ref = ref;
1591 mirror::Object* new_ref = to_ref;
1592 do {
1593 if (expected_ref != addr->LoadRelaxed()) {
1594 // It was updated by the mutator.
1595 break;
1596 }
1597 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1598 }
1599}
1600
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001601void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
1602 DCHECK(!root->IsNull());
1603 mirror::Object* const ref = root->AsMirrorPtr();
1604 if (region_space_->IsInToSpace(ref)) {
1605 return;
1606 }
1607 mirror::Object* to_ref = Mark(ref);
1608 if (to_ref != ref) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001609 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1610 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1611 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001612 // If the cas fails, then it was updated by the mutator.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001613 do {
1614 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1615 // It was updated by the mutator.
1616 break;
1617 }
1618 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1619 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001620}
1621
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001622void ConcurrentCopying::VisitRoots(
1623 mirror::CompressedReference<mirror::Object>** roots, size_t count,
1624 const RootInfo& info ATTRIBUTE_UNUSED) {
1625 for (size_t i = 0; i < count; ++i) {
1626 mirror::CompressedReference<mirror::Object>* const root = roots[i];
1627 if (!root->IsNull()) {
1628 MarkRoot(root);
1629 }
1630 }
1631}
1632
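// FillWithDummyObject keeps the heap parseable: a block that lost the
// forwarding-pointer race still has to look like a valid object (an int[] of
// the right length, or java.lang.Object when the block is too small for the
// array header), so bitmap walks and heap dumps do not trip over raw bytes.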
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001633// Fill the given memory block with a dummy object. Used to fill in a
1634// copy of an object that was lost in a race.
1635void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
Roland Levillain14d90572015-07-16 10:52:26 +01001636 CHECK_ALIGNED(byte_size, kObjectAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001637 memset(dummy_obj, 0, byte_size);
1638 mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
1639 CHECK(int_array_class != nullptr);
1640 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
1641 size_t component_size = int_array_class->GetComponentSize();
1642 CHECK_EQ(component_size, sizeof(int32_t));
1643 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
1644 if (data_offset > byte_size) {
1645 // An int array is too big. Use java.lang.Object.
1646 mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1647 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
1648 CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
1649 dummy_obj->SetClass(java_lang_Object);
1650 CHECK_EQ(byte_size, dummy_obj->SizeOf());
1651 } else {
1652 // Use an int array.
1653 dummy_obj->SetClass(int_array_class);
1654 CHECK(dummy_obj->IsArrayInstance());
1655 int32_t length = (byte_size - data_offset) / component_size;
1656 dummy_obj->AsArray()->SetLength(length);
1657 CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
1658 << "byte_size=" << byte_size << " length=" << length
1659 << " component_size=" << component_size << " data_offset=" << data_offset;
1660 CHECK_EQ(byte_size, dummy_obj->SizeOf())
1661 << "byte_size=" << byte_size << " length=" << length
1662 << " component_size=" << component_size << " data_offset=" << data_offset;
1663 }
1664}
1665
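// skipped_blocks_map_ maps block size to block address. Lookup is best-fit
// via lower_bound(); if the best fit would leave a tail smaller than the
// minimum object size, a larger block is requested instead so the remainder
// can itself be turned into a dummy object and re-inserted into the map.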
1666// Reuse the memory blocks that were copies of objects lost in races.
1667mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1668 // Try to reuse the blocks that were unused due to CAS failures.
Roland Levillain14d90572015-07-16 10:52:26 +01001669 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001670 Thread* self = Thread::Current();
1671 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
1672 MutexLock mu(self, skipped_blocks_lock_);
1673 auto it = skipped_blocks_map_.lower_bound(alloc_size);
1674 if (it == skipped_blocks_map_.end()) {
1675 // Not found.
1676 return nullptr;
1677 }
1678 {
1679 size_t byte_size = it->first;
1680 CHECK_GE(byte_size, alloc_size);
1681 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1682 // If remainder would be too small for a dummy object, retry with a larger request size.
1683 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1684 if (it == skipped_blocks_map_.end()) {
1685 // Not found.
1686 return nullptr;
1687 }
Roland Levillain14d90572015-07-16 10:52:26 +01001688 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001689 CHECK_GE(it->first - alloc_size, min_object_size)
1690 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1691 }
1692 }
1693 // Found a block.
1694 CHECK(it != skipped_blocks_map_.end());
1695 size_t byte_size = it->first;
1696 uint8_t* addr = it->second;
1697 CHECK_GE(byte_size, alloc_size);
1698 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
Roland Levillain14d90572015-07-16 10:52:26 +01001699 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001700 if (kVerboseMode) {
1701 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1702 }
1703 skipped_blocks_map_.erase(it);
1704 memset(addr, 0, byte_size);
1705 if (byte_size > alloc_size) {
1706 // Return the remainder to the map.
Roland Levillain14d90572015-07-16 10:52:26 +01001707 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001708 CHECK_GE(byte_size - alloc_size, min_object_size);
1709 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1710 byte_size - alloc_size);
1711 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
1712 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1713 }
1714 return reinterpret_cast<mirror::Object*>(addr);
1715}
1716
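// Copy(), roughly: allocate a to-space block (region space first, then the
// skipped-blocks free list, then the non-moving space as a last resort),
// memcpy the object, and try to install a forwarding address into the
// from-space object's lock word with a CAS. The winner publishes the copy
// (gray, pushed on the mark stack); a loser turns its now-unused copy into a
// dummy object and either frees it or records it for reuse.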
1717mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1718 DCHECK(region_space_->IsInFromSpace(from_ref));
1719 // No read barrier to avoid nested RB that might violate the to-space
1720  // invariant. Note that from_ref is a from-space ref, so the SizeOf()
1721 // call will access the from-space meta objects, but it's ok and necessary.
1722 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1723 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1724 size_t region_space_bytes_allocated = 0U;
1725 size_t non_moving_space_bytes_allocated = 0U;
1726 size_t bytes_allocated = 0U;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001727 size_t dummy;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001728 mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001729 region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001730 bytes_allocated = region_space_bytes_allocated;
1731 if (to_ref != nullptr) {
1732 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1733 }
1734 bool fall_back_to_non_moving = false;
1735 if (UNLIKELY(to_ref == nullptr)) {
1736 // Failed to allocate in the region space. Try the skipped blocks.
1737 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1738 if (to_ref != nullptr) {
1739 // Succeeded to allocate in a skipped block.
1740 if (heap_->use_tlab_) {
1741 // This is necessary for the tlab case as it's not accounted in the space.
1742 region_space_->RecordAlloc(to_ref);
1743 }
1744 bytes_allocated = region_space_alloc_size;
1745 } else {
1746 // Fall back to the non-moving space.
1747 fall_back_to_non_moving = true;
1748 if (kVerboseMode) {
1749 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1750 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1751 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1752 }
1753 fall_back_to_non_moving = true;
1754 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001755 &non_moving_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001756 CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1757 bytes_allocated = non_moving_space_bytes_allocated;
1758 // Mark it in the mark bitmap.
1759 accounting::ContinuousSpaceBitmap* mark_bitmap =
1760 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1761 CHECK(mark_bitmap != nullptr);
1762 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1763 }
1764 }
1765 DCHECK(to_ref != nullptr);
1766
1767 // Attempt to install the forward pointer. This is in a loop as the
1768 // lock word atomic write can fail.
1769 while (true) {
1770 // Copy the object. TODO: copy only the lockword in the second iteration and on?
1771 memcpy(to_ref, from_ref, obj_size);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001772
1773 LockWord old_lock_word = to_ref->GetLockWord(false);
1774
1775 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
1776 // Lost the race. Another thread (either GC or mutator) stored
1777 // the forwarding pointer first. Make the lost copy (to_ref)
1778 // look like a valid but dead (dummy) object and keep it for
1779 // future reuse.
1780 FillWithDummyObject(to_ref, bytes_allocated);
1781 if (!fall_back_to_non_moving) {
1782 DCHECK(region_space_->IsInToSpace(to_ref));
1783 if (bytes_allocated > space::RegionSpace::kRegionSize) {
1784 // Free the large alloc.
1785 region_space_->FreeLarge(to_ref, bytes_allocated);
1786 } else {
1787 // Record the lost copy for later reuse.
1788 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1789 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1790 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
1791 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1792 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
1793 reinterpret_cast<uint8_t*>(to_ref)));
1794 }
1795 } else {
1796 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1797 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1798 // Free the non-moving-space chunk.
1799 accounting::ContinuousSpaceBitmap* mark_bitmap =
1800 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1801 CHECK(mark_bitmap != nullptr);
1802 CHECK(mark_bitmap->Clear(to_ref));
1803 heap_->non_moving_space_->Free(Thread::Current(), to_ref);
1804 }
1805
1806 // Get the winner's forward ptr.
1807 mirror::Object* lost_fwd_ptr = to_ref;
1808 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
1809 CHECK(to_ref != nullptr);
1810 CHECK_NE(to_ref, lost_fwd_ptr);
1811 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
1812 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1813 return to_ref;
1814 }
1815
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07001816 // Set the gray ptr.
1817 if (kUseBakerReadBarrier) {
1818 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
1819 }
1820
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001821 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
1822
1823 // Try to atomically write the fwd ptr.
1824 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
1825 if (LIKELY(success)) {
1826 // The CAS succeeded.
1827 objects_moved_.FetchAndAddSequentiallyConsistent(1);
1828 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
1829 if (LIKELY(!fall_back_to_non_moving)) {
1830 DCHECK(region_space_->IsInToSpace(to_ref));
1831 } else {
1832 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1833 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1834 }
1835 if (kUseBakerReadBarrier) {
1836 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1837 }
1838 DCHECK(GetFwdPtr(from_ref) == to_ref);
1839 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001840 PushOntoMarkStack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001841 return to_ref;
1842 } else {
1843 // The CAS failed. It may have lost the race or may have failed
1844 // due to monitor/hashcode ops. Either way, retry.
1845 }
1846 }
1847}
1848
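// IsMarked() answers "is from_ref reachable so far?" without marking:
// to-space refs are trivially marked; from-space refs are marked iff they
// have a forwarding address; unevac from-space refs consult the region
// bitmap; non-moving refs consult the immune-space bitmap, the mark/LOS
// bitmaps, or the allocation stack.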
1849mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
1850 DCHECK(from_ref != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001851 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1852 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001853 // It's already marked.
1854 return from_ref;
1855 }
1856 mirror::Object* to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001857 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001858 to_ref = GetFwdPtr(from_ref);
1859 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
1860 heap_->non_moving_space_->HasAddress(to_ref))
1861 << "from_ref=" << from_ref << " to_ref=" << to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001862 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001863 if (region_space_bitmap_->Test(from_ref)) {
1864 to_ref = from_ref;
1865 } else {
1866 to_ref = nullptr;
1867 }
1868 } else {
1869 // from_ref is in a non-moving space.
1870 if (immune_region_.ContainsObject(from_ref)) {
1871 accounting::ContinuousSpaceBitmap* cc_bitmap =
1872 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1873 DCHECK(cc_bitmap != nullptr)
1874 << "An immune space object must have a bitmap";
1875 if (kIsDebugBuild) {
1876 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1877 << "Immune space object must be already marked";
1878 }
1879 if (cc_bitmap->Test(from_ref)) {
1880 // Already marked.
1881 to_ref = from_ref;
1882 } else {
1883 // Newly marked.
1884 to_ref = nullptr;
1885 }
1886 } else {
1887 // Non-immune non-moving space. Use the mark bitmap.
1888 accounting::ContinuousSpaceBitmap* mark_bitmap =
1889 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1890 accounting::LargeObjectBitmap* los_bitmap =
1891 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1892 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1893 bool is_los = mark_bitmap == nullptr;
1894 if (!is_los && mark_bitmap->Test(from_ref)) {
1895 // Already marked.
1896 to_ref = from_ref;
1897 } else if (is_los && los_bitmap->Test(from_ref)) {
1898 // Already marked in LOS.
1899 to_ref = from_ref;
1900 } else {
1901 // Not marked.
1902 if (IsOnAllocStack(from_ref)) {
1903 // If on the allocation stack, it's considered marked.
1904 to_ref = from_ref;
1905 } else {
1906 // Not marked.
1907 to_ref = nullptr;
1908 }
1909 }
1910 }
1911 }
1912 return to_ref;
1913}
1914
1915bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
1916 QuasiAtomic::ThreadFenceAcquire();
1917 accounting::ObjectStack* alloc_stack = GetAllocationStack();
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001918 return alloc_stack->Contains(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001919}
1920
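// Mark() is the single entry point shared by the read barrier slow path, root
// visiting, and field processing. It dispatches on where from_ref lives:
// from-space refs are copied (or their existing forwarding address returned),
// unevac from-space and non-moving refs are marked in place in their bitmaps,
// and newly marked objects are CASed white -> gray and pushed on the mark
// stack so their fields get scanned later.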
1921mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
1922 if (from_ref == nullptr) {
1923 return nullptr;
1924 }
1925 DCHECK(from_ref != nullptr);
1926 DCHECK(heap_->collector_type_ == kCollectorTypeCC);
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07001927 if (kUseBakerReadBarrier && !is_active_) {
1928 // In the lock word forward address state, the read barrier bits
1929 // in the lock word are part of the stored forwarding address and
1930 // invalid. This is usually OK as the from-space copy of objects
1931 // aren't accessed by mutators due to the to-space
1932 // invariant. However, during the dex2oat image writing relocation
1933 // and the zygote compaction, objects can be in the forward
1934 // address state (to store the forward/relocation addresses) and
1935 // they can still be accessed and the invalid read barrier bits
1936 // are consulted. If they look like gray but aren't really, the
1937 // read barriers slow path can trigger when it shouldn't. To guard
1938 // against this, return here if the CC collector isn't running.
1939 return from_ref;
1940 }
1941 DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001942 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1943 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001944 // It's already marked.
1945 return from_ref;
1946 }
1947 mirror::Object* to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001948 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001949 to_ref = GetFwdPtr(from_ref);
1950 if (kUseBakerReadBarrier) {
1951 DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
1952 }
1953 if (to_ref == nullptr) {
1954 // It isn't marked yet. Mark it by copying it to the to-space.
1955 to_ref = Copy(from_ref);
1956 }
1957 DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
1958 << "from_ref=" << from_ref << " to_ref=" << to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001959 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001960 // This may or may not succeed, which is ok.
1961 if (kUseBakerReadBarrier) {
1962 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1963 }
1964 if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
1965 // Already marked.
1966 to_ref = from_ref;
1967 } else {
1968 // Newly marked.
1969 to_ref = from_ref;
1970 if (kUseBakerReadBarrier) {
1971 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1972 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001973 PushOntoMarkStack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001974 }
1975 } else {
1976 // from_ref is in a non-moving space.
1977 DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
1978 if (immune_region_.ContainsObject(from_ref)) {
1979 accounting::ContinuousSpaceBitmap* cc_bitmap =
1980 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1981 DCHECK(cc_bitmap != nullptr)
1982 << "An immune space object must have a bitmap";
1983 if (kIsDebugBuild) {
1984 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1985 << "Immune space object must be already marked";
1986 }
1987 // This may or may not succeed, which is ok.
1988 if (kUseBakerReadBarrier) {
1989 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1990 }
1991 if (cc_bitmap->AtomicTestAndSet(from_ref)) {
1992 // Already marked.
1993 to_ref = from_ref;
1994 } else {
1995 // Newly marked.
1996 to_ref = from_ref;
1997 if (kUseBakerReadBarrier) {
1998 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1999 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002000 PushOntoMarkStack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002001 }
2002 } else {
2003 // Use the mark bitmap.
2004 accounting::ContinuousSpaceBitmap* mark_bitmap =
2005 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
2006 accounting::LargeObjectBitmap* los_bitmap =
2007 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
2008 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2009 bool is_los = mark_bitmap == nullptr;
2010 if (!is_los && mark_bitmap->Test(from_ref)) {
2011 // Already marked.
2012 to_ref = from_ref;
2013 if (kUseBakerReadBarrier) {
2014 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
2015 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
2016 }
2017 } else if (is_los && los_bitmap->Test(from_ref)) {
2018 // Already marked in LOS.
2019 to_ref = from_ref;
2020 if (kUseBakerReadBarrier) {
2021 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
2022 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
2023 }
2024 } else {
2025 // Not marked.
2026 if (IsOnAllocStack(from_ref)) {
2027 // If it's on the allocation stack, it's considered marked. Keep it white.
2028 to_ref = from_ref;
2029 // Objects on the allocation stack need not be marked.
2030 if (!is_los) {
2031 DCHECK(!mark_bitmap->Test(to_ref));
2032 } else {
2033 DCHECK(!los_bitmap->Test(to_ref));
2034 }
2035 if (kUseBakerReadBarrier) {
2036 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
2037 }
2038 } else {
2039 // Not marked or on the allocation stack. Try to mark it.
2040 // This may or may not succeed, which is ok.
2041 if (kUseBakerReadBarrier) {
2042 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
2043 }
2044 if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
2045 // Already marked.
2046 to_ref = from_ref;
2047 } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
2048 // Already marked in LOS.
2049 to_ref = from_ref;
2050 } else {
2051 // Newly marked.
2052 to_ref = from_ref;
2053 if (kUseBakerReadBarrier) {
2054 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
2055 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002056 PushOntoMarkStack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002057 }
2058 }
2059 }
2060 }
2061 }
2062 return to_ref;
2063}
2064
2065void ConcurrentCopying::FinishPhase() {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002066 {
2067 MutexLock mu(Thread::Current(), mark_stack_lock_);
2068 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
2069 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002070 region_space_ = nullptr;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002071 {
2072 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
2073 skipped_blocks_map_.clear();
2074 }
2075 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2076 heap_->ClearMarkedObjects();
2077}
2078
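// IsMarkedHeapReference() is the hook the reference processor (and other
// weak-reference sweeping) uses to test a referent: it returns false for
// unreachable referents so the caller can clear them, and for reachable ones
// that moved it writes the forwarded address back into the field with
// release/seq-cst fences so other threads observe the update.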
Mathieu Chartier97509952015-07-13 14:35:43 -07002079bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002080 mirror::Object* from_ref = field->AsMirrorPtr();
Mathieu Chartier97509952015-07-13 14:35:43 -07002081 mirror::Object* to_ref = IsMarked(from_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002082 if (to_ref == nullptr) {
2083 return false;
2084 }
2085 if (from_ref != to_ref) {
2086 QuasiAtomic::ThreadFenceRelease();
2087 field->Assign(to_ref);
2088 QuasiAtomic::ThreadFenceSequentiallyConsistent();
2089 }
2090 return true;
2091}
2092
Mathieu Chartier97509952015-07-13 14:35:43 -07002093mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
2094 return Mark(from_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002095}
2096
2097void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
Mathieu Chartier97509952015-07-13 14:35:43 -07002098 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002099}
2100
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002101void ConcurrentCopying::ProcessReferences(Thread* self) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002102 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002103 // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002104 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2105 GetHeap()->GetReferenceProcessor()->ProcessReferences(
Mathieu Chartier97509952015-07-13 14:35:43 -07002106 true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002107}
2108
2109void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
2110 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
2111 region_space_->RevokeAllThreadLocalBuffers();
2112}
2113
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07002114} // namespace collector
2115} // namespace gc
2116} // namespace art