/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/stl_util.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     2 * MB, 2 * MB)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in Mark(), which could
    // otherwise cause a nested lock on heap_bitmap_lock_ (when a read barrier triggers
    // marking while the GC itself is running) or a lock order violation
    // (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

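// The constructor above pre-allocates a fixed pool of thread-local mark stacks so that
// mutators rarely need to allocate one while marking is in progress. A minimal, standalone
// sketch of that pooling pattern (standard C++ only, not ART types; the names Stack and
// StackPool are hypothetical) is kept below for illustration and is excluded from compilation.
#if 0
#include <memory>
#include <mutex>
#include <vector>

struct Stack { std::vector<void*> slots; };

class StackPool {
 public:
  explicit StackPool(size_t count) {
    for (size_t i = 0; i < count; ++i) {
      pool_.push_back(std::make_unique<Stack>());
    }
  }
  // Hand out a pre-allocated stack if one is available, else make a new one.
  std::unique_ptr<Stack> Acquire() {
    std::lock_guard<std::mutex> lock(lock_);
    if (pool_.empty()) {
      return std::make_unique<Stack>();
    }
    std::unique_ptr<Stack> s = std::move(pool_.back());
    pool_.pop_back();
    return s;
  }
  // Return a stack to the pool so another thread can reuse it.
  void Release(std::unique_ptr<Stack> s) {
    std::lock_guard<std::mutex> lock(lock_);
    s->slots.clear();
    pool_.push_back(std::move(s));
  }

 private:
  std::mutex lock_;
  std::vector<std::unique_ptr<Stack>> pool_;
};
#endif
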
void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references. It should be OK not to have a CAS here since there
  // should be no other threads that can trigger read barriers on the same referent during
  // reference processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

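// RunPhases() interleaves concurrent phases (run while holding the mutator lock in shared
// mode) with brief pauses (the thread-root flip and the optional verification). A rough,
// standalone sketch of that shared/exclusive locking shape, using std::shared_mutex as a
// stand-in for ART's mutator_lock_, is kept below for illustration only (C++17, not compiled).
#if 0
#include <shared_mutex>

std::shared_mutex mutator_lock;  // Stand-in for Locks::mutator_lock_.

void RunPhasesSketch() {
  {
    std::shared_lock<std::shared_mutex> shared(mutator_lock);  // Concurrent with mutators.
    // InitializePhase();
  }
  {
    std::unique_lock<std::shared_mutex> exclusive(mutator_lock);  // Pause: flip thread roots.
    // FlipCallback-equivalent work runs here.
  }
  {
    std::shared_lock<std::shared_mutex> shared(mutator_lock);  // Concurrent marking.
    // MarkingPhase();
  }
  {
    std::shared_lock<std::shared_mutex> shared(mutator_lock);  // Concurrent reclamation.
    // ReclaimPhase();
  }
}
#endif
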
void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks(self);
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch the thread roots from from-space refs to to-space refs and forward/mark them.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks(Thread* self) {
  heap_->SwapStacks(self);
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack(obj);
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

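// The visitor above relies on an atomic test-and-set in a mark bitmap so that exactly one
// thread pushes a given object onto the mark stack. A minimal standalone sketch of that idiom
// (a word-packed bitmap built on std::atomic, not ART's ContinuousSpaceBitmap) is kept below
// for illustration only and is excluded from compilation.
#if 0
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

class AtomicBitmap {
 public:
  explicit AtomicBitmap(size_t num_bits) : words_((num_bits + 63) / 64) {
    for (auto& word : words_) {
      word.store(0, std::memory_order_relaxed);  // Start with every bit clear (unmarked).
    }
  }

  // Returns the previous value of the bit and sets it to 1. Only the caller that
  // observes 'false' is the first marker and should push the object.
  bool TestAndSet(size_t bit) {
    std::atomic<uint64_t>& word = words_[bit / 64];
    const uint64_t mask = uint64_t{1} << (bit % 64);
    return (word.fetch_or(mask, std::memory_order_acq_rel) & mask) != 0;
  }

 private:
  std::vector<std::atomic<uint64_t>> words_;
};

void MarkIfFirst(AtomicBitmap& bitmap, size_t object_index, std::vector<size_t>& mark_stack) {
  if (!bitmap.TestAndSet(object_index)) {
    mark_stack.push_back(object_index);  // Newly marked: push exactly once.
  }
}
#endif
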
class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);
  {
    // Mark the image root. The WB-based collectors do not need to scan the image objects from
    // the roots since they rely on the card table, but it's necessary here for the RB to-space
    // invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  // TODO: Other garbage collectors use Runtime::VisitConcurrentRoots(); refactor this part
  // to use the same function as well.
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }
  Runtime::Current()->GetHeap()->VisitAllocationRecords(this);

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need a checkpoint to process thread-local mark stacks, that
    // after we disable weak ref accesses we can't use a checkpoint due to a deadlock issue
    // (running threads may be blocking at WaitHoldingLocks), and that once we reach the point
    // where we process weak references, we can avoid taking a lock when accessing the GC mark
    // stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread-local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process the thread-local mark
    // stacks one last time before transitioning to the shared mark stack mode, which processes
    // any new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we
    // can ensure that mutators won't newly gray objects and push new refs onto the mark stack
    // due to weak ref accesses, and that mutators safely transition to the shared mark stack
    // mode (without leaving unprocessed refs on the thread-local mark stacks), without a race.
    // This is why we use a thread-local weak ref access flag,
    // Thread::tls32_.weak_ref_access_enabled_, instead of the global one.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // converges once before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process, which are processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may
    // have marked some objects (kept some strings alive), as hash_set::Erase() can call the hash
    // function on arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Issue an empty checkpoint to ensure no threads are still in the middle of a read barrier
    // which may have a from-space ref cached in a local variable.
    IssueEmptyCheckpoint();
    // Marking is done. Disable marking.
    if (kUseTableLookupReadBarrier) {
      heap_->rb_table_->ClearAll();
      DCHECK(heap_->rb_table_->IsAllCleared());
    }
    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
    is_marking_ = false;  // This disables the read barrier/marking of weak roots.
    mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

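// IssueEmptyCheckpoint() makes the GC wait until every runnable mutator has passed through a
// no-op closure, which guarantees no thread is still inside a read barrier with a stale
// from-space ref in a local. A rough, standalone sketch of the "count down a barrier as each
// thread checks in" pattern (standard C++ only, hypothetical names) is kept below, not compiled.
#if 0
#include <condition_variable>
#include <mutex>

class CheckpointBarrier {
 public:
  void Init(int expected) {
    std::lock_guard<std::mutex> lock(lock_);
    remaining_ = expected;
  }
  // Called by (or on behalf of) each mutator thread when it reaches the checkpoint.
  void Pass() {
    std::lock_guard<std::mutex> lock(lock_);
    if (--remaining_ <= 0) {
      cv_.notify_all();
    }
  }
  // Called by the GC thread to wait for all mutators to check in.
  void Wait() {
    std::unique_lock<std::mutex> lock(lock_);
    cv_.wait(lock, [this] { return remaining_ <= 0; });
  }

 private:
  std::mutex lock_;
  std::condition_variable cv_;
  int remaining_ = 0;
};
#endif
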
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    if (self == thread_running_gc_) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      CHECK(!gc_mark_stack_->IsFull());
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  }
}

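// PushOntoMarkStack() dispatches on an atomically read mode flag: thread-local stacks during
// most of marking, a mutex-protected shared stack during the transition, and an unlocked stack
// once only the GC thread can push. A compressed, standalone sketch of that dispatch (standard
// C++ only; the types and names are hypothetical) is kept below for illustration, not compiled.
#if 0
#include <atomic>
#include <mutex>
#include <vector>

enum class Mode { kThreadLocal, kShared, kGcExclusive };

struct MarkStacks {
  std::atomic<Mode> mode{Mode::kThreadLocal};
  std::mutex shared_lock;
  std::vector<void*> gc_stack;                      // Owned by the GC thread.
  static thread_local std::vector<void*> tl_stack;  // One per mutator thread.

  void Push(void* ref, bool is_gc_thread) {
    switch (mode.load(std::memory_order_relaxed)) {
      case Mode::kThreadLocal:
        if (is_gc_thread) {
          gc_stack.push_back(ref);  // The GC thread bypasses thread-local stacks.
        } else {
          tl_stack.push_back(ref);  // No lock needed: the stack is thread-local.
        }
        break;
      case Mode::kShared: {
        std::lock_guard<std::mutex> lock(shared_lock);  // Mutators and GC share one stack.
        gc_stack.push_back(ref);
        break;
      }
      case Mode::kGcExclusive:
        gc_stack.push_back(ref);  // Only the GC thread may push; no lock.
        break;
    }
  }
};
thread_local std::vector<void*> MarkStacks::tl_stack;
#endif
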
accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

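// GetFwdPtr() relies on the object's lock word doubling as a forwarding pointer once the object
// has been copied to to-space. A tiny, standalone sketch of such a tagged-word encoding (not
// ART's actual LockWord layout; the low-bit tag chosen here is an assumption made possible by
// object alignment) is kept below for illustration only.
#if 0
#include <cstdint>

constexpr uintptr_t kForwardingTag = 0x1;  // Hypothetical: low bit marks a forwarding address.

// Install a forwarding address (done by whichever thread wins the copy race).
inline uintptr_t EncodeForwarding(void* to_space_copy) {
  return reinterpret_cast<uintptr_t>(to_space_copy) | kForwardingTag;
}

// Returns the to-space copy if the word is a forwarding address, else nullptr.
inline void* DecodeForwarding(uintptr_t word) {
  if ((word & kForwardingTag) != 0) {
    return reinterpret_cast<void*>(word & ~kForwardingTag);
  }
  return nullptr;  // Not yet copied.
}
#endif
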
// The following visitors are used to verify that there are no
// references to the from-space left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify that there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

class RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  explicit RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                                bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}

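// ProcessMarkStack() only terminates after it has drained the stack and then observed it empty
// a second time, since a single "empty" observation can race with mutators pushing new refs.
// A standalone sketch of that double-empty convergence loop (with a caller-supplied drain
// function instead of ProcessMarkStackOnce()) is kept below for illustration, not compiled.
#if 0
#include <functional>

// Drains the stack until two consecutive passes find it empty. 'drain_once' returns true when
// the pass found the stack empty.
void DrainUntilConverged(const std::function<bool()>& drain_once) {
  bool empty_prev = false;
  while (true) {
    const bool empty = drain_once();
    if (empty_prev && empty) {
      break;  // Two consecutive empty passes: no concurrent refill happened in between.
    }
    empty_prev = empty;
  }
}
#endif
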
bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
             p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}

size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
  // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
  RevokeThreadLocalMarkStacks(disable_weak_ref_access);
  size_t count = 0;
  std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    // Make a copy of the mark stack vector.
    mark_stacks = revoked_mark_stacks_;
    revoked_mark_stacks_.clear();
  }
  for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
    for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
      mirror::Object* to_ref = p->AsMirrorPtr();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    {
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
        // The pool has enough. Delete it.
        delete mark_stack;
      } else {
        // Otherwise, put it into the pool for later reuse.
        mark_stack->Reset();
        pooled_mark_stacks_.push_back(mark_stack);
      }
    }
  }
  return count;
}

void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  // Scan ref fields.
  Scan(to_ref);
  // Mark the gray ref as white or black.
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
      to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
      !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
    // Leave References gray so that GetReferent() will trigger RB.
    CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
  } else {
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
    if (kUseBakerReadBarrier) {
      if (region_space_->IsInToSpace(to_ref)) {
        // If to-space, change from gray to white.
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::WhitePtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
      } else {
        // If non-moving space/unevac from space, change from gray
        // to black. We can't change gray to white because it's not
        // safe to use CAS if two threads change values in opposite
        // directions (A->B and B->A). So, we change it to black to
        // indicate non-moving objects that have been marked
        // through. Note we'd need to change from black to white
        // later (concurrently).
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::BlackPtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
      }
    }
#else
    DCHECK(!kUseBakerReadBarrier);
#endif
  }
  if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
    visitor(to_ref);
  }
}

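// The gray-to-white / gray-to-black transition above is a single CAS on the object's read
// barrier state: only the thread that wins the CAS finishes the object, and the transition
// direction is chosen so it never runs opposite to a concurrent white-to-gray graying. A
// standalone sketch of that state machine using std::atomic (the enum values are stand-ins,
// not ART's ReadBarrier pointers) is kept below for illustration, not compiled.
#if 0
#include <atomic>

enum class RbState : int { kWhite = 0, kGray = 1, kBlack = 2 };

struct FakeObject {
  std::atomic<RbState> rb_state{RbState::kWhite};
};

// Returns true if this caller performed the gray -> 'done_state' transition.
bool FinishObject(FakeObject& obj, RbState done_state) {
  RbState expected = RbState::kGray;
  // compare_exchange_strong fails (and leaves 'expected' updated) if another thread already
  // transitioned the object out of gray.
  return obj.rb_state.compare_exchange_strong(expected, done_state,
                                              std::memory_order_acq_rel);
}

// Usage: to-space objects go gray -> white; unevacuated/non-moving objects go gray -> black so
// the transition never races in the opposite direction of white -> gray graying.
#endif
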
void ConcurrentCopying::SwitchToSharedMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeThreadLocal));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
  CHECK(weak_ref_access_enabled_.LoadRelaxed());
  weak_ref_access_enabled_.StoreRelaxed(false);
  QuasiAtomic::ThreadFenceForConstructor();
  // Process the thread local mark stacks one last time after switching to the shared mark stack
  // mode and disable weak ref accesses.
  ProcessThreadLocalMarkStacks(true);
  if (kVerboseMode) {
    LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
  }
}

void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeShared));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "Switched to GC exclusive mark stack mode";
  }
}

void ConcurrentCopying::CheckEmptyMarkStack() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Thread-local mark stack mode.
    RevokeThreadLocalMarkStacks(false);
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (!revoked_mark_stacks_.empty()) {
      for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
        while (!mark_stack->IsEmpty()) {
          mirror::Object* obj = mark_stack->PopBack();
          if (kUseBakerReadBarrier) {
            mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
                      << " is_marked=" << IsMarked(obj);
          } else {
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                      << " is_marked=" << IsMarked(obj);
          }
        }
      }
      LOG(FATAL) << "mark stack is not empty";
    }
  } else {
    // Shared, GC-exclusive, or off.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(gc_mark_stack_->IsEmpty());
    CHECK(revoked_mark_stacks_.empty());
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CheckEmptyMarkStack();
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

class ConcurrentCopyingClearBlackPtrsVisitor {
 public:
  explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
    obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
  }

 private:
  ConcurrentCopying* const collector_;
};

// Clear the black ptrs in non-moving objects back to white.
void ConcurrentCopying::ClearBlackPtrs() {
  CHECK(kUseBakerReadBarrier);
  TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
  ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (space == region_space_) {
      continue;
    }
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (kVerboseMode) {
      LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
    }
    mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }
  space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
  large_object_space->GetMarkBitmap()->VisitMarkedRange(
      reinterpret_cast<uintptr_t>(large_object_space->Begin()),
      reinterpret_cast<uintptr_t>(large_object_space->End()),
      visitor);
  // Objects on the allocation stack?
  if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
    size_t count = GetAllocationStack()->Size();
    auto* it = GetAllocationStack()->Begin();
    auto* end = GetAllocationStack()->End();
    for (size_t i = 0; i < count; ++i, ++it) {
      CHECK_LT(it, end);
      mirror::Object* obj = it->AsMirrorPtr();
      if (obj != nullptr) {
        // Must have been cleared above.
        CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
      }
    }
  }
}

1184void ConcurrentCopying::ReclaimPhase() {
1185 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1186 if (kVerboseMode) {
1187 LOG(INFO) << "GC ReclaimPhase";
1188 }
1189 Thread* self = Thread::Current();
1190
1191 {
1192 // Double-check that the mark stack is empty.
1193 // Note: need to set this after VerifyNoFromSpaceRef().
1194 is_asserting_to_space_invariant_ = false;
1195 QuasiAtomic::ThreadFenceForConstructor();
1196 if (kVerboseMode) {
1197 LOG(INFO) << "Issue an empty check point. ";
1198 }
1199 IssueEmptyCheckpoint();
1200 // Disable the check.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001201 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
1202 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001203 }
1204
1205 {
1206 // Record freed objects.
1207 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1208 // Don't include thread-locals that are in the to-space.
1209 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1210 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1211 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1212 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
1213 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
1214 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
1215 if (kEnableFromSpaceAccountingCheck) {
1216 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1217 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1218 }
1219 CHECK_LE(to_objects, from_objects);
1220 CHECK_LE(to_bytes, from_bytes);
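    // Objects copied into the to-space still count as allocated, so what this
    // GC reclaimed is the from-space total minus what was moved.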
1221 int64_t freed_bytes = from_bytes - to_bytes;
1222 int64_t freed_objects = from_objects - to_objects;
1223 if (kVerboseMode) {
1224 LOG(INFO) << "RecordFree:"
1225 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1226 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1227 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1228 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1229 << " from_space size=" << region_space_->FromSpaceSize()
1230 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1231 << " to_space size=" << region_space_->ToSpaceSize();
1232 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1233 }
1234 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1235 if (kVerboseMode) {
1236 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1237 }
1238 }
1239
1240 {
1241 TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
1242 ComputeUnevacFromSpaceLiveRatio();
1243 }
1244
1245 {
1246 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1247 region_space_->ClearFromSpace();
1248 }
1249
1250 {
1251 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1252 if (kUseBakerReadBarrier) {
1253 ClearBlackPtrs();
1254 }
1255 Sweep(false);
1256 SwapBitmaps();
1257 heap_->UnBindBitmaps();
1258
1259 // Remove bitmaps for the immune spaces.
1260 while (!cc_bitmaps_.empty()) {
1261 accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
1262 cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
1263 delete cc_bitmap;
1264 cc_bitmaps_.pop_back();
1265 }
1266 region_space_bitmap_ = nullptr;
1267 }
1268
1269  CheckEmptyMarkStack();
1270
1271  if (kVerboseMode) {
1272 LOG(INFO) << "GC end of ReclaimPhase";
1273 }
1274}
1275
1276class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
1277 public:
1278 explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
1279 : collector_(cc) {}
1280 void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1281 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1282 DCHECK(ref != nullptr);
1283    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
1284    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
1285    if (kUseBakerReadBarrier) {
1286      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
1287      // Clear the black ptr.
1288      ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
1289      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
1290    }
1291 size_t obj_size = ref->SizeOf();
1292 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1293 collector_->region_space_->AddLiveBytes(ref, alloc_size);
1294 }
1295
1296 private:
1297  ConcurrentCopying* const collector_;
1298};
1299
1300// Compute how much live data is left in the unevac from-space regions.
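// The live bytes recorded per region below give the region space a live ratio;
// presumably this is what later drives the choice of which from-space regions
// are worth evacuating versus leaving in place.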
1301void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
1302 region_space_->AssertAllRegionLiveBytesZeroOrCleared();
1303 ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
1304 region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
1305 reinterpret_cast<uintptr_t>(region_space_->Limit()),
1306 visitor);
1307}
1308
1309// Assert the to-space invariant.
1310void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
1311 mirror::Object* ref) {
1312 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1313 if (is_asserting_to_space_invariant_) {
1314 if (region_space_->IsInToSpace(ref)) {
1315 // OK.
1316 return;
1317 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1318 CHECK(region_space_bitmap_->Test(ref)) << ref;
1319 } else if (region_space_->IsInFromSpace(ref)) {
1320 // Not OK. Do extra logging.
1321 if (obj != nullptr) {
1322        LogFromSpaceRefHolder(obj, offset);
1323      }
1324      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1325      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1326    } else {
1327      AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1328 }
1329 }
1330}
1331
1332class RootPrinter {
1333 public:
1334 RootPrinter() { }
1335
1336 template <class MirrorType>
1337 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
1338 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1339 if (!root->IsNull()) {
1340 VisitRoot(root);
1341 }
1342 }
1343
1344 template <class MirrorType>
1345 void VisitRoot(mirror::Object** root)
1346 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1347 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
1348 }
1349
1350 template <class MirrorType>
1351 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
1352 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1353 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
1354 }
1355};
1356
1357void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1358 mirror::Object* ref) {
1359 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1360 if (is_asserting_to_space_invariant_) {
1361 if (region_space_->IsInToSpace(ref)) {
1362 // OK.
1363 return;
1364 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1365 CHECK(region_space_bitmap_->Test(ref)) << ref;
1366 } else if (region_space_->IsInFromSpace(ref)) {
1367 // Not OK. Do extra logging.
1368 if (gc_root_source == nullptr) {
1369 // No info.
1370 } else if (gc_root_source->HasArtField()) {
1371 ArtField* field = gc_root_source->GetArtField();
1372 LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
1373 RootPrinter root_printer;
1374 field->VisitRoots(root_printer);
1375 } else if (gc_root_source->HasArtMethod()) {
1376 ArtMethod* method = gc_root_source->GetArtMethod();
1377 LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
1378 RootPrinter root_printer;
1379 method->VisitRoots(root_printer);
1380 }
1381 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1382 region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
1383 PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
1384 MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
1385 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1386 } else {
1387 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1388 }
1389 }
1390}
1391
1392void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1393 if (kUseBakerReadBarrier) {
1394 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1395 << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1396 } else {
1397 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1398 }
1399 if (region_space_->IsInFromSpace(obj)) {
1400 LOG(INFO) << "holder is in the from-space.";
1401 } else if (region_space_->IsInToSpace(obj)) {
1402 LOG(INFO) << "holder is in the to-space.";
1403 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1404 LOG(INFO) << "holder is in the unevac from-space.";
1405 if (region_space_bitmap_->Test(obj)) {
1406 LOG(INFO) << "holder is marked in the region space bitmap.";
1407 } else {
1408 LOG(INFO) << "holder is not marked in the region space bitmap.";
1409 }
1410 } else {
1411 // In a non-moving space.
1412 if (immune_region_.ContainsObject(obj)) {
1413 LOG(INFO) << "holder is in the image or the zygote space.";
1414 accounting::ContinuousSpaceBitmap* cc_bitmap =
1415 cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
1416 CHECK(cc_bitmap != nullptr)
1417 << "An immune space object must have a bitmap.";
1418 if (cc_bitmap->Test(obj)) {
1419 LOG(INFO) << "holder is marked in the bit map.";
1420      } else {
1421        LOG(INFO) << "holder is NOT marked in the bit map.";
1422 }
1423 } else {
1424 LOG(INFO) << "holder is in a non-moving (or main) space.";
1425 accounting::ContinuousSpaceBitmap* mark_bitmap =
1426 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1427 accounting::LargeObjectBitmap* los_bitmap =
1428 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1429 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1430 bool is_los = mark_bitmap == nullptr;
1431 if (!is_los && mark_bitmap->Test(obj)) {
1432 LOG(INFO) << "holder is marked in the mark bit map.";
1433 } else if (is_los && los_bitmap->Test(obj)) {
1434 LOG(INFO) << "holder is marked in the los bit map.";
1435 } else {
1436        // If ref is on the allocation stack, then it is considered
1437        // marked/alive (but not necessarily on the live stack).
1438 if (IsOnAllocStack(obj)) {
1439 LOG(INFO) << "holder is on the alloc stack.";
1440        } else {
1441          LOG(INFO) << "holder is not marked or on the alloc stack.";
1442        }
1443 }
1444 }
1445 }
1446  LOG(INFO) << "offset=" << offset.SizeValue();
1447}
1448
1449void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1450 mirror::Object* ref) {
1451 // In a non-moving spaces. Check that the ref is marked.
1452 if (immune_region_.ContainsObject(ref)) {
1453 accounting::ContinuousSpaceBitmap* cc_bitmap =
1454 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
1455 CHECK(cc_bitmap != nullptr)
1456 << "An immune space ref must have a bitmap. " << ref;
1457 if (kUseBakerReadBarrier) {
1458 CHECK(cc_bitmap->Test(ref))
1459 << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
1460 << obj->GetReadBarrierPointer() << " ref=" << ref;
1461 } else {
1462 CHECK(cc_bitmap->Test(ref))
1463 << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
1464 }
1465 } else {
1466 accounting::ContinuousSpaceBitmap* mark_bitmap =
1467 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1468 accounting::LargeObjectBitmap* los_bitmap =
1469 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1470 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1471 bool is_los = mark_bitmap == nullptr;
1472 if ((!is_los && mark_bitmap->Test(ref)) ||
1473 (is_los && los_bitmap->Test(ref))) {
1474 // OK.
1475 } else {
1476      // If ref is on the allocation stack, it may not be marked in the
1477      // bitmaps, but it is still considered marked/alive (though not
1478      // necessarily on the live stack).
1479 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1480 << "obj=" << obj << " ref=" << ref;
1481 }
1482 }
1483}
1484
1485// Used to scan ref fields of an object.
1486class ConcurrentCopyingRefFieldsVisitor {
1487 public:
1488 explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
1489 : collector_(collector) {}
1490
1491 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
1492 const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1493 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1494 collector_->Process(obj, offset);
1495 }
1496
1497 void operator()(mirror::Class* klass, mirror::Reference* ref) const
1498 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
1499 CHECK(klass->IsTypeOfReferenceClass());
1500 collector_->DelayReferenceReferent(klass, ref);
1501 }
1502
1503 private:
1504 ConcurrentCopying* const collector_;
1505};
1506
1507// Scan ref fields of an object.
1508void ConcurrentCopying::Scan(mirror::Object* to_ref) {
1509 DCHECK(!region_space_->IsInFromSpace(to_ref));
1510 ConcurrentCopyingRefFieldsVisitor visitor(this);
1511 to_ref->VisitReferences<true>(visitor, visitor);
1512}
1513
1514// Process a field.
1515inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
1516 mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
1517 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1518 return;
1519 }
1520 mirror::Object* to_ref = Mark(ref);
1521 if (to_ref == ref) {
1522 return;
1523 }
1524 // This may fail if the mutator writes to the field at the same time. But it's ok.
1525 mirror::Object* expected_ref = ref;
1526 mirror::Object* new_ref = to_ref;
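  // Re-read the field before each CAS attempt: if a mutator (or another GC
  // thread) has already stored a different reference there, give up rather than
  // overwrite it. The weak CAS can also fail spuriously, hence the loop.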
1527 do {
1528 if (expected_ref !=
1529 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1530 // It was updated by the mutator.
1531 break;
1532 }
1533 } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
1534 offset, expected_ref, new_ref));
1535}
1536
1537// Process some roots.
1538void ConcurrentCopying::VisitRoots(
1539 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1540 for (size_t i = 0; i < count; ++i) {
1541 mirror::Object** root = roots[i];
1542 mirror::Object* ref = *root;
1543 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1544      continue;
1545    }
1546    mirror::Object* to_ref = Mark(ref);
1547    if (to_ref == ref) {
1548      continue;
1549    }
1550 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1551 mirror::Object* expected_ref = ref;
1552 mirror::Object* new_ref = to_ref;
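    // Same protocol as in Process(): only install to_ref if the root still
    // holds the from-space reference; a concurrent update by the mutator wins.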
1553 do {
1554 if (expected_ref != addr->LoadRelaxed()) {
1555 // It was updated by the mutator.
1556 break;
1557 }
1558 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1559 }
1560}
1561
1562void ConcurrentCopying::VisitRoots(
1563 mirror::CompressedReference<mirror::Object>** roots, size_t count,
1564 const RootInfo& info ATTRIBUTE_UNUSED) {
1565 for (size_t i = 0; i < count; ++i) {
1566 mirror::CompressedReference<mirror::Object>* root = roots[i];
1567 mirror::Object* ref = root->AsMirrorPtr();
1568 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1569      continue;
1570    }
1571    mirror::Object* to_ref = Mark(ref);
1572    if (to_ref == ref) {
1573      continue;
1574    }
1575 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
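    // CompressedReference stores the reference in 32 bits, so the CAS below
    // operates on the compressed value rather than on a raw Object* word.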
1576 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1577 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
1578 do {
1579 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1580 // It was updated by the mutator.
1581 break;
1582 }
1583 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1584 }
1585}
1586
1587// Fill the given memory block with a dummy object. Used to fill in
1588// copies of objects that lost the race to install a forwarding pointer.
1589void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
1590 CHECK(IsAligned<kObjectAlignment>(byte_size));
1591 memset(dummy_obj, 0, byte_size);
1592 mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
1593 CHECK(int_array_class != nullptr);
1594 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
1595 size_t component_size = int_array_class->GetComponentSize();
1596 CHECK_EQ(component_size, sizeof(int32_t));
1597 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
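  // data_offset is the size of the int array header; a block smaller than that
  // cannot hold even a zero-length int array, so fall back to a plain
  // java.lang.Object below.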
1598 if (data_offset > byte_size) {
1599 // An int array is too big. Use java.lang.Object.
1600 mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1601 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
1602 CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
1603 dummy_obj->SetClass(java_lang_Object);
1604 CHECK_EQ(byte_size, dummy_obj->SizeOf());
1605 } else {
1606 // Use an int array.
1607 dummy_obj->SetClass(int_array_class);
1608 CHECK(dummy_obj->IsArrayInstance());
1609 int32_t length = (byte_size - data_offset) / component_size;
1610 dummy_obj->AsArray()->SetLength(length);
1611 CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
1612 << "byte_size=" << byte_size << " length=" << length
1613 << " component_size=" << component_size << " data_offset=" << data_offset;
1614 CHECK_EQ(byte_size, dummy_obj->SizeOf())
1615 << "byte_size=" << byte_size << " length=" << length
1616 << " component_size=" << component_size << " data_offset=" << data_offset;
1617 }
1618}
1619
1620// Reuse memory blocks that hold copies of objects that lost the forwarding race.
1621mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1622 // Try to reuse the blocks that were unused due to CAS failures.
1623 CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
1624 Thread* self = Thread::Current();
1625 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
1626 MutexLock mu(self, skipped_blocks_lock_);
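  // skipped_blocks_map_ is keyed by block size; lower_bound() finds the
  // smallest recorded block that is at least alloc_size bytes.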
1627 auto it = skipped_blocks_map_.lower_bound(alloc_size);
1628 if (it == skipped_blocks_map_.end()) {
1629 // Not found.
1630 return nullptr;
1631 }
1632 {
1633 size_t byte_size = it->first;
1634 CHECK_GE(byte_size, alloc_size);
1635 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1636 // If remainder would be too small for a dummy object, retry with a larger request size.
1637 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1638 if (it == skipped_blocks_map_.end()) {
1639 // Not found.
1640 return nullptr;
1641 }
1642 CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
1643 CHECK_GE(it->first - alloc_size, min_object_size)
1644 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1645 }
1646 }
1647 // Found a block.
1648 CHECK(it != skipped_blocks_map_.end());
1649 size_t byte_size = it->first;
1650 uint8_t* addr = it->second;
1651 CHECK_GE(byte_size, alloc_size);
1652 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
1653 CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
1654 if (kVerboseMode) {
1655 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1656 }
1657 skipped_blocks_map_.erase(it);
1658 memset(addr, 0, byte_size);
1659 if (byte_size > alloc_size) {
1660 // Return the remainder to the map.
1661 CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
1662 CHECK_GE(byte_size - alloc_size, min_object_size);
1663 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1664 byte_size - alloc_size);
1665 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
1666 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1667 }
1668 return reinterpret_cast<mirror::Object*>(addr);
1669}
1670
1671mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1672 DCHECK(region_space_->IsInFromSpace(from_ref));
1673 // No read barrier to avoid nested RB that might violate the to-space
1674 // invariant. Note that from_ref is a from space ref so the SizeOf()
1675 // call will access the from-space meta objects, but it's ok and necessary.
1676 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1677 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1678 size_t region_space_bytes_allocated = 0U;
1679 size_t non_moving_space_bytes_allocated = 0U;
1680 size_t bytes_allocated = 0U;
1681  size_t dummy;
1682  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
1683      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
1684  bytes_allocated = region_space_bytes_allocated;
1685 if (to_ref != nullptr) {
1686 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1687 }
1688 bool fall_back_to_non_moving = false;
1689 if (UNLIKELY(to_ref == nullptr)) {
1690 // Failed to allocate in the region space. Try the skipped blocks.
1691 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1692 if (to_ref != nullptr) {
1693      // Succeeded in allocating from a skipped block.
1694 if (heap_->use_tlab_) {
1695 // This is necessary for the tlab case as it's not accounted in the space.
1696 region_space_->RecordAlloc(to_ref);
1697 }
1698 bytes_allocated = region_space_alloc_size;
1699 } else {
1700 // Fall back to the non-moving space.
1701 fall_back_to_non_moving = true;
1702 if (kVerboseMode) {
1703 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1704 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1705 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1706 }
1707 fall_back_to_non_moving = true;
1708 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
1709                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
1710      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1711 bytes_allocated = non_moving_space_bytes_allocated;
1712 // Mark it in the mark bitmap.
1713 accounting::ContinuousSpaceBitmap* mark_bitmap =
1714 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1715 CHECK(mark_bitmap != nullptr);
1716 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1717 }
1718 }
1719 DCHECK(to_ref != nullptr);
1720
1721 // Attempt to install the forward pointer. This is in a loop as the
1722 // lock word atomic write can fail.
1723 while (true) {
1724 // Copy the object. TODO: copy only the lockword in the second iteration and on?
1725 memcpy(to_ref, from_ref, obj_size);
1726
1727 LockWord old_lock_word = to_ref->GetLockWord(false);
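    // The memcpy above copied from_ref's lock word into to_ref, so this is a
    // snapshot of from_ref's lock word: a forwarding address here means another
    // thread has already won the race to copy this object.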
1728
1729 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
1730 // Lost the race. Another thread (either GC or mutator) stored
1731 // the forwarding pointer first. Make the lost copy (to_ref)
1732 // look like a valid but dead (dummy) object and keep it for
1733 // future reuse.
1734 FillWithDummyObject(to_ref, bytes_allocated);
1735 if (!fall_back_to_non_moving) {
1736 DCHECK(region_space_->IsInToSpace(to_ref));
1737 if (bytes_allocated > space::RegionSpace::kRegionSize) {
1738 // Free the large alloc.
1739 region_space_->FreeLarge(to_ref, bytes_allocated);
1740 } else {
1741 // Record the lost copy for later reuse.
1742 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1743 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1744 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
1745 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1746 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
1747 reinterpret_cast<uint8_t*>(to_ref)));
1748 }
1749 } else {
1750 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1751 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1752 // Free the non-moving-space chunk.
1753 accounting::ContinuousSpaceBitmap* mark_bitmap =
1754 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1755 CHECK(mark_bitmap != nullptr);
1756 CHECK(mark_bitmap->Clear(to_ref));
1757 heap_->non_moving_space_->Free(Thread::Current(), to_ref);
1758 }
1759
1760 // Get the winner's forward ptr.
1761 mirror::Object* lost_fwd_ptr = to_ref;
1762 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
1763 CHECK(to_ref != nullptr);
1764 CHECK_NE(to_ref, lost_fwd_ptr);
1765 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
1766 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1767 return to_ref;
1768 }
1769
1770    // Set the gray ptr.
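    // Making the copy gray (marked but not yet scanned) before the forwarding
    // address is published should guarantee that no thread ever sees the new
    // copy with a white read barrier pointer.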
1771 if (kUseBakerReadBarrier) {
1772 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
1773 }
1774
1775    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
1776
1777 // Try to atomically write the fwd ptr.
1778 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
1779 if (LIKELY(success)) {
1780 // The CAS succeeded.
1781 objects_moved_.FetchAndAddSequentiallyConsistent(1);
1782 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
1783 if (LIKELY(!fall_back_to_non_moving)) {
1784 DCHECK(region_space_->IsInToSpace(to_ref));
1785 } else {
1786 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1787 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1788 }
1789 if (kUseBakerReadBarrier) {
1790 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1791 }
1792 DCHECK(GetFwdPtr(from_ref) == to_ref);
1793 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1794      PushOntoMarkStack(to_ref);
1795      return to_ref;
1796 } else {
1797 // The CAS failed. It may have lost the race or may have failed
1798 // due to monitor/hashcode ops. Either way, retry.
1799 }
1800 }
1801}
1802
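// Returns the marked (possibly forwarded) reference for from_ref, or nullptr if
// it is not marked yet. Unlike Mark(), this never marks or copies anything.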
1803mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
1804 DCHECK(from_ref != nullptr);
1805  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1806  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
1807    // It's already marked.
1808 return from_ref;
1809 }
1810 mirror::Object* to_ref;
1811  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
1812    to_ref = GetFwdPtr(from_ref);
1813 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
1814 heap_->non_moving_space_->HasAddress(to_ref))
1815 << "from_ref=" << from_ref << " to_ref=" << to_ref;
1816  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
1817    if (region_space_bitmap_->Test(from_ref)) {
1818 to_ref = from_ref;
1819 } else {
1820 to_ref = nullptr;
1821 }
1822 } else {
1823 // from_ref is in a non-moving space.
1824 if (immune_region_.ContainsObject(from_ref)) {
1825 accounting::ContinuousSpaceBitmap* cc_bitmap =
1826 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1827 DCHECK(cc_bitmap != nullptr)
1828 << "An immune space object must have a bitmap";
1829 if (kIsDebugBuild) {
1830 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1831 << "Immune space object must be already marked";
1832 }
1833 if (cc_bitmap->Test(from_ref)) {
1834 // Already marked.
1835 to_ref = from_ref;
1836 } else {
1837 // Newly marked.
1838 to_ref = nullptr;
1839 }
1840 } else {
1841 // Non-immune non-moving space. Use the mark bitmap.
1842 accounting::ContinuousSpaceBitmap* mark_bitmap =
1843 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1844 accounting::LargeObjectBitmap* los_bitmap =
1845 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1846 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1847 bool is_los = mark_bitmap == nullptr;
1848 if (!is_los && mark_bitmap->Test(from_ref)) {
1849 // Already marked.
1850 to_ref = from_ref;
1851 } else if (is_los && los_bitmap->Test(from_ref)) {
1852 // Already marked in LOS.
1853 to_ref = from_ref;
1854 } else {
1855 // Not marked.
1856 if (IsOnAllocStack(from_ref)) {
1857 // If on the allocation stack, it's considered marked.
1858 to_ref = from_ref;
1859 } else {
1860 // Not marked.
1861 to_ref = nullptr;
1862 }
1863 }
1864 }
1865 }
1866 return to_ref;
1867}
1868
1869bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
1870 QuasiAtomic::ThreadFenceAcquire();
1871 accounting::ObjectStack* alloc_stack = GetAllocationStack();
1872  return alloc_stack->Contains(ref);
1873}
1874
1875mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
1876 if (from_ref == nullptr) {
1877 return nullptr;
1878 }
1879 DCHECK(from_ref != nullptr);
1880 DCHECK(heap_->collector_type_ == kCollectorTypeCC);
1881  if (kUseBakerReadBarrier && !is_active_) {
1882 // In the lock word forward address state, the read barrier bits
1883 // in the lock word are part of the stored forwarding address and
1884 // invalid. This is usually OK as the from-space copy of objects
1885 // aren't accessed by mutators due to the to-space
1886 // invariant. However, during the dex2oat image writing relocation
1887 // and the zygote compaction, objects can be in the forward
1888 // address state (to store the forward/relocation addresses) and
1889 // they can still be accessed and the invalid read barrier bits
1890 // are consulted. If they look like gray but aren't really, the
1891 // read barriers slow path can trigger when it shouldn't. To guard
1892 // against this, return here if the CC collector isn't running.
1893 return from_ref;
1894 }
1895 DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
1896  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1897  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
1898    // It's already marked.
1899 return from_ref;
1900 }
1901 mirror::Object* to_ref;
1902  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
1903    to_ref = GetFwdPtr(from_ref);
1904 if (kUseBakerReadBarrier) {
1905 DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
1906 }
1907 if (to_ref == nullptr) {
1908 // It isn't marked yet. Mark it by copying it to the to-space.
1909 to_ref = Copy(from_ref);
1910 }
1911 DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
1912 << "from_ref=" << from_ref << " to_ref=" << to_ref;
1913  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
1914    // This may or may not succeed, which is ok.
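    // The white-to-gray CAS can lose to another thread that has already grayed
    // (or blackened) the object; the bitmap AtomicTestAndSet below is what
    // decides which thread pushes it onto the mark stack.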
1915 if (kUseBakerReadBarrier) {
1916 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1917 }
1918 if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
1919 // Already marked.
1920 to_ref = from_ref;
1921 } else {
1922 // Newly marked.
1923 to_ref = from_ref;
1924 if (kUseBakerReadBarrier) {
1925 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1926 }
1927      PushOntoMarkStack(to_ref);
1928    }
1929 } else {
1930 // from_ref is in a non-moving space.
1931 DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
1932 if (immune_region_.ContainsObject(from_ref)) {
1933 accounting::ContinuousSpaceBitmap* cc_bitmap =
1934 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1935 DCHECK(cc_bitmap != nullptr)
1936 << "An immune space object must have a bitmap";
1937 if (kIsDebugBuild) {
1938 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1939 << "Immune space object must be already marked";
1940 }
1941 // This may or may not succeed, which is ok.
1942 if (kUseBakerReadBarrier) {
1943 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1944 }
1945 if (cc_bitmap->AtomicTestAndSet(from_ref)) {
1946 // Already marked.
1947 to_ref = from_ref;
1948 } else {
1949 // Newly marked.
1950 to_ref = from_ref;
1951 if (kUseBakerReadBarrier) {
1952 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1953 }
1954        PushOntoMarkStack(to_ref);
1955      }
1956 } else {
1957 // Use the mark bitmap.
1958 accounting::ContinuousSpaceBitmap* mark_bitmap =
1959 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1960 accounting::LargeObjectBitmap* los_bitmap =
1961 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1962 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1963 bool is_los = mark_bitmap == nullptr;
1964 if (!is_los && mark_bitmap->Test(from_ref)) {
1965 // Already marked.
1966 to_ref = from_ref;
1967 if (kUseBakerReadBarrier) {
1968 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
1969 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
1970 }
1971 } else if (is_los && los_bitmap->Test(from_ref)) {
1972 // Already marked in LOS.
1973 to_ref = from_ref;
1974 if (kUseBakerReadBarrier) {
1975 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
1976 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
1977 }
1978 } else {
1979 // Not marked.
1980 if (IsOnAllocStack(from_ref)) {
1981 // If it's on the allocation stack, it's considered marked. Keep it white.
1982 to_ref = from_ref;
1983 // Objects on the allocation stack need not be marked.
1984 if (!is_los) {
1985 DCHECK(!mark_bitmap->Test(to_ref));
1986 } else {
1987 DCHECK(!los_bitmap->Test(to_ref));
1988 }
1989 if (kUseBakerReadBarrier) {
1990 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
1991 }
1992 } else {
1993          // Neither marked nor on the allocation stack. Try to mark it.
1994 // This may or may not succeed, which is ok.
1995 if (kUseBakerReadBarrier) {
1996 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1997 }
1998 if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
1999 // Already marked.
2000 to_ref = from_ref;
2001 } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
2002 // Already marked in LOS.
2003 to_ref = from_ref;
2004 } else {
2005 // Newly marked.
2006 to_ref = from_ref;
2007 if (kUseBakerReadBarrier) {
2008 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
2009 }
2010            PushOntoMarkStack(to_ref);
2011          }
2012 }
2013 }
2014 }
2015 }
2016 return to_ref;
2017}
2018
2019void ConcurrentCopying::FinishPhase() {
2020  {
2021 MutexLock mu(Thread::Current(), mark_stack_lock_);
2022 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
2023 }
2024  region_space_ = nullptr;
2025  {
2026 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
2027 skipped_blocks_map_.clear();
2028 }
2029 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2030 heap_->ClearMarkedObjects();
2031}
2032
2033bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
2034  mirror::Object* from_ref = field->AsMirrorPtr();
2035  mirror::Object* to_ref = IsMarked(from_ref);
2036  if (to_ref == nullptr) {
2037 return false;
2038 }
2039 if (from_ref != to_ref) {
2040 QuasiAtomic::ThreadFenceRelease();
2041 field->Assign(to_ref);
2042 QuasiAtomic::ThreadFenceSequentiallyConsistent();
2043 }
2044 return true;
2045}
2046
2047mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
2048  return Mark(from_ref);
2049}
2050
2051void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
2052  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
2053}
2054
2055void ConcurrentCopying::ProcessReferences(Thread* self) {
2056  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
2057  // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
2058  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2059  GetHeap()->GetReferenceProcessor()->ProcessReferences(
2060      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
2061}
2062
2063void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
2064 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
2065 region_space_->RevokeAllThreadLocalBuffers();
2066}
2067
2068} // namespace collector
2069} // namespace gc
2070} // namespace art