Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2013 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #ifndef ART_RUNTIME_GC_REFERENCE_QUEUE_H_ |
| 18 | #define ART_RUNTIME_GC_REFERENCE_QUEUE_H_ |
| 19 | |
| 20 | #include <iosfwd> |
| 21 | #include <string> |
| 22 | #include <vector> |
| 23 | |
David Sehr | c431b9d | 2018-03-02 12:01:51 -0800 | [diff] [blame] | 24 | #include "base/atomic.h" |
Andreas Gampe | 7fbc4a5 | 2018-11-28 08:26:47 -0800 | [diff] [blame] | 25 | #include "base/locks.h" |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 26 | #include "base/timing_logger.h" |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 27 | #include "jni.h" |
Mathieu Chartier | 5d3f73a | 2016-10-14 14:28:47 -0700 | [diff] [blame] | 28 | #include "obj_ptr.h" |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 29 | #include "offsets.h" |
Andreas Gampe | 5a0430d | 2019-01-04 14:33:57 -0800 | [diff] [blame] | 30 | #include "runtime_globals.h" |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 31 | #include "thread_pool.h" |
| 32 | |
| 33 | namespace art { |
Andreas Gampe | 7fbc4a5 | 2018-11-28 08:26:47 -0800 | [diff] [blame] | 34 | |
| 35 | class Mutex; |
| 36 | |
Mathieu Chartier | 8fa2dad | 2014-03-13 12:22:56 -0700 | [diff] [blame] | 37 | namespace mirror { |
| 38 | class Reference; |
| 39 | } // namespace mirror |
| 40 | |
Andreas Gampe | 5d08fcc | 2017-06-05 17:56:46 -0700 | [diff] [blame] | 41 | class IsMarkedVisitor; |
| 42 | class MarkObjectVisitor; |
| 43 | |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 44 | namespace gc { |
| 45 | |
Mathieu Chartier | 9750995 | 2015-07-13 14:35:43 -0700 | [diff] [blame] | 46 | namespace collector { |
| 47 | class GarbageCollector; |
| 48 | } // namespace collector |
| 49 | |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 50 | class Heap; |
| 51 | |
// Statistics gathered by one pass over the finalizer-reference list
// (presumably filled in by ReferenceQueue::EnqueueFinalizerReferences, which
// returns this type): the number of finalizer references examined and the
// number whose referents required enqueueing for finalization.
struct FinalizerStats {
  // Counts arrive as size_t but are deliberately stored as 32 bits to keep
  // the struct small; make the narrowing explicit rather than relying on an
  // implicit (warning-prone) conversion in the mem-initializers.
  FinalizerStats(size_t num_refs, size_t num_enqueued)
      : num_refs_(static_cast<uint32_t>(num_refs)),
        num_enqueued_(static_cast<uint32_t>(num_enqueued)) {}
  const uint32_t num_refs_;
  const uint32_t num_enqueued_;
};
| 58 | |
// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained as an unordered,
// circular, and singly-linked list using the pendingNext fields of the java.lang.ref.Reference
// objects.
class ReferenceQueue {
 public:
  // The queue does not own |lock|; it is only used by AtomicEnqueueIfNotEnqueued (see lock_
  // below). Pass a lock shared between queues if their enqueuing must be mutually exclusive.
  explicit ReferenceQueue(Mutex* lock);

  // Enqueue a reference if it is unprocessed. Thread safe to call from multiple
  // threads since it uses a lock to avoid a race between checking for the references presence and
  // adding it.
  void AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);

  // Enqueue a reference. The reference must be unprocessed.
  // Not thread safe, used when mutators are paused to minimize lock overhead.
  void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);

  // Dequeue a reference from the queue and return that dequeued reference.
  // Call DisableReadBarrierForReference for the reference that's returned from this function.
  ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);

  // If applicable, disable the read barrier for the reference after its referent is handled (see
  // ConcurrentCopying::ProcessMarkStackRef.) This must be called for a reference that's dequeued
  // from pending queue (DequeuePendingReference).
  void DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Enqueues finalizer references with white referents. White referents are blackened, moved to
  // the zombie field, and the referent field is cleared. Returns counts of the references
  // processed and enqueued (see FinalizerStats).
  FinalizerStats EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                            collector::GarbageCollector* collector)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Walks the reference list marking and dequeuing any references subject to the reference
  // clearing policy. References with a black referent are removed from the list. References
  // with white referents biased toward saving are blackened and also removed from the list.
  // Returns the number of non-null soft references. May be called concurrently with
  // AtomicEnqueueIfNotEnqueued().
  uint32_t ForwardSoftReferences(MarkObjectVisitor* visitor)
      REQUIRES(!*lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Unlink the reference list clearing references objects with white referents. Cleared references
  // registered to a reference queue are scheduled for appending by the heap worker thread.
  // If |report_cleared| is true, additionally report that references were cleared
  // (NOTE(review): exact reporting mechanism lives in the .cc file — confirm there).
  void ClearWhiteReferences(ReferenceQueue* cleared_references,
                            collector::GarbageCollector* collector,
                            bool report_cleared = false)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Print a human-readable representation of the queue to |os| (for debugging/logging).
  void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
  // Number of references currently in the queue. Requires walking the list — not O(1).
  size_t GetLength() const REQUIRES_SHARED(Locks::mutator_lock_);

  // True when no references are queued (list_ head is null).
  bool IsEmpty() const {
    return list_ == nullptr;
  }

  // Clear this queue. Only safe after handing off the contents elsewhere for further processing.
  // Note: does not touch the queued references themselves, only drops the head pointer.
  void Clear() {
    list_ = nullptr;
  }

  // Raw head of the circular pendingNext list; may be null when the queue is empty.
  mirror::Reference* GetList() REQUIRES_SHARED(Locks::mutator_lock_) {
    return list_;
  }

  // Visits list_, currently only used for the mark compact GC.
  void UpdateRoots(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
  // calling AtomicEnqueueIfNotEnqueued. Not owned by this queue.
  Mutex* const lock_;
  // The actual reference list. Only a root for the mark compact GC since it
  // will be null during root marking for other GC types. Not an ObjPtr since it
  // is accessed from multiple threads. Points to a singly-linked circular list
  // using the pendingNext field.
  mirror::Reference* list_;

  // No default construction, copying, or assignment: the queue identity is tied to its lock.
  DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
};
| 141 | |
| 142 | } // namespace gc |
| 143 | } // namespace art |
| 144 | |
| 145 | #endif // ART_RUNTIME_GC_REFERENCE_QUEUE_H_ |