/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"

namespace art {

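// Read barrier for a heap reference field: returns the value stored at ref_addr (the field at
// `offset` inside `obj`), routing it through the configured barrier. With the Baker barrier a
// gray object sends the reference through the Mark() slow path; with the table-lookup barrier
// the read barrier table decides. When the reference has moved (and, for Baker, only if
// kAlwaysUpdateField is set), the field is refreshed with a strong CAS.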
template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierOption,
          bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if a mutator
        // updates the field before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if a mutator updates the field before us,
        // but that is OK.
        if (ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->template AsMirrorPtr<kIsVolatile>();
  }
}

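// Read barrier for a GC root of type MirrorType*. Roots are only marked while the calling
// thread observes the GC to be in its marking phase; with the table-lookup barrier the root
// slot itself is refreshed with a CAS if the reference has moved.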
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if a mutator updates the root before us,
        // but that is OK.
        if (ref != old_ref) {
          Atomic<MirrorType*>* atomic_root = reinterpret_cast<Atomic<MirrorType*>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

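// Variant of BarrierForRoot() for roots stored as mirror::CompressedReference<MirrorType>;
// the CAS on the root slot goes through the compressed representation.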
// TODO: Reduce copy paste
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
        // Update the root atomically. This may fail if a mutator updates the root before us,
        // but that is OK.
        if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
          auto* atomic_root =
              reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, new_ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

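// Returns the marked version of ref as reported by the concurrent copying collector. The early
// returns below cover the configurations and GC phases where no query is needed.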
template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while
  // the GC is marking.
  if (!kUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}

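// Returns true while the heap or the concurrent copying collector is not yet available (or CC
// is not the current collector type), so that the to-space invariant checks below can be
// skipped safely.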
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

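// Debug check: when kEnableToSpaceInvariantChecks is set, asserts that `ref` (read from the
// field at `offset` in `obj`) satisfies the collector's to-space invariant. The overload below
// does the same for a reference read from a GC root.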
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

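// Marking slow path shared by the barriers above: delegates to the concurrent copying
// collector's read-barrier marking entry point.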
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

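// Baker-style gray check. In addition to the result, it produces the fake address dependency
// (always zero) that Barrier() ORs into the field address to order the rb_state load and the
// field load without an explicit load-load barrier.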
inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == kGrayState;
}

inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == kGrayState;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_