/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {

template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // The high bits of the rb ptr, rb_ptr_high_bits (which must be
    // zero), are used to create an artificial data dependency from
    // the is_gray load to the ref field (ptr) load, to avoid needing
    // a load-load barrier between the two.
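    // (OR-ing in zero high bits leaves ref_addr unchanged, but the
    // compiler and CPU cannot prove they are zero, so the ref load
    // becomes address-dependent on the rb_ptr load; weakly ordered
    // architectures such as ARM honor address dependencies without an
    // explicit fence.)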
    uintptr_t rb_ptr_high_bits;
    bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
    ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
        rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
    MirrorType* ref = ref_addr->AsMirrorPtr();
    if (is_gray) {
      // Slow-path.
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    if (kEnableReadBarrierInvariantChecks) {
      CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref_addr->AsMirrorPtr();
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    MirrorType* ref = ref_addr->AsMirrorPtr();
    MirrorType* old_ref = ref;
    // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
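      // The table bit is set for old_ref, so it may point into a space the
      // collector is processing: mark it to obtain the up-to-date reference
      // and publish it back to the field below.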
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the field atomically. This may fail if the mutator updates it first, but that's OK.
      obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
          offset, old_ref, ref);
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

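// A minimal sketch of how a field getter might invoke the barrier above. The
// real call site lives in mirror/object-inl.h and may differ (verify flags
// and volatility are omitted here):
//
//   template<class T, ReadBarrierOption kOption>
//   inline T* Object::GetFieldObject(MemberOffset field_offset) {
//     mirror::HeapReference<T>* objref_addr = reinterpret_cast<mirror::HeapReference<T>*>(
//         reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value());
//     return ReadBarrier::Barrier<T, kOption>(this, field_offset, objref_addr);
//   }
//
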
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    // TODO: separate the read barrier code from the collector code more.
    if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
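    // Roots have no holder object or field offset, hence the null obj and
    // zero offset passed to the to-space invariant check below.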
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      MirrorType* old_ref = ref;
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the root atomically. This may fail if the mutator updates it first, but that's OK.
      Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else {
    return ref;
  }
}

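// A minimal sketch of a root visit that applies the barrier above before
// using a root slot (the slot and template arguments are illustrative):
//
//   mirror::Object** root = ...;  // e.g. a slot holding a GC root
//   mirror::Object* obj =
//       ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier, true>(root);
//
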
// TODO: Reduce copy-paste.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    // TODO: separate the read barrier code from the collector code more.
    if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
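      // Roots of this overload are stored compressed, so the compare-and-swap
      // below must operate on the compressed representations, not raw pointers.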
      // Update the root atomically. This may fail if the mutator updates it first, but that's OK.
      auto* atomic_root =
          reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else {
    return ref;
  }
}

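// Returns true while the runtime is starting up, before the heap and the
// concurrent copying collector are fully initialized; the barriers above
// return refs unchanged during that window.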
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

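// When invariant checks are enabled, asks the concurrent copying collector to
// verify that ref points into to-space; a no-op for null refs and during
// startup.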
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

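// Delegates to the concurrent copying collector, which returns the to-space
// version of obj, marking it first if needed.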
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->Mark(obj);
}

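// Reads obj's read barrier pointer and reports whether its low (color) bits
// encode gray; the bits outside rb_ptr_mask_ are handed back so the caller
// can use them for the address dependency trick and verify they are zero.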
inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
                                                   uintptr_t* out_rb_ptr_high_bits) {
  mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
  uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
  uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
  if (kEnableReadBarrierInvariantChecks) {
    CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
          rb_ptr_low_bits == black_ptr_)
        << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << PrettyTypeOf(obj);
  }
  bool is_gray = rb_ptr_low_bits == gray_ptr_;
  // The high bits are supposed to be zero. We check this on the caller side.
  *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
  return is_gray;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_