/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {

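// Performs the read barrier, if enabled, on a heap reference field load. Given the holder object
// `obj`, the field `offset`, and the field address `ref_addr`, returns a reference that satisfies
// the to-space invariant. With read barriers compiled out, this reduces to a plain load.
//
// A minimal call-site sketch (hypothetical; the real callers are the object field getters in
// mirror/object-inl.h):
//
//   mirror::HeapReference<mirror::Object>* addr = obj->GetFieldObjectReferenceAddr(offset);
//   mirror::Object* ref =
//       ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kMaybeDuringStartup=*/false>(
//           obj, offset, addr);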
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 36 | // The higher bits of the rb ptr, rb_ptr_high_bits (must be zero) |
| 37 | // is used to create artificial data dependency from the is_gray |
| 38 | // load to the ref field (ptr) load to avoid needing a load-load |
| 39 | // barrier between the two. |
    uintptr_t rb_ptr_high_bits;
    bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
    ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
        rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
    MirrorType* ref = ref_addr->AsMirrorPtr();
    if (is_gray) {
      // Slow-path.
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    if (kEnableReadBarrierInvariantChecks) {
      CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref_addr->AsMirrorPtr();
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    MirrorType* ref = ref_addr->AsMirrorPtr();
    MirrorType* old_ref = ref;
    // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the field atomically. This may fail if a mutator updates the field before us,
      // but that's OK.
      if (ref != old_ref) {
        obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
            offset, old_ref, ref);
      }
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

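// Performs the read barrier, if enabled, on a GC root stored as a raw mirror pointer. Returns
// the (possibly to-space updated) reference; `gc_root_source` is only used for invariant-check
// diagnostics.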
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      MirrorType* old_ref = ref;
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the root atomically. This may fail if a mutator updates the root before us,
      // but that's OK.
      if (ref != old_ref) {
        Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
        atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

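// Same as above, but for a GC root stored as a compressed reference rather than a raw pointer.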
// TODO: Reduce copy paste
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if a mutator updates the root before us,
      // but that's OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

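// Returns true while the runtime is too early in startup (or is not running the concurrent
// copying collector) for the to-space invariant checks below to be performed.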
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

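// Asserts that a reference loaded from `obj` at `offset` points into the to-space, i.e. that the
// read barrier upheld its invariant. A no-op unless kEnableToSpaceInvariantChecks or
// kIsDebugBuild is set.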
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

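// Same as above, but for a reference loaded from a GC root; `gc_root_source` identifies the root
// for diagnostics.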
inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

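// Marks `obj` via the concurrent copying collector and returns its to-space reference. This is
// the slow path shared by the barriers above.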
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->Mark(obj);
}

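// Loads the object's read barrier pointer and returns whether its low (color) bits are gray. The
// high bits are returned through `out_rb_ptr_high_bits` so the caller can both verify that they
// are zero and use them to build the data dependency in Barrier().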
inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
                                                   uintptr_t* out_rb_ptr_high_bits) {
  mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
  uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
  uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
  if (kEnableReadBarrierInvariantChecks) {
    CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
          rb_ptr_low_bits == black_ptr_)
        << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << PrettyTypeOf(obj);
  }
  bool is_gray = rb_ptr_low_bits == gray_ptr_;
  // The high bits are supposed to be zero. We check this on the caller side.
  *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
  return is_gray;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_