/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

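// Creates a bump pointer space backed by a freshly mapped anonymous region. The requested
// capacity is rounded up to a whole number of pages and the mapping is placed in the low 4GB.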
BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
  capacity = RoundUp(capacity, kPageSize);
  std::string error_msg;
  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
                                        capacity,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ true,
                                        &error_msg);
  if (!mem_map.IsValid()) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    return nullptr;
  }
  return new BumpPointerSpace(name, std::move(mem_map));
}

BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap&& mem_map) {
  return new BumpPointerSpace(name, std::move(mem_map));
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
    : ContinuousMemMapAllocSpace(name,
                                 MemMap::Invalid(),
                                 begin,
                                 begin,
                                 limit,
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(limit),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock"),
      main_block_size_(0),
      num_blocks_(0) {
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
    : ContinuousMemMapAllocSpace(name,
                                 std::move(mem_map),
                                 mem_map.Begin(),
                                 mem_map.Begin(),
                                 mem_map.End(),
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(mem_map_.End()),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock", kBumpPointerSpaceBlockLock),
      main_block_size_(0),
      num_blocks_(0) {
}

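// Madvises the pages back to the kernel and resets all allocation bookkeeping so the space is
// logically empty again. On configurations where madvise is not guaranteed to zero the pages
// (!kMadviseZeroes), the memory is zeroed explicitly first.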
void BumpPointerSpace::Clear() {
  // Release the pages back to the operating system.
  if (!kMadviseZeroes) {
    memset(Begin(), 0, Limit() - Begin());
  }
  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
  // Reset the end of the space back to the beginning; we move the end forward as we allocate
  // objects.
  SetEnd(Begin());
  objects_allocated_.store(0, std::memory_order_relaxed);
  bytes_allocated_.store(0, std::memory_order_relaxed);
  growth_end_ = Limit();
  {
    MutexLock mu(Thread::Current(), block_lock_);
    num_blocks_ = 0;
    main_block_size_ = 0;
  }
}

void BumpPointerSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
     << reinterpret_cast<void*>(Limit());
}

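// Objects are packed contiguously: the object following 'obj' starts at obj's end address,
// rounded up to kAlignment.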
mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

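// Folds a single thread's TLAB back into the space under the block lock. The returned byte
// count is always 0 here, since the TLAB memory stays allocated within the space rather than
// being reclaimed.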
size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  // TODO: Avoid copying the thread list?
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), block_lock_);
    DCHECK(!thread->HasTlab());
  }
}

void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    // TODO: Avoid copying the thread list?
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

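// The main block is the run of objects bump-allocated directly into the space before any
// thread-local blocks are carved out. Its size is recorded (as the current Size()) when the
// first block is allocated, and reset to zero by Clear().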
void BumpPointerSpace::UpdateMainBlock() {
  DCHECK_EQ(num_blocks_, 0U);
  main_block_size_ = Size();
}

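// Each block is 'bytes' of storage (rounded up to kAlignment) preceded by a BlockHeader that
// records the block size.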
// Returns the start of the storage.
uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
  bytes = RoundUp(bytes, kAlignment);
  if (!num_blocks_) {
    UpdateMainBlock();
  }
  uint8_t* storage = reinterpret_cast<uint8_t*>(
      AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
  if (LIKELY(storage != nullptr)) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
    header->size_ = bytes;  // Write out the block header.
    storage += sizeof(BlockHeader);
    ++num_blocks_;
  }
  return storage;
}

accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}

uint64_t BumpPointerSpace::GetBytesAllocated() {
  // Start with the pre-determined amount (from blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(bytes_allocated_.load(std::memory_order_relaxed));
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), block_lock_);
  // If we don't have any blocks, we don't have any thread-local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalBytesAllocated();
    }
  }
  return total;
}

uint64_t BumpPointerSpace::GetObjectsAllocated() {
  // Start with the pre-determined amount (from blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(objects_allocated_.load(std::memory_order_relaxed));
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), block_lock_);
  // If we don't have any blocks, we don't have any thread-local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalObjectsAllocated();
    }
  }
  return total;
}

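// Requires the block lock. Adds the thread's TLAB object/byte counts to the space-wide totals
// and detaches the TLAB from the thread.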
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_relaxed);
  bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_relaxed);
  thread->ResetTlab();
}

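// Hands 'self' a fresh TLAB of 'bytes' carved out of this space. Any existing TLAB is revoked
// first so its counts are folded back into the space. Returns false if the block cannot be
// allocated.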
bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(self);
  uint8_t* start = AllocBlock(bytes);
  if (start == nullptr) {
    return false;
  }
  self->SetTlab(start, start + bytes, start + bytes);
  return true;
}

void BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
                                                    size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = Limit() - End();
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

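// The allocation size of an object is its SizeOf(); the usable size is that value rounded up
// to kAlignment, matching the spacing used by GetNextObject.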
size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    *usable_size = RoundUp(num_bytes, kAlignment);
  }
  return num_bytes;
}

}  // namespace space
}  // namespace gc
}  // namespace art