/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

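// Creates a space backed by a fresh anonymous mapping, placed in the low 4GB so references into
// it fit in 32 bits. A hypothetical call, for illustration only (name and capacity are made up):
//
//   BumpPointerSpace* space = BumpPointerSpace::Create("bump pointer space", 256 * MB);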
BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
  capacity = RoundUp(capacity, kPageSize);
  std::string error_msg;
  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
                                        capacity,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ true,
                                        &error_msg);
  if (!mem_map.IsValid()) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    return nullptr;
  }
  return new BumpPointerSpace(name, std::move(mem_map));
}

BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap&& mem_map) {
  return new BumpPointerSpace(name, std::move(mem_map));
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
    : ContinuousMemMapAllocSpace(name,
                                 MemMap::Invalid(),
                                 begin,
                                 begin,
                                 limit,
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(limit),
      objects_allocated_(0),
      bytes_allocated_(0),
      block_lock_("Block lock"),
      main_block_size_(0),
      num_blocks_(0) {
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
    : ContinuousMemMapAllocSpace(name,
                                 std::move(mem_map),
                                 mem_map.Begin(),
                                 mem_map.Begin(),
                                 mem_map.End(),
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(mem_map_.End()),
      objects_allocated_(0),
      bytes_allocated_(0),
      block_lock_("Block lock", kBumpPointerSpaceBlockLock),
      main_block_size_(0),
      num_blocks_(0) {
}

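// Returns the backing pages to the OS and rewinds the bump pointer to Begin(), making the whole
// space available for reuse.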
void BumpPointerSpace::Clear() {
  // Release the pages back to the operating system.
  if (!kMadviseZeroes) {
    memset(Begin(), 0, Limit() - Begin());
  }
  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
  // Reset the end of the space back to the beginning; we move the end forward as we
  // allocate objects.
  SetEnd(Begin());
  objects_allocated_.store(0, std::memory_order_relaxed);
  bytes_allocated_.store(0, std::memory_order_relaxed);
  growth_end_ = Limit();
  {
    MutexLock mu(Thread::Current(), block_lock_);
    num_blocks_ = 0;
    main_block_size_ = 0;
  }
}

void BumpPointerSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
     << reinterpret_cast<void*>(Limit());
}

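// Objects are packed back to back, so the object after obj begins at the first kAlignment-aligned
// address past obj's end. For example (hypothetical values, assuming kAlignment == 8): an object
// at 0x1000 with SizeOf() == 20 ends at 0x1014, so the next object starts at
// RoundUp(0x1014, 8) == 0x1018.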
mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

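// Flushes the thread's TLAB accounting into the space and detaches the buffer. Returns 0U since
// no memory is freed; the buffer's storage simply stays behind in the space.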
size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  // TODO: Avoid copying the thread list?
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), block_lock_);
    DCHECK(!thread->HasTlab());
  }
}

void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    // TODO: Avoid copying the thread list?
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

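// The main block is the initial region of the space that threads share before any thread-local
// blocks exist; its size is frozen here just before the first block is carved out.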
void BumpPointerSpace::UpdateMainBlock() {
  DCHECK_EQ(num_blocks_, 0U);
  main_block_size_ = Size();
}

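// Blocks carve up the space after the main block. Each block is a BlockHeader recording the
// block's byte size, immediately followed by the storage returned to the caller:
//
//   [BlockHeader | storage (size_ bytes)][BlockHeader | storage (size_ bytes)]...
//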
// Returns the start of the storage.
uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
  bytes = RoundUp(bytes, kAlignment);
  if (!num_blocks_) {
    UpdateMainBlock();
  }
  uint8_t* storage = reinterpret_cast<uint8_t*>(
      AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
  if (LIKELY(storage != nullptr)) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
    header->size_ = bytes;  // Write out the block header.
    storage += sizeof(BlockHeader);
    ++num_blocks_;
  }
  return storage;
}

accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}

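// The two accessors below combine the space-wide counters (updated as thread-local buffers are
// revoked) with the counts still held in live threads' TLABs, which must be summed under the
// thread list lock.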
uint64_t BumpPointerSpace::GetBytesAllocated() {
  // Start with the pre-determined amount (blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(bytes_allocated_.load(std::memory_order_relaxed));
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), block_lock_);
  // If we don't have any blocks, we don't have any thread local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalBytesAllocated();
    }
  }
  return total;
}

uint64_t BumpPointerSpace::GetObjectsAllocated() {
  // Start with the pre-determined amount (blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(objects_allocated_.load(std::memory_order_relaxed));
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), block_lock_);
  // If we don't have any blocks, we don't have any thread local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalObjectsAllocated();
    }
  }
  return total;
}

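// Moves the thread's TLAB object and byte counts into the space-wide atomic counters, then
// detaches the TLAB from the thread.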
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_relaxed);
  bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_relaxed);
  thread->ResetTlab();
}

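// Installs a freshly allocated block as the thread's TLAB, revoking any previous TLAB first so
// its accounting is preserved.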
bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(self);
  uint8_t* start = AllocBlock(bytes);
  if (start == nullptr) {
    return false;
  }
  self->SetTlab(start, start + bytes, start + bytes);
  return true;
}

void BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
                                                    size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = Limit() - End();
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

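// An object's allocation size is its exact SizeOf(); the usable size additionally includes the
// alignment padding up to kAlignment, since the bump pointer skips over that padding anyway.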
size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    *usable_size = RoundUp(num_bytes, kAlignment);
  }
  return num_bytes;
}

}  // namespace space
}  // namespace gc
}  // namespace art