/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "gc/accounting/read_barrier_table.h"
#include "object_callbacks.h"
#include "space.h"
#include "thread.h"

namespace art {
namespace gc {
namespace space {

// A space that consists of equal-sized regions.
class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void (*WalkCallback)(void* start, void* end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the
  // returned space to confirm that the request was granted.
  static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);

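  // Illustrative usage sketch (the name and capacity are hypothetical example
  // values). Because the requested base address is only a hint, a caller that
  // depends on it should verify via Begin():
  //
  //   RegionSpace* space = RegionSpace::Create("region space", 256 * MB, requested_begin);
  //   if (requested_begin != nullptr && space->Begin() != requested_begin) {
  //     // The requested base address was not granted.
  //   }
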
  // Allocate num_bytes; returns null if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!region_lock_);
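  // Minimal call sketch (hypothetical values; assumes `space` is a valid
  // RegionSpace* and `self` the current Thread*):
  //
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = space->Alloc(self, 64, &bytes_allocated,
  //                                      &usable_size, &bytes_tl_bulk_allocated);
  //   if (obj == nullptr) { /* the space is full */ }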
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                size_t* usable_size,
                                                size_t* bytes_tl_bulk_allocated)
      REQUIRES(!region_lock_);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
                             size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);

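  // A large allocation spans a run of contiguous regions: the first is marked
  // kRegionStateLarge and the rest kRegionStateLargeTail (see RegionState
  // below), so an allocation of n bytes occupies ceil(n / kRegionSize) regions.
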
  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);

  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    // No live bitmap.
    return nullptr;
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    // No mark bitmap.
    return nullptr;
  }

  void Clear() OVERRIDE REQUIRES(!region_lock_);

  void Dump(std::ostream& os) const;
  void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
  void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);

  size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
  size_t RevokeAllThreadLocalBuffers()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,       // Free region.
    kRegionStateAllocated,  // Allocated region.
    kRegionStateLarge,      // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,  // Large tail (non-first regions of a large allocation).
  };

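  // A region's state starts as kRegionStateFree and becomes kRegionStateAllocated
  // via Region::Unfree(), or kRegionStateLarge/kRegionStateLargeTail via
  // Region::UnfreeLarge()/UnfreeLargeTail(); Region::Clear() resets it to
  // kRegionStateFree.
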
  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
  uint64_t GetBytesAllocated() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() OVERRIDE {
    return this;
  }

  // Go through all of the regions and visit the continuous objects.
  void Walk(ObjectCallback* callback, void* arg)
      REQUIRES(Locks::mutator_lock_) {
    WalkInternal<false>(callback, arg);
  }

  void WalkToSpace(ObjectCallback* callback, void* arg)
      REQUIRES(Locks::mutator_lock_) {
    WalkInternal<true>(callback, arg);
  }

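  // Illustrative Walk() visitor (a sketch assuming the ObjectCallback typedef
  // from object_callbacks.h, i.e. void(mirror::Object* obj, void* arg)):
  //
  //   static void CountObjectCallback(mirror::Object* obj ATTRIBUTE_UNUSED, void* arg) {
  //     ++*reinterpret_cast<size_t*>(arg);  // Count each visited object.
  //   }
  //   size_t count = 0;
  //   region_space->Walk(CountObjectCallback, &count);  // Needs the mutator lock.
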
  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 1 * MB;

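  // For example, with kRegionSize = 1 MB, a space created with a 256 MB
  // capacity is carved into 256 equal-sized regions (capacity / kRegionSize).
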
  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->Type();
    }
    return RegionType::kRegionTypeNone;
  }

  void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
      REQUIRES(!region_lock_);

  size_t FromSpaceSize() REQUIRES(!region_lock_);
  size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
  size_t ToSpaceSize() REQUIRES(!region_lock_);
  void ClearFromSpace() REQUIRES(!region_lock_);

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_) {
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), region_lock_);
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        size_t live_bytes = r->LiveBytes();
        CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
      }
    }
  }

  void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
  bool AllocNewTlab(Thread* self) REQUIRES(!region_lock_);

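  // Note: AllocNewTlab() hands out a region as the calling thread's TLAB;
  // when the buffer is revoked (RevokeThreadLocalBuffers() above), the TLAB's
  // counts are recorded back into its region via
  // Region::RecordThreadLocalAllocations() below.
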
  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap* mem_map);

  template<bool kToSpaceOnly>
  void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          begin_(nullptr), top_(nullptr), end_(nullptr),
          state_(RegionState::kRegionStateAllocated), type_(RegionType::kRegionTypeToSpace),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}

    Region(size_t idx, uint8_t* begin, uint8_t* end)
        : idx_(idx), begin_(begin), top_(begin), end_(end),
          state_(RegionState::kRegionStateFree), type_(RegionType::kRegionTypeNone),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

    void Clear() {
      top_ = begin_;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_ = 0;
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      if (!kMadviseZeroes) {
        memset(begin_, 0, end_ - begin_);
      }
      madvise(begin_, end_ - begin_, MADV_DONTNEED);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
    }

    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated);

    bool IsFree() const {
      bool is_free = state_ == RegionState::kRegionStateFree;
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateAllocated;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void UnfreeLarge(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLarge;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void UnfreeLargeTail(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLargeTail;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = state_ == RegionState::kRegionStateLarge;
      if (is_large) {
        DCHECK_LT(begin_ + kRegionSize, top_);
      }
      return is_large;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
      if (is_large_tail) {
        DCHECK_EQ(begin_, top_);
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      live_bytes_ = static_cast<size_t>(-1);
    }

    void SetAsUnevacFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeUnevacFromSpace;
      live_bytes_ = 0U;
    }

    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

    ALWAYS_INLINE bool ShouldBeEvacuated();

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      live_bytes_ += live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

    size_t BytesAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, top_);
        return static_cast<size_t>(top_ - begin_);
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        DCHECK_LE(begin_, top_);
        size_t bytes = static_cast<size_t>(top_ - begin_);
        DCHECK_LE(bytes, kRegionSize);
        return bytes;
      }
    }

    size_t ObjectsAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 1;
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        return objects_allocated_;
      }
    }

    uint8_t* Begin() const {
      return begin_;
    }

    uint8_t* Top() const {
      return top_;
    }

    void SetTop(uint8_t* new_top) {
      top_ = new_top;
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(objects_allocated_, 0U);
      DCHECK_EQ(top_, end_);
      objects_allocated_ = num_objects;
      top_ = begin_ + num_bytes;
      DCHECK_EQ(top_, end_);
    }

   private:
    size_t idx_;                   // The region's index in the region space.
    uint8_t* begin_;               // The begin address of the region.
    // Can't use Atomic<uint8_t*> as Atomic's copy operator is implicitly deleted.
    uint8_t* top_;                 // The current position of the allocation.
    uint8_t* end_;                 // The end address of the region.
    RegionState state_;            // The region state (see RegionState).
    RegionType type_;              // The region type (see RegionType).
    uint64_t objects_allocated_;   // The number of objects allocated.
    uint32_t alloc_time_;          // The allocation time of the region.
    size_t live_bytes_;            // The live bytes. Used to compute the live percent.
                                   // static_cast<size_t>(-1) when not tracked (see SetAsFromSpace()).
    bool is_newly_allocated_;      // True if it's allocated after the last collection.
    bool is_a_tlab_;               // True if it's a tlab.
    Thread* thread_;               // The owning thread if it's a tlab.

    friend class RegionSpace;
  };

  Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance reasons (this is frequently called via
    // IsInFromSpace() etc.) we avoid taking a lock here. Note that
    // since a region changes from to-space to from-space only during
    // a pause (SetFromSpace()) and from from-space to free only after
    // the GC is done, as long as ref is a valid reference into an
    // allocated region, it's safe to access the region state without
    // the lock.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }

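  // Worked example of the mapping above (addresses hypothetical): with
  // kRegionSize = 1 MB, a ref at Begin() + 5 * MB + 16 yields
  // reg_idx = (5 * MB + 16) / kRegionSize = 5, i.e. regions_[5].
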
  mirror::Object* GetNextObject(mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  uint32_t time_;                  // The time, measured as the number of collections since startup.
  size_t num_regions_;             // The number of regions in this space.
  size_t num_non_free_regions_;    // The number of non-free regions in this space.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
                                   // The pointer to the region array.
  Region* current_region_;         // The region that's being allocated currently.
  Region* evac_region_;            // The region that's being evacuated to currently.
  Region full_region_;             // The dummy/sentinel region that looks full.

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionState& value);
std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionType& value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_