/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_H_

#include "gc/allocator/rosalloc.h"
#include "malloc_space.h"
#include "space.h"

namespace art {
namespace gc {

namespace collector {
class MarkSweep;
}  // namespace collector

namespace space {

// An alloc space implemented using a runs-of-slots memory allocator. Not final, since it may
// be overridden by a MemoryToolMallocSpace.
class RosAllocSpace : public MallocSpace {
 public:
  // Create a RosAllocSpace with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the
  // returned space to confirm that the request was granted.
  static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
                               size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
                               bool can_move_objects);
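  // Create a RosAllocSpace on top of an already reserved memory mapping instead of requesting a
  // new one.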
  static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map,
                                         const std::string& name,
                                         size_t starting_size,
                                         size_t initial_size,
                                         size_t growth_limit,
                                         size_t capacity,
                                         bool low_memory_mode,
                                         bool can_move_objects);

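  // Allocate num_bytes, allowing the footprint limit to grow if that is needed to satisfy the
  // request.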
  mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                  size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
    return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                           bytes_tl_bulk_allocated);
  }
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(Locks::mutator_lock_) {
    return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
                                       bytes_tl_bulk_allocated);
  }
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    return AllocationSizeNonvirtual<true>(obj, usable_size);
  }
  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                  size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
    // RosAlloc zeroes memory internally.
    return AllocCommon(self, num_bytes, bytes_allocated, usable_size,
                       bytes_tl_bulk_allocated);
  }
  mirror::Object* AllocNonvirtualThreadUnsafe(Thread* self, size_t num_bytes,
                                              size_t* bytes_allocated, size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated) {
    // RosAlloc zeroes memory internally. Pass in false for thread unsafe.
    return AllocCommon<false>(self, num_bytes, bytes_allocated, usable_size,
                              bytes_tl_bulk_allocated);
  }

  // Returns true if the given allocation request can be allocated in
  // an existing thread local run without allocating a new run.
  ALWAYS_INLINE bool CanAllocThreadLocal(Thread* self, size_t num_bytes);
  // Allocate the given allocation request in an existing thread local
  // run without allocating a new run.
  ALWAYS_INLINE mirror::Object* AllocThreadLocal(Thread* self, size_t num_bytes,
                                                 size_t* bytes_allocated);
  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
    return MaxBytesBulkAllocatedForNonvirtual(num_bytes);
  }
  ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);

  // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
  template<bool kMaybeIsRunningOnMemoryTool>
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      NO_THREAD_SAFETY_ANALYSIS;

  allocator::RosAlloc* GetRosAlloc() const {
    return rosalloc_;
  }

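  // Hand unused memory pages back to the system.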
  size_t Trim() OVERRIDE;
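  // Visit the allocated regions of the space, invoking callback on each one.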
  void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
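  // The footprint is the number of bytes the space has currently obtained from the system; the
  // footprint limit bounds how far it may grow.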
  size_t GetFootprint() OVERRIDE;
  size_t GetFootprintLimit() OVERRIDE;
  void SetFootprintLimit(size_t limit) OVERRIDE;

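  // Reset the space to its initial, empty state, releasing its memory back to the system.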
  void Clear() OVERRIDE;

  MallocSpace* CreateInstance(MemMap&& mem_map,
                              const std::string& name,
                              void* allocator,
                              uint8_t* begin,
                              uint8_t* end,
                              uint8_t* limit,
                              size_t growth_limit,
                              bool can_move_objects) OVERRIDE;

  uint64_t GetBytesAllocated() OVERRIDE;
  uint64_t GetObjectsAllocated() OVERRIDE;

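  // Return a thread's (or every thread's) thread-local runs to the shared pool so that their free
  // slots become available to other threads. The Assert* variants check that this has been done.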
  size_t RevokeThreadLocalBuffers(Thread* thread);
  size_t RevokeAllThreadLocalBuffers();
  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
  void AssertAllThreadLocalBuffersAreRevoked();

  // Returns the class of a recently freed object.
  mirror::Class* FindRecentFreedObject(const mirror::Object* obj);

  bool IsRosAllocSpace() const OVERRIDE {
    return true;
  }

  RosAllocSpace* AsRosAllocSpace() OVERRIDE {
    return this;
  }

  void Verify() REQUIRES(Locks::mutator_lock_) {
    rosalloc_->Verify();
  }

  virtual ~RosAllocSpace();

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE {
    rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
  }

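  // Dump the underlying rosalloc's allocation statistics to os.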
  void DumpStats(std::ostream& os);

 protected:
  RosAllocSpace(MemMap&& mem_map,
                size_t initial_size,
                const std::string& name,
                allocator::RosAlloc* rosalloc,
                uint8_t* begin,
                uint8_t* end,
                uint8_t* limit,
                size_t growth_limit,
                bool can_move_objects,
                size_t starting_size,
                bool low_memory_mode);

 private:
  template<bool kThreadSafe = true>
  mirror::Object* AllocCommon(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                              size_t* usable_size, size_t* bytes_tl_bulk_allocated);

  void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                        size_t maximum_size, bool low_memory_mode) OVERRIDE {
    return CreateRosAlloc(
        base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
  }
  static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
                                             size_t maximum_size, bool low_memory_mode,
                                             bool running_on_memory_tool);

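  // Walk the regions of the underlying rosalloc, invoking callback on each; the WithSuspendAll
  // variant runs the walk inside a scope where all mutator threads are suspended.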
  void InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                          void* arg, bool do_null_callback_at_end)
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
  void InspectAllRosAllocWithSuspendAll(
      void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
      void* arg, bool do_null_callback_at_end)
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);

  // Underlying rosalloc.
  allocator::RosAlloc* rosalloc_;

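  // If true, the underlying rosalloc is configured to release empty pages back to the system more
  // eagerly.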
  const bool low_memory_mode_;

  friend class collector::MarkSweep;

  DISALLOW_COPY_AND_ASSIGN(RosAllocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_H_