// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::boot_space_ = NULL;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

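// Creates the heap: maps the boot image and any additional image files,
// creates a growable allocation space, allocates live and mark bitmaps
// spanning the combined address range, and marks all image objects live.
// Returns false on any failure.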
bool Heap::Init(size_t initial_size, size_t maximum_size,
                const char* boot_image_file_name,
                std::vector<const char*>& image_file_names) {
  Space* boot_space;
  byte* requested_base;
  if (boot_image_file_name == NULL) {
    boot_space = NULL;
    requested_base = NULL;
  } else {
    boot_space = Space::Create(boot_image_file_name);
    if (boot_space == NULL) {
      LOG(WARNING) << "Failed to create space from " << boot_image_file_name;
      return false;
    }
    spaces_.push_back(boot_space);
    requested_base = boot_space->GetBase() + RoundUp(boot_space->Size(), kPageSize);
  }

  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::Create(image_file_names[i]);
    if (space == NULL) {
      LOG(WARNING) << "Failed to create space from " << image_file_names[i];
      return false;
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    requested_base = space->GetBase() + RoundUp(space->Size(), kPageSize);
  }

  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    LOG(WARNING) << "Failed to create alloc space";
    return false;
  }

  if (boot_space == NULL) {
    boot_space = space;
  }
  byte* base = std::min(boot_space->GetBase(), space->GetBase());
  byte* limit = std::max(boot_space->GetLimit(), space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(WARNING) << "Failed to create live bitmap";
    return false;
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(WARNING) << "Failed to create mark bitmap";
    return false;
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make objects in boot_space live (after live_bitmap_ is set)
  if (boot_image_file_name != NULL) {
    boot_space_ = boot_space;
    RecordImageAllocations(boot_space);
  }
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  return true;
}

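// Tears the heap down: deletes every space and both bitmaps. Safe to call
// even if Init() failed partway through.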
void Heap::Destroy() {
  STLDeleteElements(&spaces_);
  delete mark_bitmap_;
  mark_bitmap_ = NULL;
  delete live_bitmap_;
  live_bitmap_ = NULL;
}

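// Allocates storage for an instance of klass and installs its class
// pointer, returning NULL on failure. The DCHECKs cross-check num_bytes
// against the size the class says its instances should have.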
Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  DCHECK(klass == NULL
         || klass->GetDescriptor() == NULL
         || (klass->IsClassClass() && num_bytes >= sizeof(Class))
         || (klass->IsVariableSize() || klass->GetObjectSize() == num_bytes));
  DCHECK(num_bytes >= sizeof(Object));
  Object* obj = Allocate(num_bytes);
  if (obj != NULL) {
    obj->SetClass(klass);
  }
  return obj;
}

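// Returns true if obj could be a valid heap object. Currently this only
// checks alignment; the address-range test is still TODO.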
bool Heap::IsHeapAddress(const Object* obj) {
  if (!IsAligned(obj, kObjectAlignment)) {
    return false;
  }
  // TODO
  return true;
}

bool Heap::verify_object_disabled_;

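// Heavyweight per-object sanity checks, compiled in only when
// VERIFY_OBJECT_ENABLED is set.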
#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (obj != NULL && !verify_object_disabled_) {
    if (!IsAligned(obj, kObjectAlignment)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bitmap
      // isn't changing while the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned(c, kObjectAlignment)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
      // NB we don't use the accessors here as they have internal sanity checks
      // that we don't want to run
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}
#endif

static void HeapVerifyCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObject(obj);
}

void Heap::VerifyHeap() {
  live_bitmap_->Walk(HeapVerifyCallback, NULL);
}

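// Bookkeeping for a successful allocation: bumps the byte and object
// counters and marks obj live in the bitmap.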
void Heap::RecordAllocation(Space* space, const Object* obj) {
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;
  live_bitmap_->Set(obj);
}

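// Reverses RecordAllocation for a freed object, clamping both counters at
// zero so an accounting error cannot wrap the unsigned totals.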
void Heap::RecordFree(Space* space, const Object* obj) {
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }
}

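// Marks every object in an image space live by walking the space from just
// past its ImageHeader to its limit, so the collector never reclaims image
// objects.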
void Heap::RecordImageAllocations(Space* space) {
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK(IsAligned(current, kObjectAlignment));
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
}

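// Fast-path entry point: allocates size bytes from the alloc space and, on
// success, records the allocation in the heap counters and live bitmap.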
Object* Heap::Allocate(size_t size) {
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = Allocate(space, size);
  if (obj != NULL) {
    RecordAllocation(space, obj);
  }
  return obj;
}

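// Slow-path allocation. Escalates through progressively more drastic
// measures: a plain allocation, waiting out a concurrent GC, a foreground
// GC, growing the heap, and finally a soft-reference-clearing GC before
// giving up and returning NULL.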
Object* Heap::Allocate(Space* space, size_t size) {
  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    // size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << "MB for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

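// Heap-size accessors in the java.lang.Runtime style (max/total/free);
// all three are unimplemented stubs for now.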
int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

void Heap::CollectGarbage() {
  CollectGarbageInternal();
}

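// A single stop-the-world mark-sweep cycle: mark the roots, trace the
// reachable graph, process reference objects, sweep unmarked objects, then
// grow the heap toward the target utilization. Thread suspension and the
// concurrent variant are still TODO.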
void Heap::CollectGarbageInternal() {
  // TODO: check that heap lock is held

  // TODO: Suspend all threads
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    // unlock heap
    // resume threads

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    // lock heap
    // suspend threads
    // re-mark root set
    // scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();

  // TODO: Resume all threads
}

void Heap::WaitForConcurrentGcToComplete() {
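  // No-op for now: nothing in this file ever sets is_gc_running_, so there
  // is no concurrent collection to wait on yet.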
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
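//
// A minimal sketch of one possible policy (not the committed
// implementation), assuming a hypothetical kTargetUtilization fraction in
// (0, 1] and a Space::SetMaxAllowedFootprint() hook, neither of which
// exists yet:
//
//   size_t target = static_cast<size_t>(num_bytes_allocated_ / kTargetUtilization);
//   alloc_space_->SetMaxAllowedFootprint(std::min(target, maximum_size_));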
void Heap::GrowForUtilization() {
  UNIMPLEMENTED(ERROR);
}

}  // namespace art