// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::boot_space_ = NULL;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

Mutex* Heap::lock_ = NULL;

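// RAII helper that holds the heap lock for the duration of a scope.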
class ScopedHeapLock {
 public:
  ScopedHeapLock() {
    Heap::Lock();
  }

  ~ScopedHeapLock() {
    Heap::Unlock();
  }
};

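// Creates the heap: maps the optional boot image and any additional image
// files as spaces, creates the allocation space, and allocates live and mark
// bitmaps covering the combined address range of all spaces.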
void Heap::Init(size_t initial_size, size_t maximum_size,
                const char* boot_image_file_name,
                std::vector<const char*>& image_file_names) {
  Space* boot_space;
  byte* requested_base;
  if (boot_image_file_name == NULL) {
    boot_space = NULL;
    requested_base = NULL;
  } else {
    boot_space = Space::Create(boot_image_file_name);
    if (boot_space == NULL) {
      LOG(FATAL) << "Failed to create space from " << boot_image_file_name;
    }
    spaces_.push_back(boot_space);
    requested_base = boot_space->GetBase() + RoundUp(boot_space->Size(), kPageSize);
  }

  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::Create(image_file_names[i]);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_names[i];
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    requested_base = space->GetBase() + RoundUp(space->Size(), kPageSize);
  }

  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }

  if (boot_space == NULL) {
    boot_space = space;
  }
  byte* base = std::min(boot_space->GetBase(), space->GetBase());
  byte* limit = std::max(boot_space->GetLimit(), space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make objects in boot_space live (after live_bitmap_ is set).
  if (boot_image_file_name != NULL) {
    boot_space_ = boot_space;
    RecordImageAllocations(boot_space);
  }
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");
}

void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
    live_bitmap_ = NULL;
  }
}

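// Allocates num_bytes for an instance of klass and, on success, stores the
// class pointer in the new object's header; returns NULL on failure.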
Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  ScopedHeapLock lock;
  DCHECK(klass == NULL
         || klass->GetDescriptor() == NULL
         || (klass->IsClassClass() && num_bytes >= sizeof(Class))
         || (klass->IsVariableSize() || klass->GetObjectSize() == num_bytes));
  DCHECK(num_bytes >= sizeof(Object));
  Object* obj = AllocateLocked(num_bytes);
  if (obj != NULL) {
    obj->SetClass(klass);
  }
  return obj;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything
  // that would require taking the lock.
  if (!IsAligned(obj, kObjectAlignment)) {
    return false;
  }
  // TODO
  return true;
}

bool Heap::verify_object_disabled_;

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

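// Checks that obj is aligned and marked live, and (once more than a handful
// of objects have been allocated) that its class pointer is itself an
// aligned, live object with a consistent class-of-class.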
void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL && !verify_object_disabled_) {
    if (!IsAligned(obj, kObjectAlignment)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bitmap
      // isn't changing if the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned(c, kObjectAlignment)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
      // NB we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

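// Bitmap walk callback used by VerifyHeap to verify every live object.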
void Heap::VerificationCallback(Object* obj, void *arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

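// Bookkeeping for a successful allocation: adds the allocation's size to the
// byte count, bumps the object count, and marks the object live. The heap
// lock is asserted held (in debug builds) once the runtime has started.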
void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;
  live_bitmap_->Set(obj);
}

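// Reverses the bookkeeping of RecordAllocationLocked, clamping both counters
// at zero rather than letting them underflow.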
void Heap::RecordFreeLocked(Space* space, const Object* obj) {
  lock_->AssertHeld();
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }
}

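// Walks an image space object by object, marking every object live in the
// live bitmap. Image objects are never allocated through AllocObject, so they
// must be recorded here. Only valid before the runtime has started.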
void Heap::RecordImageAllocations(Space* space) {
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK(IsAligned(current, kObjectAlignment));
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
}

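// Allocates from the single allocation space, recording the allocation in the
// heap's counters and live bitmap on success.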
Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

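// Tries the allocation with progressively more aggressive measures: allocate
// within the current footprint; if a concurrent GC is running, wait for it
// and retry; run a foreground GC and retry; grow the heap; finally GC once
// more (intended to clear SoftReferences) before giving up and returning NULL.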
Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << "MB for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

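// Performs a full mark-sweep collection with the heap lock held: mark roots,
// recursively mark reachable objects, process references, sweep, then grow
// the footprint toward the target utilization. Thread suspension and the
// concurrent variant are still TODOs.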
void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

  // TODO: Suspend all threads
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    //   unlock heap
    //   resume threads

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   suspend threads
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();

  // TODO: Resume all threads
}

void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  lock_->AssertHeld();
  UNIMPLEMENTED(ERROR);
}

void Heap::Lock() {
  // TODO: grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
  // we're going to have to wait on the mutex.
  lock_->Lock();
}

void Heap::Unlock() {
  lock_->Unlock();
}

}  // namespace art