blob: 074636187a71b1a46df423efbe9e18fb517868d1 [file] [log] [blame]
Carl Shapiro69759ea2011-07-21 18:13:35 -07001// Copyright 2011 Google Inc. All Rights Reserved.
2// Author: cshapiro@google.com (Carl Shapiro)
3
Brian Carlstrom578bbdc2011-07-21 14:07:47 -07004#include "heap.h"
Carl Shapiro58551df2011-07-24 03:09:51 -07005
6#include <vector>
7
8#include "mark_sweep.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -07009#include "object.h"
10#include "space.h"
Carl Shapirofc322c72011-07-27 00:20:01 -070011#include "scoped_ptr.h"
Carl Shapiro58551df2011-07-24 03:09:51 -070012#include "stl_util.h"
Carl Shapiro69759ea2011-07-21 18:13:35 -070013
14namespace art {
15
Carl Shapiro58551df2011-07-24 03:09:51 -070016std::vector<Space*> Heap::spaces_;
Carl Shapiro69759ea2011-07-21 18:13:35 -070017
18size_t Heap::startup_size_ = 0;
19
20size_t Heap::maximum_size_ = 0;
21
Carl Shapiro58551df2011-07-24 03:09:51 -070022size_t Heap::num_bytes_allocated_ = 0;
23
24size_t Heap::num_objects_allocated_ = 0;
25
Carl Shapiro69759ea2011-07-21 18:13:35 -070026bool Heap::is_gc_running_ = false;
27
28HeapBitmap* Heap::mark_bitmap_ = NULL;
29
30HeapBitmap* Heap::live_bitmap_ = NULL;
31
32bool Heap::Init(size_t startup_size, size_t maximum_size) {
Carl Shapiro58551df2011-07-24 03:09:51 -070033 Space* space = Space::Create(startup_size, maximum_size);
34 if (space == NULL) {
Carl Shapiro69759ea2011-07-21 18:13:35 -070035 return false;
36 }
37
Carl Shapiro58551df2011-07-24 03:09:51 -070038 byte* base = space->GetBase();
39 size_t num_bytes = space->Size();
Carl Shapiro69759ea2011-07-21 18:13:35 -070040
41 // Allocate the initial live bitmap.
42 scoped_ptr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
43 if (live_bitmap == NULL) {
44 return false;
45 }
46
47 // Allocate the initial mark bitmap.
48 scoped_ptr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
49 if (mark_bitmap == NULL) {
50 return false;
51 }
52
Carl Shapiro58551df2011-07-24 03:09:51 -070053 spaces_.push_back(space);
Carl Shapiro69759ea2011-07-21 18:13:35 -070054 startup_size_ = startup_size;
55 maximum_size_ = maximum_size;
56 live_bitmap_ = live_bitmap.release();
57 mark_bitmap_ = mark_bitmap.release();
58
59 // TODO: allocate the card table
60
61 return true;
62}
63
64void Heap::Destroy() {
Carl Shapiro58551df2011-07-24 03:09:51 -070065 STLDeleteElements(&spaces_);
Carl Shapiro69759ea2011-07-21 18:13:35 -070066 delete mark_bitmap_;
67 delete live_bitmap_;
68}
69
Carl Shapiro58551df2011-07-24 03:09:51 -070070Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
Brian Carlstrom74eb46a2011-08-02 20:10:14 -070071 DCHECK((klass == NULL && num_bytes == sizeof(Class))
72 || klass->descriptor_ == NULL
73 || (klass->object_size_ == (klass->IsArray() ? 0 : num_bytes)));
Carl Shapiro58551df2011-07-24 03:09:51 -070074 Object* obj = Allocate(num_bytes);
75 if (obj != NULL) {
76 obj->klass_ = klass;
77 }
78 return obj;
79}
80
81void Heap::RecordAllocation(Space* space, const Object* obj) {
82 size_t size = space->AllocationSize(obj);
83 DCHECK_NE(size, 0u);
84 num_bytes_allocated_ += size;
85 num_objects_allocated_ += 1;
86 live_bitmap_->Set(obj);
87}
88
89void Heap::RecordFree(Space* space, const Object* obj) {
90 size_t size = space->AllocationSize(obj);
91 DCHECK_NE(size, 0u);
92 if (size < num_bytes_allocated_) {
93 num_bytes_allocated_ -= size;
94 } else {
95 num_bytes_allocated_ = 0;
96 }
97 live_bitmap_->Clear(obj);
98 if (num_objects_allocated_ > 0) {
99 num_objects_allocated_ -= 1;
100 }
101}
102
Carl Shapiro69759ea2011-07-21 18:13:35 -0700103Object* Heap::Allocate(size_t size) {
Carl Shapiro58551df2011-07-24 03:09:51 -0700104 CHECK_EQ(spaces_.size(), 1u);
105 Space* space = spaces_[0];
106 Object* obj = Allocate(space, size);
107 if (obj != NULL) {
108 RecordAllocation(space, obj);
109 }
110 return obj;
111}
112
113Object* Heap::Allocate(Space* space, size_t size) {
Carl Shapiro69759ea2011-07-21 18:13:35 -0700114 // Fail impossible allocations. TODO: collect soft references.
115 if (size > maximum_size_) {
116 return NULL;
117 }
118
Carl Shapiro58551df2011-07-24 03:09:51 -0700119 Object* ptr = space->AllocWithoutGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700120 if (ptr != NULL) {
121 return ptr;
122 }
123
124 // The allocation failed. If the GC is running, block until it
125 // completes and retry.
126 if (is_gc_running_) {
127 // The GC is concurrently tracing the heap. Release the heap
128 // lock, wait for the GC to complete, and retrying allocating.
129 WaitForConcurrentGcToComplete();
Carl Shapiro58551df2011-07-24 03:09:51 -0700130 ptr = space->AllocWithoutGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700131 if (ptr != NULL) {
132 return ptr;
133 }
134 }
135
136 // Another failure. Our thread was starved or there may be too many
137 // live objects. Try a foreground GC. This will have no effect if
138 // the concurrent GC is already running.
Carl Shapiro58551df2011-07-24 03:09:51 -0700139 CollectGarbageInternal();
140 ptr = space->AllocWithoutGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700141 if (ptr != NULL) {
142 return ptr;
143 }
144
145 // Even that didn't work; this is an exceptional state.
146 // Try harder, growing the heap if necessary.
Carl Shapiro58551df2011-07-24 03:09:51 -0700147 ptr = space->AllocWithGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700148 if (ptr != NULL) {
149 //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
Carl Shapiro58551df2011-07-24 03:09:51 -0700150 size_t new_footprint = space->MaxAllowedFootprint();
151 // TODO: may want to grow a little bit more so that the amount of
152 // free space is equal to the old free space + the
153 // utilization slop for the new allocation.
154 LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
Carl Shapiro69759ea2011-07-21 18:13:35 -0700155 << "for " << size << "-byte allocation";
156 return ptr;
157 }
158
159 // Most allocations should have succeeded by now, so the heap is
160 // really full, really fragmented, or the requested size is really
161 // big. Do another GC, collecting SoftReferences this time. The VM
162 // spec requires that all SoftReferences have been collected and
163 // cleared before throwing an OOME.
164
Carl Shapiro58551df2011-07-24 03:09:51 -0700165 // TODO: wait for the finalizers from the previous GC to finish
Carl Shapiro69759ea2011-07-21 18:13:35 -0700166 LOG(INFO) << "Forcing collection of SoftReferences for "
167 << size << "-byte allocation";
Carl Shapiro58551df2011-07-24 03:09:51 -0700168 CollectGarbageInternal();
169 ptr = space->AllocWithGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700170 if (ptr != NULL) {
171 return ptr;
172 }
Carl Shapiro69759ea2011-07-21 18:13:35 -0700173
Carl Shapiro69759ea2011-07-21 18:13:35 -0700174 LOG(ERROR) << "Out of memory on a " << size << " byte allocation";
175
Carl Shapiro58551df2011-07-24 03:09:51 -0700176 // TODO: tell the HeapSource to dump its state
177 // TODO: dump stack traces for all threads
Carl Shapiro69759ea2011-07-21 18:13:35 -0700178
Carl Shapiro69759ea2011-07-21 18:13:35 -0700179 return NULL;
180}
181
Carl Shapiro69759ea2011-07-21 18:13:35 -0700182void Heap::CollectGarbage() {
Carl Shapiro58551df2011-07-24 03:09:51 -0700183 CollectGarbageInternal();
Carl Shapiro69759ea2011-07-21 18:13:35 -0700184}
185
// Runs a full mark-sweep collection (mark roots, trace, process
// references, sweep), then grows the heap toward the target
// utilization.  Concurrent collection and thread suspension are not
// implemented yet — see the inline TODOs for the intended phases.
void Heap::CollectGarbageInternal() {
  // TODO: check that heap lock is held

  // TODO: Suspend all threads
  {
    // Scoped so the MarkSweep state is torn down before the heap grows.
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    // unlock heap
    // resume threads

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    // lock heap
    // suspend threads
    // re-mark root set
    // scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();

  // TODO: Resume all threads
}
222
// Blocks until an in-progress concurrent collection finishes.
// Currently a no-op because the concurrent collector does not exist
// yet; callers (Allocate) are already structured to retry after it.
void Heap::WaitForConcurrentGcToComplete() {
}
225
// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  // Not implemented yet; UNIMPLEMENTED(ERROR) presumably just reports
  // the missing functionality — confirm against the macro's definition.
  UNIMPLEMENTED(ERROR);
}
232
233} // namespace art