blob: 96d5ff06549778dc94da5ff8573d7bba14ac54b4 [file] [log] [blame]
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -07001// Copyright 2011 Google Inc. All Rights Reserved.
2
3#ifndef ART_SRC_ASSEMBLER_H_
4#define ART_SRC_ASSEMBLER_H_
5
Brian Carlstrom578bbdc2011-07-21 14:07:47 -07006#include "logging.h"
7#include "macros.h"
8#include "managed_register.h"
9#include "memory_region.h"
10#include "offsets.h"
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -070011
Carl Shapiro6b6b5f02011-06-21 15:05:09 -070012namespace art {
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -070013
14class Assembler;
15class AssemblerBuffer;
16class AssemblerFixup;
17
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -070018class Label {
19 public:
20 Label() : position_(0) {}
21
22 ~Label() {
23 // Assert if label is being destroyed with unresolved branches pending.
24 CHECK(!IsLinked());
25 }
26
27 // Returns the position for bound and linked labels. Cannot be used
28 // for unused labels.
29 int Position() const {
30 CHECK(!IsUnused());
31 return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize;
32 }
33
34 int LinkPosition() const {
35 CHECK(IsLinked());
36 return position_ - kWordSize;
37 }
38
39 bool IsBound() const { return position_ < 0; }
40 bool IsUnused() const { return position_ == 0; }
41 bool IsLinked() const { return position_ > 0; }
42
43 private:
44 int position_;
45
46 void Reinitialize() {
47 position_ = 0;
48 }
49
50 void BindTo(int position) {
51 CHECK(!IsBound());
52 position_ = -position - kPointerSize;
53 CHECK(IsBound());
54 }
55
56 void LinkTo(int position) {
57 CHECK(!IsBound());
58 position_ = position + kPointerSize;
59 CHECK(IsLinked());
60 }
61
62 friend class Assembler;
63 DISALLOW_COPY_AND_ASSIGN(Label);
64};
65
66
67// Assembler fixups are positions in generated code that require processing
68// after the code has been copied to executable memory. This includes building
69// relocation information.
// Assembler fixups are positions in generated code that require processing
// after the code has been copied to executable memory. This includes building
// relocation information.
//
// Fixups form an intrusive singly linked list (via previous_) threaded
// through the owning AssemblerBuffer; see AssemblerBuffer::EmitFixup().
class AssemblerFixup {
 public:
  // Invoked during finalization with the destination memory region and this
  // fixup's recorded position within the instruction stream.
  virtual void Process(const MemoryRegion& region, int position) = 0;
  virtual ~AssemblerFixup() {}

 private:
  // Previously emitted fixup in the buffer's chain.
  AssemblerFixup* previous_;
  // Offset in the instruction stream at which this fixup was emitted.
  int position_;

  AssemblerFixup* previous() const { return previous_; }
  void set_previous(AssemblerFixup* previous) { previous_ = previous; }

  int position() const { return position_; }
  void set_position(int position) { position_ = position; }

  friend class AssemblerBuffer;
};
87
// Parent of all queued slow paths, emitted during finalization
class SlowPath {
 public:
  SlowPath() : next_(NULL) {}
  virtual ~SlowPath() {}

  // Label at the end of the slow path; a slow path may branch back to it to
  // rejoin the fast path (use is optional).
  Label* Continuation() { return &continuation_; }
  // Label the fast path branches to in order to enter this slow path.
  Label* Entry() { return &entry_; }
  // Generate code for slow path
  virtual void Emit(Assembler *sp_asm) = 0;

 protected:
  // Entry branched to by fast path
  Label entry_;
  // Optional continuation that is branched to at the end of the slow path
  Label continuation_;
  // Next in linked list of slow paths
  SlowPath *next_;

  friend class AssemblerBuffer;
  DISALLOW_COPY_AND_ASSIGN(SlowPath);
};
110
// Slowpath entered when Thread::Current()->_exception is non-null
class ExceptionSlowPath : public SlowPath {
 public:
  ExceptionSlowPath() {}
  // Code generation for the exception check; defined out of line
  // (presumably per target architecture — see the assembler .cc files).
  virtual void Emit(Assembler *sp_asm);
};
117
// Slowpath entered when Thread::Current()->_suspend_count is non-zero
class SuspendCountSlowPath : public SlowPath {
 public:
  // return_reg/return_save_location/return_size describe the in-flight return
  // value so that Emit() can preserve it across the slow path (exact usage is
  // in the out-of-line Emit() — confirm against the .cc implementation).
  SuspendCountSlowPath(ManagedRegister return_reg,
                       FrameOffset return_save_location,
                       size_t return_size) :
      return_register_(return_reg), return_save_location_(return_save_location),
      return_size_(return_size) {}
  virtual void Emit(Assembler *sp_asm);

 private:
  // Remember how to save the return value
  const ManagedRegister return_register_;
  const FrameOffset return_save_location_;
  const size_t return_size_;
};
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -0700134
135class AssemblerBuffer {
136 public:
137 AssemblerBuffer();
138 ~AssemblerBuffer();
139
140 // Basic support for emitting, loading, and storing.
141 template<typename T> void Emit(T value) {
142 CHECK(HasEnsuredCapacity());
143 *reinterpret_cast<T*>(cursor_) = value;
144 cursor_ += sizeof(T);
145 }
146
147 template<typename T> T Load(size_t position) {
148 CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
149 return *reinterpret_cast<T*>(contents_ + position);
150 }
151
152 template<typename T> void Store(size_t position, T value) {
153 CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
154 *reinterpret_cast<T*>(contents_ + position) = value;
155 }
156
157 // Emit a fixup at the current location.
158 void EmitFixup(AssemblerFixup* fixup) {
159 fixup->set_previous(fixup_);
160 fixup->set_position(Size());
161 fixup_ = fixup;
162 }
163
Ian Rogers45a76cb2011-07-21 22:00:15 -0700164 void EnqueueSlowPath(SlowPath* slowpath) {
165 if (slow_path_ == NULL) {
166 slow_path_ = slowpath;
167 } else {
168 SlowPath* cur = slow_path_;
169 for ( ; cur->next_ != NULL ; cur = cur->next_) {}
170 cur->next_ = slowpath;
171 }
172 }
173
174 void EmitSlowPaths(Assembler* sp_asm) {
175 SlowPath* cur = slow_path_;
176 SlowPath* next = NULL;
177 slow_path_ = NULL;
178 for ( ; cur != NULL ; cur = next) {
179 cur->Emit(sp_asm);
180 next = cur->next_;
181 delete cur;
182 }
183 }
184
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -0700185 // Get the size of the emitted code.
186 size_t Size() const {
187 CHECK_GE(cursor_, contents_);
188 return cursor_ - contents_;
189 }
190
191 byte* contents() const { return contents_; }
192
193 // Copy the assembled instructions into the specified memory block
194 // and apply all fixups.
195 void FinalizeInstructions(const MemoryRegion& region);
196
197 // To emit an instruction to the assembler buffer, the EnsureCapacity helper
198 // must be used to guarantee that the underlying data area is big enough to
199 // hold the emitted instruction. Usage:
200 //
201 // AssemblerBuffer buffer;
202 // AssemblerBuffer::EnsureCapacity ensured(&buffer);
203 // ... emit bytes for single instruction ...
204
205#ifdef DEBUG
206
207 class EnsureCapacity {
208 public:
209 explicit EnsureCapacity(AssemblerBuffer* buffer) {
210 if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
211 // In debug mode, we save the assembler buffer along with the gap
212 // size before we start emitting to the buffer. This allows us to
213 // check that any single generated instruction doesn't overflow the
214 // limit implied by the minimum gap size.
215 buffer_ = buffer;
216 gap_ = ComputeGap();
217 // Make sure that extending the capacity leaves a big enough gap
218 // for any kind of instruction.
219 CHECK_GE(gap_, kMinimumGap);
220 // Mark the buffer as having ensured the capacity.
221 CHECK(!buffer->HasEnsuredCapacity()); // Cannot nest.
222 buffer->has_ensured_capacity_ = true;
223 }
224
225 ~EnsureCapacity() {
226 // Unmark the buffer, so we cannot emit after this.
227 buffer_->has_ensured_capacity_ = false;
228 // Make sure the generated instruction doesn't take up more
229 // space than the minimum gap.
230 int delta = gap_ - ComputeGap();
Ian Rogersb033c752011-07-20 12:22:35 -0700231 CHECK_LE(delta, kMinimumGap);
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -0700232 }
233
234 private:
235 AssemblerBuffer* buffer_;
236 int gap_;
237
238 int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
239 };
240
241 bool has_ensured_capacity_;
242 bool HasEnsuredCapacity() const { return has_ensured_capacity_; }
243
244#else
245
246 class EnsureCapacity {
247 public:
248 explicit EnsureCapacity(AssemblerBuffer* buffer) {
249 if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
250 }
251 };
252
253 // When building the C++ tests, assertion code is enabled. To allow
254 // asserting that the user of the assembler buffer has ensured the
255 // capacity needed for emitting, we add a dummy method in non-debug mode.
256 bool HasEnsuredCapacity() const { return true; }
257
258#endif
259
260 // Returns the position in the instruction stream.
261 int GetPosition() { return cursor_ - contents_; }
262
263 private:
264 // The limit is set to kMinimumGap bytes before the end of the data area.
265 // This leaves enough space for the longest possible instruction and allows
266 // for a single, fast space check per instruction.
267 static const int kMinimumGap = 32;
268
269 byte* contents_;
270 byte* cursor_;
271 byte* limit_;
272 AssemblerFixup* fixup_;
273 bool fixups_processed_;
274
Ian Rogers45a76cb2011-07-21 22:00:15 -0700275 // Head of linked list of slow paths
276 SlowPath* slow_path_;
277
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -0700278 byte* cursor() const { return cursor_; }
279 byte* limit() const { return limit_; }
280 size_t Capacity() const {
281 CHECK_GE(limit_, contents_);
282 return (limit_ - contents_) + kMinimumGap;
283 }
284
285 // Process the fixup chain starting at the given fixup. The offset is
286 // non-zero for fixups in the body if the preamble is non-empty.
287 void ProcessFixups(const MemoryRegion& region);
288
289 // Compute the limit based on the data area and the capacity. See
290 // description of kMinimumGap for the reasoning behind the value.
291 static byte* ComputeLimit(byte* data, size_t capacity) {
292 return data + capacity - kMinimumGap;
293 }
294
295 void ExtendCapacity();
296
297 friend class AssemblerFixup;
298};
299
Carl Shapiro6b6b5f02011-06-21 15:05:09 -0700300} // namespace art
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -0700301
302#if defined(__i386__)
Brian Carlstrom578bbdc2011-07-21 14:07:47 -0700303#include "assembler_x86.h"
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -0700304#elif defined(__arm__)
Brian Carlstrom578bbdc2011-07-21 14:07:47 -0700305#include "assembler_arm.h"
Carl Shapiroa5d5cfd2011-06-21 12:46:59 -0700306#endif
307
308#endif // ART_SRC_ASSEMBLER_H_