blob: 01cc1cc881ba4b58bb1630b73201cfcfa7263887 [file] [log] [blame]
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070016
17#ifndef ART_SRC_THREAD_H_
18#define ART_SRC_THREAD_H_
19
Carl Shapirob5573532011-07-12 18:22:59 -070020#include <pthread.h>
Elliott Hughesa0957642011-09-02 14:27:33 -070021
Elliott Hughes02b48d12011-09-07 17:15:51 -070022#include <bitset>
Elliott Hughesa0957642011-09-02 14:27:33 -070023#include <iosfwd>
Ian Rogersb033c752011-07-20 12:22:35 -070024#include <list>
Elliott Hughes8daa0922011-09-11 13:46:25 -070025#include <string>
Carl Shapirob5573532011-07-12 18:22:59 -070026
Brian Carlstrom1f870082011-08-23 16:02:11 -070027#include "dex_file.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070028#include "globals.h"
Elliott Hughes69f5bc62011-08-24 09:26:14 -070029#include "jni_internal.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070030#include "logging.h"
31#include "macros.h"
Elliott Hughes8daa0922011-09-11 13:46:25 -070032#include "mutex.h"
Brian Carlstromb765be02011-08-17 23:54:10 -070033#include "mem_map.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070034#include "offsets.h"
Elliott Hughes9d5ccec2011-09-19 13:19:50 -070035#include "runtime_stats.h"
Ian Rogersbdb03912011-09-14 00:55:44 -070036#include "UniquePtr.h"
Ian Rogersb033c752011-07-20 12:22:35 -070037
namespace art {

// Forward declarations of runtime types used by pointer/reference below,
// so this header does not need to pull in their full definitions.
class Array;
class Class;
class ClassLinker;
class ClassLoader;
class Context;
class Method;
class Monitor;
class Object;
class Runtime;
class Thread;
class ThreadList;
class Throwable;
class StackTraceElement;
class StaticStorageBase;

template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;
// Stack allocated indirect reference table, allocated within the bridge frame
// between managed and native code.
//
// NOTE: the field layout (number_of_references_, link_, then the trailing
// references_ array) is relied upon by generated code via the *Offset()
// accessors below — do not reorder members.
class StackIndirectReferenceTable {
 public:
  // Number of references contained within this SIRT
  size_t NumberOfReferences() {
    return number_of_references_;
  }

  // Link to previous SIRT or NULL
  StackIndirectReferenceTable* Link() {
    return link_;
  }

  // Pointer to the first slot of the inline reference array.
  Object** References() {
    return references_;
  }

  // Offset of length within SIRT, used by generated code
  static size_t NumberOfReferencesOffset() {
    return OFFSETOF_MEMBER(StackIndirectReferenceTable, number_of_references_);
  }

  // Offset of link within SIRT, used by generated code
  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(StackIndirectReferenceTable, link_);
  }

 private:
  // Instances are never constructed directly; storage is carved out of the
  // bridge frame by the JNI compiler (see references_ below).
  StackIndirectReferenceTable() {}

  size_t number_of_references_;
  StackIndirectReferenceTable* link_;

  // Fake array, really allocated and filled in by jni_compiler.
  Object* references_[0];

  DISALLOW_COPY_AND_ASSIGN(StackIndirectReferenceTable);
};
98
// Stack-allocated record of a native-to-managed code transition. Filled in by
// Thread::PushNativeToManagedRecord (which snapshots the managed stack top and
// PC) and consumed by Thread::PopNativeToManagedRecord on the way back.
struct NativeToManagedRecord {
  NativeToManagedRecord* link_;             // Previous record in the chain, or NULL.
  void* last_top_of_managed_stack_;         // Saved Frame SP (a Method**).
  uintptr_t last_top_of_managed_stack_pc_;  // Saved PC of the call out of managed code.
};
104
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -0700105// Iterator over managed frames up to the first native-to-managed transition
Elliott Hughes85d15452011-09-16 17:33:01 -0700106class PACKED Frame {
Shih-wei Liao9b576b42011-08-29 01:45:07 -0700107 public:
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -0700108 Frame() : sp_(NULL) {}
109
Ian Rogers0cfe1fb2011-08-26 03:29:44 -0700110 Method* GetMethod() const {
Elliott Hughesa0957642011-09-02 14:27:33 -0700111 return (sp_ != NULL) ? *sp_ : NULL;
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -0700112 }
113
114 bool HasNext() const {
115 return NextMethod() != NULL;
116 }
117
118 void Next();
119
Ian Rogersbdb03912011-09-14 00:55:44 -0700120 uintptr_t GetReturnPC() const;
121
122 uintptr_t LoadCalleeSave(int num) const;
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -0700123
Ian Rogers0cfe1fb2011-08-26 03:29:44 -0700124 Method** GetSP() const {
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -0700125 return sp_;
126 }
127
128 // TODO: this is here for testing, remove when we have exception unit tests
129 // that use the real stack
Ian Rogers0cfe1fb2011-08-26 03:29:44 -0700130 void SetSP(Method** sp) {
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -0700131 sp_ = sp;
132 }
133
Ian Rogers90865722011-09-19 11:11:44 -0700134 // Is this a frame for a real method (native or with dex code)
135 bool HasMethod() const;
136
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -0700137 private:
Ian Rogers0cfe1fb2011-08-26 03:29:44 -0700138 Method* NextMethod() const;
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -0700139
140 friend class Thread;
141
Ian Rogers0cfe1fb2011-08-26 03:29:44 -0700142 Method** sp_;
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -0700143};
144
// Per-thread runtime state. One instance exists for every thread attached to
// the VM; the current thread's instance is reachable via Thread::Current().
//
// NOTE: member order is ABI — compiled code reads fields through the
// *Offset() accessors below, so do not reorder data members or the runtime
// support function pointers.
class PACKED Thread {
 public:
  /* thread priorities, from java.lang.Thread */
  enum Priority {
    kMinPriority = 1,
    kNormPriority = 5,
    kMaxPriority = 10,
  };
  enum State {
    // These match up with JDWP values.
    kTerminated = 0,    // TERMINATED
    kRunnable = 1,      // RUNNABLE or running now
    kTimedWaiting = 2,  // TIMED_WAITING in Object.wait()
    kBlocked = 3,       // BLOCKED on a monitor
    kWaiting = 4,       // WAITING in Object.wait()
    // Non-JDWP states.
    kInitializing = 5,  // allocated, not yet running --- TODO: unnecessary?
    kStarting = 6,      // native thread started, not yet ready to run managed code
    kNative = 7,        // off in a JNI native method
    kVmWait = 8,        // waiting on a VM resource
    kSuspended = 9,     // suspended, usually by GC or debugger
  };

  // Space to throw a StackOverflowError in.
  static const size_t kStackOverflowReservedBytes = 3 * KB;

  // Default stack size for threads the runtime creates itself.
  static const size_t kDefaultStackSize = 64 * KB;

  // Runtime support function pointers. These are filled in by
  // InitFunctionPointers() and invoked directly from compiled code, so their
  // order and offsets are part of the compiled-code ABI — do not reorder.
  void (*pDebugMe)(Method*, uint32_t);
  void* (*pMemcpy)(void*, const void*, size_t);
  uint64_t (*pShlLong)(uint64_t, uint32_t);
  uint64_t (*pShrLong)(uint64_t, uint32_t);
  uint64_t (*pUshrLong)(uint64_t, uint32_t);
  float (*pI2f)(int);
  int (*pF2iz)(float);
  float (*pD2f)(double);
  double (*pF2d)(float);
  double (*pI2d)(int);
  int (*pD2iz)(double);
  float (*pL2f)(long);
  double (*pL2d)(long);
  long long (*pF2l)(float);
  long long (*pD2l)(double);
  float (*pFadd)(float, float);
  float (*pFsub)(float, float);
  float (*pFdiv)(float, float);
  float (*pFmul)(float, float);
  float (*pFmodf)(float, float);
  double (*pDadd)(double, double);
  double (*pDsub)(double, double);
  double (*pDdiv)(double, double);
  double (*pDmul)(double, double);
  double (*pFmod)(double, double);
  int (*pIdivmod)(int, int);
  int (*pIdiv)(int, int);
  long long (*pLmul)(long long, long long);
  long long (*pLdivmod)(long long, long long);
  void* (*pAllocObjectFromCode)(uint32_t, void*);
  void* (*pArrayAllocFromCode)(uint32_t, void*, int32_t);
  void* (*pCheckAndArrayAllocFromCode)(uint32_t, void*, int32_t);
  uint32_t (*pGet32Static)(uint32_t, const Method*);
  void (*pSet32Static)(uint32_t, const Method*, uint32_t);
  uint64_t (*pGet64Static)(uint32_t, const Method*);
  void (*pSet64Static)(uint32_t, const Method*, uint64_t);
  Object* (*pGetObjStatic)(uint32_t, const Method*);
  void (*pSetObjStatic)(uint32_t, const Method*, Object*);
  void (*pCanPutArrayElementFromCode)(void*, void*);
  bool (*pInstanceofNonTrivialFromCode) (const Object*, const Class*);
  void (*pCheckCastFromCode) (void*, void*);
  Method* (*pFindInterfaceMethodInCache)(Class*, uint32_t, const Method*, struct DvmDex*);
  void (*pUnlockObjectFromCode)(void*, void*);
  void (*pLockObjectFromCode)(Thread*, Object*);
  void (*pDeliverException)(void*);
  void (*pHandleFillArrayDataFromCode)(void*, void*);
  Class* (*pInitializeTypeFromCode)(uint32_t, Method*);
  void (*pResolveMethodFromCode)(Method*, uint32_t);
  void (*pInvokeInterfaceTrampoline)(void*, void*, void*, void*);
  void* (*pInitializeStaticStorage)(uint32_t, void*);
  // NOTE(review): Field is not forward-declared in this header — presumably
  // declared by one of the #includes (e.g. dex_file.h); verify.
  Field* (*pFindInstanceFieldFromCode)(uint32_t, const Method*);
  void (*pCheckSuspendFromCode)(Thread*);
  void (*pTestSuspendFromCode)();
  void (*pThrowStackOverflowFromCode)(void*);
  void (*pThrowNullPointerFromCode)();
  void (*pThrowArrayBoundsFromCode)(int32_t, int32_t);
  void (*pThrowDivZeroFromCode)();
  void (*pThrowVerificationErrorFromCode)(int32_t, int32_t);
  void (*pThrowNegArraySizeFromCode)(int32_t);
  void (*pThrowRuntimeExceptionFromCode)(int32_t);
  void (*pThrowInternalErrorFromCode)(int32_t);
  void (*pThrowNoSuchMethodFromCode)(int32_t);
  void (*pThrowAbstractMethodErrorFromCode)(Method* method, Thread* thread, Method** sp);
  void* (*pFindNativeMethod)(Thread* thread);
  Object* (*pDecodeJObjectInThread)(Thread* thread, jobject obj);

  // Callback interface for WalkStack: VisitFrame is invoked once per managed
  // frame together with the PC within that frame.
  class StackVisitor {
   public:
    virtual ~StackVisitor() {}
    virtual void VisitFrame(const Frame& frame, uintptr_t pc) = 0;
  };

  // Creates a new thread.
  static void Create(Object* peer, size_t stack_size);

  // Creates a new thread from the calling thread.
  static Thread* Attach(const Runtime* runtime, const char* name, bool as_daemon);

  // Returns the Thread* for the calling thread, read from thread-local storage.
  static Thread* Current() {
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

  static Thread* FromManagedThread(JNIEnv* env, jobject thread);
  static uint32_t LockOwnerFromThreadLock(Object* thread_lock);

  // Writes a human-readable description of this thread (state + stack) to os.
  void Dump(std::ostream& os) const;

  State GetState() const {
    return state_;
  }

  // Transitions to new_state; returns the previous state.
  State SetState(State new_state);

  bool IsDaemon();

  // Blocks the caller until this thread has reached a suspended state.
  void WaitUntilSuspended();

  // Does this thread hold the monitor of the given object?
  bool HoldsLock(Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  bool CanAccessDirectReferences() const {
    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
    return true;
  }

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  pthread_t GetImpl() const {
    return pthread_;
  }

  Object* GetPeer() const {
    return peer_;
  }

  RuntimeStats* GetStats() {
    return &stats_;
  }

  // Returns the Method* for the current method.
  // This is used by the JNI implementation for logging and diagnostic purposes.
  const Method* GetCurrentMethod() const {
    return top_of_managed_stack_.GetMethod();
  }

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  Throwable* GetException() const {
    DCHECK(CanAccessDirectReferences());
    return exception_;
  }

  void SetException(Throwable* new_exception) {
    DCHECK(CanAccessDirectReferences());
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  void ClearException() {
    exception_ = NULL;
  }

  // Find catch block and perform long jump to appropriate exception handle
  void DeliverException();

  // Thread-local, lazily-allocated context used by DeliverException.
  Context* GetLongJumpContext();

  Frame GetTopOfStack() const {
    return top_of_managed_stack_;
  }

  // TODO: this is here for testing, remove when we have exception unit tests
  // that use the real stack
  void SetTopOfStack(void* stack, uintptr_t pc) {
    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(stack));
    top_of_managed_stack_pc_ = pc;
  }

  void SetTopOfStackPC(uintptr_t pc) {
    top_of_managed_stack_pc_ = pc;
  }

  // Sets the pending exception to a new instance of the named class, with a
  // printf-formatted detail message.
  void ThrowNewException(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__ ((format(printf, 3, 4)));

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap);

  // This exception is special, because we need to pre-allocate an instance.
  void ThrowOutOfMemoryError();

  Frame FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const Method* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  void SetName(const char* name);

  // Process-wide initialization/teardown of thread infrastructure.
  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Number of references allocated in SIRTs on this thread
  size_t NumSirtReferences();

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  // Pop the top SIRT
  void PopSirt();

  // Convert a jobject into a Object*
  Object* DecodeJObject(jobject obj);

  // Implements java.lang.Thread.interrupted: returns and clears the
  // interrupted flag in one critical section.
  bool Interrupted() {
    MutexLock mu(*wait_mutex_);
    bool interrupted = interrupted_;
    interrupted_ = false;
    return interrupted;
  }

  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() {
    MutexLock mu(*wait_mutex_);
    return interrupted_;
  }

  // Raises the interrupted flag and wakes this thread if it is waiting.
  void Interrupt() {
    MutexLock mu(*wait_mutex_);
    if (interrupted_) {
      return;
    }
    interrupted_ = true;
    NotifyLocked();
  }

  void Notify() {
    MutexLock mu(*wait_mutex_);
    NotifyLocked();
  }

  // Linked list recording transitions from native to managed code.
  // Push saves the current managed-stack snapshot into 'record' and clears
  // the managed SP; Pop restores the snapshot saved in 'record'.
  void PushNativeToManagedRecord(NativeToManagedRecord* record) {
    record->last_top_of_managed_stack_ = reinterpret_cast<void*>(top_of_managed_stack_.GetSP());
    record->last_top_of_managed_stack_pc_ = top_of_managed_stack_pc_;
    record->link_ = native_to_managed_record_;
    native_to_managed_record_ = record;
    top_of_managed_stack_.SetSP(NULL);
  }
  void PopNativeToManagedRecord(const NativeToManagedRecord& record) {
    native_to_managed_record_ = record.link_;
    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(record.last_top_of_managed_stack_));
    top_of_managed_stack_pc_ = record.last_top_of_managed_stack_pc_;
  }

  const ClassLoader* GetClassLoaderOverride() {
    // TODO: need to place the class_loader_override_ in a handle
    // DCHECK(CanAccessDirectReferences());
    return class_loader_override_;
  }

  void SetClassLoaderOverride(const ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Create the internal representation of a stack trace, that is more time
  // and space efficient to compute than the StackTraceElement[]
  jobject CreateInternalStackTrace(JNIEnv* env) const;

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);

  // Visits all GC roots reachable from this thread (for garbage collection).
  void VisitRoots(Heap::RootVisitor* visitor, void* arg) const;

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_VOLATILE_MEMBER(Thread, state_));
  }

  // Size of stack less any space reserved for stack overflow
  size_t GetStackSize() {
    return stack_size_ - (stack_end_ - stack_base_);
  }

  // Set the stack end to that to be used during a stack overflow
  void SetStackEndForStackOverflow() {
    // During stack overflow we allow use of the full stack
    CHECK(stack_end_ != stack_base_) << "Need to increase: kStackOverflowReservedBytes ("
        << kStackOverflowReservedBytes << ")";
    stack_end_ = stack_base_;
  }

  // Set the stack end to that to be used during regular execution
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_base_ + kStackOverflowReservedBytes;
  }

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_) +
                        OFFSETOF_MEMBER(Frame, sp_));
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_pc_));
  }

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  // Invokes visitor->VisitFrame for each managed frame on this thread's stack.
  void WalkStack(StackVisitor* visitor) const;

 private:
  Thread();
  ~Thread();
  friend class ThreadList;  // For ~Thread.

  // Allocates and initializes this thread's java.lang.Thread peer object.
  void CreatePeer(const char* name, bool as_daemon);
  friend class Runtime;  // For CreatePeer.

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const;

  void Attach(const Runtime* runtime);
  static void* CreateCallback(void* arg);

  void InitCpu();
  void InitFunctionPointers();
  void InitStackHwm();

  // Signals the wait condition if this thread is currently waiting on a
  // monitor. Caller must hold wait_mutex_.
  void NotifyLocked() {
    if (wait_monitor_ != NULL) {
      wait_cond_->Signal();
    }
  }

  static void ThreadExitCallback(void* arg);

  void WalkStackUntilUpCall(StackVisitor* visitor, bool include_upcall) const;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Native thread handle.
  pthread_t pthread_;

  // Our managed peer (an instance of java.lang.Thread).
  Object* peer_;

  // The top_of_managed_stack_ and top_of_managed_stack_pc_ fields are accessed from
  // compiled code, so we keep them early in the structure to (a) avoid having to keep
  // fixing the assembler offsets and (b) improve the chances that these will still be aligned.

  // Top of the managed stack, written out prior to the state transition from
  // kRunnable to kNative. Uses include to give the starting point for scanning
  // a managed stack when a thread is in native code.
  Frame top_of_managed_stack_;
  // PC corresponding to the call out of the top_of_managed_stack_ frame
  uintptr_t top_of_managed_stack_pc_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_;
  ConditionVariable* wait_cond_;
  // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
  Monitor* wait_monitor_;
  // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
  uint32_t interrupted_;
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  Object* monitor_enter_object_;

  friend class Monitor;

  // Per-thread allocation/GC statistics.
  RuntimeStats stats_;

  // FIXME: placeholder for the gc cardTable
  uint32_t card_table_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // Size of the stack
  size_t stack_size_;

  // The "lowest addressable byte" of the stack
  byte* stack_base_;

  // A linked list (of stack allocated records) recording transitions from
  // native to managed code.
  NativeToManagedRecord* native_to_managed_record_;

  // Top of linked list of stack indirect reference tables or NULL for none
  StackIndirectReferenceTable* top_sirt_;

  // Every thread may have an associated JNI environment
  JNIEnvExt* jni_env_;

  volatile State state_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  Runtime* runtime_;

  // The pending exception or NULL.
  Throwable* exception_;

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  const ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // TLS key used to retrieve the VM thread object.
  static pthread_key_t pthread_key_self_;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};
Ian Rogersbdb03912011-09-14 00:55:44 -0700657
// ostream support, used by the logging framework.
std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const Thread::State& state);
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700660
Elliott Hughesad7c2a32011-08-31 11:58:10 -0700661class ScopedThreadStateChange {
662 public:
663 ScopedThreadStateChange(Thread* thread, Thread::State new_state) : thread_(thread) {
664 old_thread_state_ = thread_->SetState(new_state);
665 }
666
667 ~ScopedThreadStateChange() {
668 thread_->SetState(old_thread_state_);
669 }
670
671 private:
672 Thread* thread_;
673 Thread::State old_thread_state_;
674 DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
675};
676
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700677} // namespace art
678
679#endif // ART_SRC_THREAD_H_