/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <memory>
#include <string>

#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/allocator/rosalloc.h"
#include "globals.h"
#include "handle_scope.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"
#include "throw_location.h"

namespace art {

namespace gc {
namespace collector {
class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace mirror {
  class ArtMethod;
  class Array;
  class Class;
  class ClassLoader;
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int32_t> IntArray;
  class StackTraceElement;
  class StaticStorageBase;
  class Throwable;
}  // namespace mirror
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DexFile;
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
struct SingleStepControl;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
                          // safepoint handler.
  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
};

class Thread {
 public:
  // Space to throw a StackOverflowError in.
  // TODO: shrink reserved space, in particular for 64bit.
#if defined(__x86_64__)
  static constexpr size_t kStackOverflowReservedBytes = 32 * KB;
#elif defined(__aarch64__)
  // Worst-case, we would need about 2.6x the amount of x86_64 for many more registers.
  // But this one works rather well.
  static constexpr size_t kStackOverflowReservedBytes = 32 * KB;
#elif defined(__i386__)
  // TODO: Bumped to workaround regression (http://b/14982147) Specifically to fix:
  // test-art-host-run-test-interpreter-018-stack-overflow
  // test-art-host-run-test-interpreter-107-int-math2
  static constexpr size_t kStackOverflowReservedBytes = 24 * KB;
#else
  static constexpr size_t kStackOverflowReservedBytes = 16 * KB;
#endif
  // How much of the reserved bytes is reserved for incoming signals.
  static constexpr size_t kStackOverflowSignalReservedBytes = 2 * KB;
  // How much of the reserved bytes we may temporarily use during stack overflow checks as an
  // optimization.
  static constexpr size_t kStackOverflowReservedUsableBytes =
      kStackOverflowReservedBytes - kStackOverflowSignalReservedBytes;

  // For implicit overflow checks we reserve an extra piece of memory at the bottom
  // of the stack (lowest memory). The higher portion of the memory
  // is protected against reads and the lower is available for use while
  // throwing the StackOverflow exception.
  static constexpr size_t kStackOverflowProtectedSize = 16 * KB;
  static constexpr size_t kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
      kStackOverflowReservedBytes;
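
  // Worked example (added for illustration; not part of the original header): on __x86_64__ and
  // __aarch64__ builds the constants above give
  //   kStackOverflowImplicitCheckSize = 16 * KB + 32 * KB = 48 * KB,
  // so ResetDefaultStackEnd(true) below leaves stack_end 48 KB above stack_begin, while the
  // explicit-check configuration (ResetDefaultStackEnd(false)) leaves only the 32 KB reserved
  // region. The 2 KB kStackOverflowSignalReservedBytes is carved out of that reserved region for
  // incoming signals, leaving kStackOverflowReservedUsableBytes = 30 KB for the overflow checks
  // themselves.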

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  static Thread* Current();

  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                   mirror::Object* thread_peer)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  template<size_t size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.debug_suspend_count;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags;
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
  // the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state, acquiring a share of the mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Transition from runnable into a state where mutator privileges are denied. Releases the
  // share of the mutator lock.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Once called, thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) {
    if (kIsDebugBuild) {
      CHECK(cause != NULL);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
  }
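
  // Illustrative usage sketch (added; not part of the original header). The calls pair up and
  // nest by saving the previous cause; the cause string below is hypothetical:
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("visiting thread roots");
  //   ...  // code that must not suspend
  //   self->EndAssertNoThreadSuspension(old_cause);
  //
  // In debug builds a suspension attempt inside the region fails an assertion that reports the
  // recorded cause; in non-debug builds both calls are close to free.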

  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();
268
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700269 uint32_t GetThreadId() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700270 return tls32_.thin_lock_thread_id;
Carl Shapirob5573532011-07-12 18:22:59 -0700271 }
272
Elliott Hughesd92bec42011-09-02 17:04:36 -0700273 pid_t GetTid() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700274 return tls32_.tid;
Elliott Hughesd92bec42011-09-02 17:04:36 -0700275 }
Elliott Hughese27955c2011-08-26 15:21:24 -0700276
Elliott Hughesffb465f2012-03-01 18:46:05 -0800277 // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
Mathieu Chartier2b7c4d12014-05-19 10:52:16 -0700278 mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
Ian Rogersb726dcb2012-09-05 08:57:23 -0700279 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Elliott Hughes899e7892012-01-24 14:57:32 -0800280
Elliott Hughesffb465f2012-03-01 18:46:05 -0800281 // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
282 // allocation, or locking.
283 void GetThreadName(std::string& name) const;
284
Elliott Hughes899e7892012-01-24 14:57:32 -0800285 // Sets the thread's name.
Ian Rogersb726dcb2012-09-05 08:57:23 -0700286 void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Elliott Hughesfc861622011-10-17 17:57:47 -0700287
Jeff Hao57dac6e2013-08-15 16:36:24 -0700288 // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
289 uint64_t GetCpuMicroTime() const;
290
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800291 mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700292 CHECK(tlsPtr_.jpeer == nullptr);
293 return tlsPtr_.opeer;
Elliott Hughes8daa0922011-09-11 13:46:25 -0700294 }
295
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700296 bool HasPeer() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700297 return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700298 }
299
Elliott Hughes9d5ccec2011-09-19 13:19:50 -0700300 RuntimeStats* GetStats() {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700301 return &tls64_.stats;
Elliott Hughes9d5ccec2011-09-19 13:19:50 -0700302 }
303
Elliott Hughes7dc51662012-05-16 14:48:43 -0700304 bool IsStillStarting() const;
305
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700306 bool IsExceptionPending() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700307 return tlsPtr_.exception != nullptr;
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700308 }
309
Ian Rogers62d6c772013-02-27 08:32:07 -0800310 mirror::Throwable* GetException(ThrowLocation* throw_location) const
311 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700312 if (throw_location != nullptr) {
313 *throw_location = tlsPtr_.throw_location;
Ian Rogers62d6c772013-02-27 08:32:07 -0800314 }
Ian Rogersdd7624d2014-03-14 17:43:00 -0700315 return tlsPtr_.exception;
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700316 }
317
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700318 void AssertNoPendingException() const;
Mathieu Chartier8d7672e2014-02-25 10:57:16 -0800319 void AssertNoPendingExceptionForNewException(const char* msg) const;
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700320
Ian Rogers62d6c772013-02-27 08:32:07 -0800321 void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
322 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogers0cfe1fb2011-08-26 03:29:44 -0700323 CHECK(new_exception != NULL);
Ian Rogers474b6da2012-09-25 00:20:38 -0700324 // TODO: DCHECK(!IsExceptionPending());
Ian Rogersdd7624d2014-03-14 17:43:00 -0700325 tlsPtr_.exception = new_exception;
326 tlsPtr_.throw_location = throw_location;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -0700327 }
328
Serguei Katkova309d762014-05-26 11:23:39 +0700329 void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700330 tlsPtr_.exception = nullptr;
331 tlsPtr_.throw_location.Clear();
Sebastien Hertz9f102032014-05-23 08:59:42 +0200332 SetExceptionReportedToInstrumentation(false);
jeffhao94d6df42012-11-26 16:02:12 -0800333 }

  // Find the catch block and perform a long jump to the appropriate exception handler.
  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(tlsPtr_.long_jump_context == nullptr);
    tlsPtr_.long_jump_context = context;
  }

  mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method, uintptr_t pc) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
    tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const ThrowLocation& throw_location,
                         const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const ThrowLocation& throw_location,
                                const char* exception_class_descriptor,
                                const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 4, 5)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionV(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Convert a jobject into an Object*.
  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return interrupted_;
  }
  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    interrupted_ = i;
  }
  void Notify() LOCKS_EXCLUDED(wait_mutex_);

 private:
  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    wait_monitor_ = mon;
  }

  // Waiter linked-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Create the internal representation of a stack trace, which is more time- and space-efficient
  // to compute than the StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(
      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
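
  // Illustrative sketch (added; not part of the original header) of how the two calls above are
  // meant to chain, assuming 'soa' is an already-runnable scoped object access:
  //
  //   jobject internal = soa.Self()->CreateInternalStackTrace<false>(soa);
  //   jobjectArray elements =
  //       Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
  //
  // output_array and stack_depth may also be supplied, as described in the comment above.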

  void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

 private:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale;
    size_t shrink;
    if (pointer_size == sizeof(void*)) {
      scale = 1;
      shrink = 1;
    } else if (pointer_size > sizeof(void*)) {
      scale = pointer_size / sizeof(void*);
      shrink = 1;
    } else {
      DCHECK_GT(sizeof(void*), pointer_size);
      scale = 1;
      shrink = sizeof(void*) / pointer_size;
    }
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }
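
  // Worked example (added for illustration; not part of the original header): when a 32-bit
  // build (sizeof(void*) == 4) asks for offsets of a 64-bit target (pointer_size == 8),
  // scale == 2 and shrink == 1, so a field that lives tls_ptr_offset bytes into this build's
  // tlsPtr_ is reported at twice that distance, matching a layout in which every pointer-sized
  // slot is twice as wide. In the opposite case (64-bit build, pointer_size == 4) shrink == 2
  // halves the offset instead.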

 public:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PortableEntryPointOffset(size_t port_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, portable_entrypoints) + port_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> SelfOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ExceptionOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PeerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> CardTableOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
  }

  // Size of the stack, less any space reserved for stack overflow handling.
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }

  byte* GetStackEnd() const {
    return tlsPtr_.stack_end;
  }

  // Set the stack end to the value to be used while handling a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Set the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd(bool implicit_overflow_check) {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    if (implicit_overflow_check) {
      // For implicit checks we also need to add in the protected region above the
      // overflow region.
      tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
    } else {
      tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowReservedBytes;
    }
  }

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection(bool is_main_stack);

  bool IsHandlingStackOverflow() const {
    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> StackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEnvOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFrameOffset());
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackPcOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &tlsPtr_.managed_stack;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return tlsPtr_.managed_stack.PopShadowFrame();
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
  }

  // Number of references in handle scope on this thread.
  size_t NumHandleReferences();

  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return NumHandleReferences() + NumJniShadowFrameReferences();
  };

  // Is the given obj in this thread's stack indirect reference table?
  bool HandleScopeContains(jobject obj) const;

  void HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  HandleScope* GetTopHandleScope() {
    return tlsPtr_.top_handle_scope;
  }

  void PushHandleScope(HandleScope* handle_scope) {
    handle_scope->SetLink(tlsPtr_.top_handle_scope);
    tlsPtr_.top_handle_scope = handle_scope;
  }

  HandleScope* PopHandleScope() {
    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
    DCHECK(handle_scope != nullptr);
    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
    return handle_scope;
  }
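
  // Illustrative sketch (added; not part of the original header): callers are expected to pair
  // these in LIFO order, e.g.
  //
  //   HandleScope* scope = ...;            // storage for a fixed number of handles
  //   self->PushHandleScope(scope);        // scope now heads tlsPtr_.top_handle_scope
  //   ...                                  // create handles, call code that may GC
  //   HandleScope* popped = self->PopHandleScope();
  //   DCHECK(popped == scope);             // mismatched push/pop indicates a bug
  //
  // Pop returns the scope that was on top, which makes unbalanced usage easy to assert on.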

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                top_handle_scope));
  }

  DebugInvokeReq* GetInvokeReq() const {
    return tlsPtr_.debug_invoke_req;
  }

  SingleStepControl* GetSingleStepControl() const {
    return tlsPtr_.single_step_control;
  }

  // Returns the fake exception used to activate deoptimization.
  static mirror::Throwable* GetDeoptimizationException() {
    return reinterpret_cast<mirror::Throwable*>(-1);
  }

  void SetDeoptimizationShadowFrame(ShadowFrame* sf);
  void SetDeoptimizationReturnValue(const JValue& ret_val);

  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);

  bool HasDeoptimizationShadowFrame() const {
    return tlsPtr_.deoptimization_shadow_frame != nullptr;
  }

  void SetShadowFrameUnderConstruction(ShadowFrame* sf);
  void ClearShadowFrameUnderConstruction();

  bool HasShadowFrameUnderConstruction() const {
    return tlsPtr_.shadow_frame_under_construction != nullptr;
  }

  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
    return tlsPtr_.instrumentation_stack;
  }

  std::vector<mirror::ArtMethod*>* GetStackTraceSample() const {
    return tlsPtr_.stack_trace_sample;
  }

  void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) {
    tlsPtr_.stack_trace_sample = sample;
  }

  uint64_t GetTraceClockBase() const {
    return tls64_.trace_clock_base;
  }

  void SetTraceClockBase(uint64_t clock_base) {
    tls64_.trace_clock_base = clock_base;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return tlsPtr_.held_mutexes[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    tlsPtr_.held_mutexes[level] = mutex;
  }

  void RunCheckpointFunction();

  bool ReadFlag(ThreadFlag flag) const {
    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
  }

  bool TestAllFlags() const {
    return (tls32_.state_and_flags.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag);

  void AtomicClearFlag(ThreadFlag flag);

  void ResetQuickAllocEntryPointsForThread();

  // Returns the remaining space in the TLAB.
  size_t TlabSize() const;
  // Doesn't check that there is room.
  mirror::Object* AllocTlab(size_t bytes);
  void SetTlab(byte* start, byte* end);
  bool HasTlab() const;

  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
  // equal to a valid pointer.
  // TODO: does this need to be atomic?  I don't think so.
  void RemoveSuspendTrigger() {
    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
  }

  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
  // The next time a suspend check is done, it will load from the value at this address
  // and trigger a SIGSEGV.
  void TriggerSuspend() {
    tlsPtr_.suspend_trigger = nullptr;
  }
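
  // Illustrative sketch (added; not part of the original header) of the check this enables in
  // generated code: a suspend poll is just a load through the trigger pointer, e.g.
  //
  //   uintptr_t* trigger = tlsPtr_.suspend_trigger;
  //   (void)*trigger;  // harmless self-read normally; faults with SIGSEGV after TriggerSuspend()
  //
  // The SIGSEGV handler then performs the real suspend check, and RemoveSuspendTrigger()
  // restores the self-referential value so later polls fall through again.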

  // Push an object onto the allocation stack.
  bool PushOnThreadLocalAllocationStack(mirror::Object* obj);

  // Set the thread local allocation pointers to the given pointers.
  void SetThreadLocalAllocationStack(mirror::Object** start, mirror::Object** end);

  // Resets the thread local allocation pointers.
  void RevokeThreadLocalAllocationStack();

  size_t GetThreadLocalBytesAllocated() const {
    return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start;
  }

  size_t GetThreadLocalObjectsAllocated() const {
    return tlsPtr_.thread_local_objects;
  }

  void* GetRosAllocRun(size_t index) const {
    return tlsPtr_.rosalloc_runs[index];
  }

  void SetRosAllocRun(size_t index, void* run) {
    tlsPtr_.rosalloc_runs[index] = run;
  }

  bool IsExceptionReportedToInstrumentation() const {
    return tls32_.is_exception_reported_to_instrumentation_;
  }

  void SetExceptionReportedToInstrumentation(bool reported) {
    tls32_.is_exception_reported_to_instrumentation_ = reported;
  }

 private:
  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);

  template<bool kTransactionActive>
  void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
                jobject thread_name, jint thread_priority)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
  // Dbg::Disconnected.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    tls32_.state_and_flags.as_struct.state = new_state;
    return old_state;
  }

  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  union PACKED(4) StateAndFlags {
    StateAndFlags() {}
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (ie not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    volatile int32_t as_int;

   private:
    // gcc does not handle struct with volatile member assignments correctly.
    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
  };
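
  // Illustrative sketch (added; not part of the original header) of the CAS the comment above
  // refers to, in pseudo-code:
  //
  //   StateAndFlags old_sf, new_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;
  //   if ((old_sf.as_struct.flags & kSuspendRequest) != 0) {
  //     // a suspend request is pending; wait to be resumed instead of becoming runnable
  //   }
  //   new_sf.as_int = old_sf.as_int;
  //   new_sf.as_struct.state = kRunnable;
  //   // compare-and-swap as_int from old_sf.as_int to new_sf.as_int, retrying on failure
  //
  // Packing the flags and the state into one 32-bit word is what lets a single CAS make the
  // suspended -> runnable transition safe against concurrent suspend requests.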

  static void ThreadExitCallback(void* arg);

  // Maximum number of checkpoint functions.
  static constexpr uint32_t kMaxCheckpoints = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  /***********************************************************************************************/
  // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
  // pointer size differences. To encourage shorter encoding, more frequently used values appear
  // first if possible.
  /***********************************************************************************************/

  struct PACKED(4) tls_32bit_sized_values {
    // We have no control over the size of 'bool', but want our boolean fields
    // to be 4-byte quantities.
    typedef uint32_t bool32_t;

    explicit tls_32bit_sized_values(bool is_daemon) :
      suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
      daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
      thread_exit_check_count(0), is_exception_reported_to_instrumentation_(false) {
    }

    union StateAndFlags state_and_flags;
    COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
                   sizeof_state_and_flags_and_int32_are_different);

    // A non-zero value is used to tell the current thread to enter a safe point
    // at the next poll.
    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // How much of 'suspend_count_' is by request of the debugger, used to set things right
    // when the debugger detaches. Must be <= suspend_count_.
    int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;

    // When true this field indicates that the exception associated with this thread has already
    // been reported to instrumentation.
    bool32_t is_exception_reported_to_instrumentation_;
  } tls32_;

  struct PACKED(8) tls_64bit_sized_values {
    tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
    }

    // The clock base used for tracing.
    uint64_t trace_clock_base;

    // Return value used by deoptimization.
    JValue deoptimization_return_value;

    RuntimeStats stats;
  } tls64_;
977
978 struct PACKED(4) tls_ptr_sized_values {
979 tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
980 managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
981 jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
982 stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700983 top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
Ian Rogersdd7624d2014-03-14 17:43:00 -0700984 instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
Andreas Gampe2a0d4ec2014-06-02 22:05:22 -0700985 deoptimization_shadow_frame(nullptr), shadow_frame_under_construction(nullptr), name(nullptr),
986 pthread_self(0), last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
Ian Rogersdd7624d2014-03-14 17:43:00 -0700987 thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
988 thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr) {
989 }
990
    991  // The biased card table; see CardTable for details.
992 byte* card_table;
993
994 // The pending exception or NULL.
995 mirror::Throwable* exception;
996
997 // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
998 // We leave extra space so there's room for the code that throws StackOverflowError.
999 byte* stack_end;
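    // Illustrative sketch only (the helper name is hypothetical, not the check the compiler
    // actually emits): an explicit stack overflow test amounts to comparing an address near the
    // current stack pointer against stack_end, e.g.
    //   if (reinterpret_cast<byte*>(&some_local) < tlsPtr_.stack_end) {
    //     ThrowNewStackOverflowError();  // hypothetical helper
    //   }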
1000
    1001  // The top of the managed stack, often manipulated directly by compiler-generated code.
1002 ManagedStack managed_stack;
1003
1004 // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
1005 // normally set to the address of itself.
1006 uintptr_t* suspend_trigger;
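    // Illustrative sketch (not the actual generated code): an implicit suspend check is just a
    // load through this field, e.g.
    //   *(tlsPtr_.suspend_trigger);  // Harmless while it points at itself; faults (SIGSEGV)
    //                                // once it has been cleared to 0, and the fault handler
    //                                // then performs the real suspend check.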
1007
    1008  // Every thread may have an associated JNI environment.
1009 JNIEnvExt* jni_env;
1010
1011 // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1012 // is easy but getting the address of Thread::Current is hard. This field can be read off of
1013 // Thread::Current to give the address.
1014 Thread* self;
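    // Illustrative sketch (register and offset names are hypothetical): on x86 this lets code
    // that already addresses thread-local storage read the Thread* with a single
    // segment-relative load such as
    //   mov eax, fs:[SELF_OFFSET]
    // instead of having to materialize the address of the thread-local slot first.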
1015
1016 // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
    1017  // start up, until the thread is registered and the local opeer is used.
1018 mirror::Object* opeer;
1019 jobject jpeer;
1020
1021 // The "lowest addressable byte" of the stack.
1022 byte* stack_begin;
1023
1024 // Size of the stack.
1025 size_t stack_size;
1026
1027 // The location the current exception was thrown from.
1028 ThrowLocation throw_location;
1029
1030 // Pointer to previous stack trace captured by sampling profiler.
1031 std::vector<mirror::ArtMethod*>* stack_trace_sample;
1032
    1033  // The next thread in the wait set this thread is part of, or NULL if not waiting.
1034 Thread* wait_next;
1035
1036 // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1037 mirror::Object* monitor_enter_object;
1038
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001039 // Top of linked list of handle scopes or nullptr for none.
1040 HandleScope* top_handle_scope;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001041
1042 // Needed to get the right ClassLoader in JNI_OnLoad, but also
1043 // useful for testing.
1044 mirror::ClassLoader* class_loader_override;
1045
1046 // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1047 Context* long_jump_context;
1048
1049 // Additional stack used by method instrumentation to store method and return pc values.
1050 // Stored as a pointer since std::deque is not PACKED.
1051 std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1052
1053 // JDWP invoke-during-breakpoint support.
1054 DebugInvokeReq* debug_invoke_req;
1055
1056 // JDWP single-stepping support.
1057 SingleStepControl* single_step_control;
1058
1059 // Shadow frame stack that is used temporarily during the deoptimization of a method.
1060 ShadowFrame* deoptimization_shadow_frame;
1061
Andreas Gampe2a0d4ec2014-06-02 22:05:22 -07001062  // Shadow frame stack that is currently under construction but not yet on the stack.
1063 ShadowFrame* shadow_frame_under_construction;
1064
Ian Rogersdd7624d2014-03-14 17:43:00 -07001065 // A cached copy of the java.lang.Thread's name.
1066 std::string* name;
1067
1068 // A cached pthread_t for the pthread underlying this Thread*.
1069 pthread_t pthread_self;
1070
Ian Rogersdd7624d2014-03-14 17:43:00 -07001071  // If no_thread_suspension is > 0, what is causing that assertion.
1072 const char* last_no_thread_suspension_cause;
1073
    1074  // Pending checkpoint functions or NULL if non-pending. Installation guarded by
1075 // Locks::thread_suspend_count_lock_.
1076 Closure* checkpoint_functions[kMaxCheckpoints];
1077
1078 // Entrypoint function pointers.
1079 // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1080 InterpreterEntryPoints interpreter_entrypoints;
1081 JniEntryPoints jni_entrypoints;
1082 PortableEntryPoints portable_entrypoints;
1083 QuickEntryPoints quick_entrypoints;
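    // These are structs of function pointers, so compiled code can reach a runtime helper with a
    // single load off the Thread*. Illustrative only (the member name here is hypothetical):
    //   self->tlsPtr_.quick_entrypoints.pAllocObject(...);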
1084
1085 // Thread-local allocation pointer.
1086 byte* thread_local_start;
1087 byte* thread_local_pos;
1088 byte* thread_local_end;
1089 size_t thread_local_objects;
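    // Illustrative sketch of a bump-pointer (TLAB-style) allocation using these fields
    // (simplified; no buffer refill or heap plumbing):
    //   byte* TryAllocFromTlab(size_t bytes) {
    //     if (thread_local_pos + bytes <= thread_local_end) {
    //       byte* result = thread_local_pos;
    //       thread_local_pos += bytes;
    //       ++thread_local_objects;
    //       return result;
    //     }
    //     return nullptr;  // Caller falls back to the shared allocator.
    //   }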
1090
Mathieu Chartier0651d412014-04-29 14:37:57 -07001091 // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
1092 void* rosalloc_runs[gc::allocator::RosAlloc::kNumThreadLocalSizeBrackets];
Ian Rogersdd7624d2014-03-14 17:43:00 -07001093
1094 // Thread-local allocation stack data/routines.
1095 mirror::Object** thread_local_alloc_stack_top;
1096 mirror::Object** thread_local_alloc_stack_end;
Chao-ying Fu9e369312014-05-21 11:20:52 -07001097
1098 // Support for Mutex lock hierarchy bug detection.
1099 BaseMutex* held_mutexes[kLockLevelCount];
Ian Rogersdd7624d2014-03-14 17:43:00 -07001100 } tlsPtr_;
1101
1102 // Guards the 'interrupted_' and 'wait_monitor_' members.
1103 Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1104
1105 // Condition variable waited upon during a wait.
1106 ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
1107 // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
1108 Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1109
1110 // Thread "interrupted" status; stays raised until queried or thrown.
1111 bool interrupted_ GUARDED_BY(wait_mutex_);
1112
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001113 friend class Dbg; // For SetStateUnsafe.
Mathieu Chartier15d34022014-02-26 17:16:38 -08001114 friend class gc::collector::SemiSpace; // For getting stack traces.
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001115 friend class Runtime; // For CreatePeer.
Ian Rogers5cf98192014-05-29 21:31:50 -07001116 friend class QuickExceptionHandler; // For dumping the stack.
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001117 friend class ScopedThreadStateChange;
1118 friend class SignalCatcher; // For SetStateUnsafe.
Mathieu Chartier119c6bd2014-05-09 14:11:47 -07001119 friend class StubTest; // For accessing entrypoints.
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001120 friend class ThreadList; // For ~Thread and Destroy.
1121
Andreas Gampe4352b452014-06-04 18:59:01 -07001122 friend class EntrypointsOrderTest; // To test the order of tls entries.
1123
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001124 DISALLOW_COPY_AND_ASSIGN(Thread);
1125};
Ian Rogersbdb03912011-09-14 00:55:44 -07001126
Elliott Hughes330304d2011-08-12 14:28:05 -07001127std::ostream& operator<<(std::ostream& os, const Thread& thread);
Elliott Hughes34e06962012-04-09 13:55:55 -07001128std::ostream& operator<<(std::ostream& os, const ThreadState& state);
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001129
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001130} // namespace art
1131
Brian Carlstromfc0e3212013-07-17 14:40:12 -07001132#endif // ART_RUNTIME_THREAD_H_