/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <memory>
#include <setjmp.h>
#include <string>

#include "arch/context.h"
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/enums.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "handle_scope.h"
#include "instrumentation.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"

class BacktraceMap;

namespace art {

namespace gc {
namespace accounting {
  template<class T> class AtomicStack;
}  // namespace accounting
namespace collector {
  class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace mirror {
  class Array;
  class Class;
  class ClassLoader;
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int32_t> IntArray;
  class StackTraceElement;
  class String;
  class Throwable;
}  // namespace mirror

namespace verifier {
class MethodVerifier;
}  // namespace verifier

class ArtMethod;
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DeoptimizationContextRecord;
class DexFile;
class FrameIdToShadowFrame;
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class SingleStepControl;
class StackedShadowFrameRecord;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

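// The ThreadFlag values below are single bits, so several flags can be set at
// once and each can be tested individually with a bitwise AND (see
// Thread::ReadFlag).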
enum ThreadFlag {
  kSuspendRequest = 1,  // If set, implies that suspend_count_ > 0 and the Thread should enter the
                        // safepoint handler.
  kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
  kActiveSuspendBarrier = 4  // Register that at least 1 suspend barrier needs to be passed.
};

enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame,
};

// This should match RosAlloc::kNumThreadLocalSizeBrackets.
static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;

// Thread's stack layout for implicit stack overflow checks:
//
//   +---------------------+  <- highest address of stack memory
//   |                     |
//   .                     .  <- SP
//   |                     |
//   |                     |
//   +---------------------+  <- stack_end
//   |                     |
//   |  Gap                |
//   |                     |
//   +---------------------+  <- stack_begin
//   |                     |
//   | Protected region    |
//   |                     |
//   +---------------------+  <- lowest address of stack memory
//
// The stack always grows down in memory. At the lowest address is a region of memory
// that is set mprotect(PROT_NONE). Any attempt to read or write this region will
// result in a segmentation fault signal. At any point, the thread's SP will be somewhere
// between stack_end and the highest address in stack memory. An implicit stack
// overflow check is a read of memory at a certain offset below the current SP (typically 4K).
// If the thread's SP is below the stack_end address, this will be a read into the protected
// region. If the SP is above the stack_end address, the thread is guaranteed to have
// at least 4K of space. Because stack overflow checks are only performed in generated code,
// if the thread makes a call out to a native function (through JNI), that native function
// might only have 4K of memory (if the SP is adjacent to stack_end).

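// As a rough sketch (not the exact instructions the compiler emits), the
// implicit check placed in a method prologue behaves like a probe one page
// below SP, e.g. in AArch64-flavored pseudocode:
//
//   sub x16, sp, #4096  // Compute an address below SP (x16 is assumed here).
//   ldr wzr, [x16]      // Load from it; faults if it falls in the protected
//                       // region, and the fault handler then raises a
//                       // StackOverflowError.
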
class Thread {
 public:
  static const size_t kStackOverflowImplicitCheckSize;

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
  // high cost and so we favor passing self around when possible.
  // TODO: mark as PURE so the compiler may coalesce and remove?
  static Thread* Current();

  // On a runnable thread, check for pending thread suspension request and handle if pending.
  void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);

  // Process pending thread suspension request and handle if pending.
  void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);

  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                   mirror::Object* thread_peer)
      REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
      REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Translates a thread offset such as 172 into the name of the member at that offset,
  // e.g. pAllocArrayFromCode.
  template<PointerSize size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os,
            bool dump_native_stack = true,
            BacktraceMap* backtrace_map = nullptr) const
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os) const
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
    return tls32_.debug_suspend_count;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags;
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
  // release thread_suspend_count_lock_ internally.
  ALWAYS_INLINE
  bool ModifySuspendCount(Thread* self,
                          int delta,
                          AtomicInteger* suspend_barrier,
                          bool for_debugger)
      REQUIRES(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function)
      REQUIRES(Locks::thread_suspend_count_lock_);

  void SetFlipFunction(Closure* function);
  Closure* GetFlipFunction();

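  // Thread-local mark stack for the concurrent copying collector; only
  // meaningful when read barriers are in use (hence the CHECKs below).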
  gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
    CHECK(kUseReadBarrier);
    return tlsPtr_.thread_local_mark_stack;
  }
  void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
    CHECK(kUseReadBarrier);
    tlsPtr_.thread_local_mark_stack = stack;
  }

  // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up the share of
  // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state, acquiring a share of mutator_lock_.
  ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
      REQUIRES(!Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_);

  // Transition from runnable into a state where mutator privileges are denied. Releases the share
  // of the mutator lock.
  ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);
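
  // A sketch of the typical pairing around a blocking native operation:
  //   self->TransitionFromRunnableToSuspended(kNative);
  //   ...  // block without holding a share of the mutator lock
  //   self->TransitionFromSuspendedToRunnable();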

  // Once called, thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
    Roles::uninterruptible_.Acquire();  // No-op.
    if (kIsDebugBuild) {
      CHECK(cause != nullptr);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
    Roles::uninterruptible_.Release();  // No-op.
  }
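
  // A sketch of the intended pairing:
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   ...  // code that must not reach a suspend point
  //   self->EndAssertNoThreadSuspension(old_cause);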

  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  // Return true if thread suspension is allowable.
  bool IsThreadSuspensionAllowable() const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  size_t NumberOfHeldMutexes() const;

  bool HoldsLock(mirror::Object*) const REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  // Guaranteed to be non-zero.
  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.exception;
  }

  void AssertPendingException() const;
  void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(mirror::Throwable* new_exception) REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
  }

  // Find the catch block and perform a long jump to the appropriate exception handler.
  NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    if (tlsPtr_.long_jump_context != nullptr) {
      // Each QuickExceptionHandler gets a long jump context and uses
      // it for doing the long jump, after finding catch blocks/doing deoptimization.
      // Both finding catch blocks and deoptimization can trigger another
      // exception, such as a result of class loading. So there can be nested
      // cases of exception handling and multiple contexts being used.
      // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
      // for reuse so there is no need to always allocate a new one each time when
      // getting a context. Since we only keep one context for reuse, delete the
      // existing one since the passed-in context is yet to be used for a longjump.
      delete tlsPtr_.long_jump_context;
    }
    tlsPtr_.long_jump_context = context;
  }

  // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
  // abort the runtime iff abort_on_error is true.
  ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns whether the given exception was thrown by the current Java method being executed
  // (Note that this includes native Java methods).
  bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetTopOfStack(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
  }

  // If 'msg' is null, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  // If 'msg' is null, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
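
  // Illustrative use of the printf-like variant (descriptor and arguments are
  // examples only):
  //   self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
  //                            "length=%d; index=%d", length, index);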

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Roles::uninterruptible_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Converts a jobject into an Object*.
  mirror::Object* DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
  // Checks if the weak global ref has been cleared by the GC without decoding it.
  bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() REQUIRES(!*wait_mutex_);
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() REQUIRES(!*wait_mutex_);
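  // (As with the java.lang.Thread methods they implement, Interrupted() clears
  // the interrupt status while IsInterrupted() leaves it unchanged.)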
  bool IsInterruptedLocked() REQUIRES(wait_mutex_) {
    return interrupted_;
  }
  void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
  void SetInterruptedLocked(bool i) REQUIRES(wait_mutex_) {
    interrupted_ = i;
  }
  void Notify() REQUIRES(!*wait_mutex_);

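  // Object-pointer poisoning (a debug-build aid): the cookie below is bumped
  // so that checks can detect raw mirror::Object* values incorrectly cached
  // across a suspension point.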
  ALWAYS_INLINE void PoisonObjectPointers() {
    ++poison_object_cookie_;
  }

  ALWAYS_INLINE static void PoisonObjectPointersIfDebug();

  ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
    return poison_object_cookie_;
  }

 private:
  void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
    wait_monitor_ = mon;
  }

  // Waiter linked-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

  jobject GetClassLoaderOverride() {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(jobject class_loader_override);

  // Create the internal representation of a stack trace, which is more time-
  // and space-efficient to compute than the StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is null, a new array is created; otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(
      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool HasDebuggerShadowFrames() const {
    return tlsPtr_.frame_id_to_shadow_frame != nullptr;
  }

  void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> IsGcMarkingOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
  }

  // Deoptimize the Java stack.
  void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale;
    size_t shrink;
    if (pointer_size == kRuntimePointerSize) {
      scale = 1;
      shrink = 1;
    } else if (pointer_size > kRuntimePointerSize) {
      scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize);
      shrink = 1;
    } else {
      DCHECK_GT(kRuntimePointerSize, pointer_size);
      scale = 1;
      shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size);
    }
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }
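
  // Worked example: on a 64-bit runtime (kRuntimePointerSize == k64), an
  // offset for a 32-bit target uses scale == 1 and shrink == 2, so a slot at
  // byte offset 24 within tls_ptr_sized_values maps to base + (24 * 1) / 2,
  // i.e. base + 12.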

 public:
  static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
                                                PointerSize pointer_size) {
    if (pointer_size == PointerSize::k32) {
      return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
          Uint32Value();
    } else {
      return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
          Uint32Value();
    }
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> SelfOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ExceptionOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> PeerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> CardTableOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_pos));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_end));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_objects));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> RosAllocRunsOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                rosalloc_runs));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_alloc_stack_top));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_alloc_stack_end));
  }

  // Size of the stack, less any space reserved for stack overflow handling.
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }

  uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
    if (implicit_overflow_check) {
      // The interpreter needs the extra overflow bytes that stack_end does
      // not include.
      return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
    } else {
      return tlsPtr_.stack_end;
    }
  }

  uint8_t* GetStackEnd() const {
    return tlsPtr_.stack_end;
  }

  // Set the stack end to the value to be used while handling a stack overflow.
  void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);

  // Set the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
  }

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection();

  bool IsHandlingStackOverflow() const {
    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> StackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> JniEnvOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFrameOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &tlsPtr_.managed_stack;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return tlsPtr_.managed_stack.PopShadowFrame();
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
  }

  // Number of references in handle scopes on this thread.
  size_t NumHandleReferences();

  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
  size_t NumStackReferences() REQUIRES_SHARED(Locks::mutator_lock_) {
    return NumHandleReferences() + NumJniShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool HandleScopeContains(jobject obj) const;

  void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
      REQUIRES_SHARED(Locks::mutator_lock_);

  HandleScope* GetTopHandleScope() {
    return tlsPtr_.top_handle_scope;
  }

  void PushHandleScope(HandleScope* handle_scope) {
    DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
    tlsPtr_.top_handle_scope = handle_scope;
  }

  HandleScope* PopHandleScope() {
    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
    DCHECK(handle_scope != nullptr);
    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
    return handle_scope;
  }
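
  // Handle scopes are normally managed by RAII helpers rather than pushed and
  // popped by hand; a sketch of the usual pattern (StackHandleScope assumed):
  //   StackHandleScope<1> hs(self);
  //   Handle<mirror::Class> h_class(hs.NewHandle(klass));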

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                top_handle_scope));
  }

  DebugInvokeReq* GetInvokeReq() const {
    return tlsPtr_.debug_invoke_req;
  }

  SingleStepControl* GetSingleStepControl() const {
    return tlsPtr_.single_step_control;
  }

  // Indicates whether this thread is ready to invoke a method for debugging. This
  // is only true if the thread has been suspended by a debug event.
  bool IsReadyForDebugInvoke() const {
    return tls32_.ready_for_debug_invoke;
  }

  void SetReadyForDebugInvoke(bool ready) {
    tls32_.ready_for_debug_invoke = ready;
  }

  bool IsDebugMethodEntry() const {
    return tls32_.debug_method_entry_;
  }

  void SetDebugMethodEntry() {
    tls32_.debug_method_entry_ = true;
  }

  void ClearDebugMethodEntry() {
    tls32_.debug_method_entry_ = false;
  }

  bool GetIsGcMarking() const {
    CHECK(kUseReadBarrier);
    return tls32_.is_gc_marking;
  }

  void SetIsGcMarking(bool is_marking) {
    CHECK(kUseReadBarrier);
    tls32_.is_gc_marking = is_marking;
  }

  bool GetWeakRefAccessEnabled() const {
    CHECK(kUseReadBarrier);
    return tls32_.weak_ref_access_enabled;
  }

  void SetWeakRefAccessEnabled(bool enabled) {
    CHECK(kUseReadBarrier);
    tls32_.weak_ref_access_enabled = enabled;
  }

  uint32_t GetDisableThreadFlipCount() const {
    CHECK(kUseReadBarrier);
    return tls32_.disable_thread_flip_count;
  }

  void IncrementDisableThreadFlipCount() {
    CHECK(kUseReadBarrier);
    ++tls32_.disable_thread_flip_count;
  }

  void DecrementDisableThreadFlipCount() {
    CHECK(kUseReadBarrier);
    DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
    --tls32_.disable_thread_flip_count;
  }

  // Activates single step control for debugging. The thread takes
  // ownership of the given SingleStepControl*. It is deleted by a call
  // to DeactivateSingleStepControl or upon thread destruction.
  void ActivateSingleStepControl(SingleStepControl* ssc);

  // Deactivates single step control for debugging.
  void DeactivateSingleStepControl();

  // Sets the debug invoke request for debugging. When the thread is resumed,
  // it executes the method described by this request then sends the reply
  // before suspending itself. The thread takes ownership of the given
  // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
  void SetDebugInvokeReq(DebugInvokeReq* req);

  // Clears the debug invoke request for debugging. When the thread completes
  // the method invocation, it deletes its debug invoke request and suspends
  // itself.
  void ClearDebugInvokeReq();

  // Returns the fake exception used to activate deoptimization.
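  // (The sentinel returned below can never be the address of a real object,
  // which is how it is distinguished from a genuine pending exception.)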
  static mirror::Throwable* GetDeoptimizationException() {
    return reinterpret_cast<mirror::Throwable*>(-1);
  }

  // Currently deoptimization invokes the verifier, which can trigger class loading
  // and execute Java code, so there might be nested deoptimizations happening.
  // We need to save the ongoing deoptimization shadow frames and return
  // values on stacks.
  // 'from_code' denotes whether the deoptimization was explicitly made from
  // compiled code.
  void PushDeoptimizationContext(const JValue& return_value,
                                 bool is_reference,
                                 bool from_code,
                                 mirror::Throwable* exception)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void PopDeoptimizationContext(JValue* result, mirror::Throwable** exception, bool* from_code)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertHasDeoptimizationContext()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
  ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
Andreas Gampe2a0d4ec2014-06-02 22:05:22 -0700931
Mingyao Yang99170c62015-07-06 11:10:37 -0700932  // For the debugger: find the shadow frame that corresponds to a frame id,
933  // or return null if there is none.
934 ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700935 REQUIRES_SHARED(Locks::mutator_lock_);
Mingyao Yang99170c62015-07-06 11:10:37 -0700936  // For the debugger: find the bool array that keeps track of the updated vreg set
937 // for a frame id.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700938 bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
Mingyao Yang99170c62015-07-06 11:10:37 -0700939  // For the debugger: find the shadow frame that corresponds to a frame id. If
940 // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
941 ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
942 uint32_t num_vregs,
943 ArtMethod* method,
944 uint32_t dex_pc)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700945 REQUIRES_SHARED(Locks::mutator_lock_);
Mingyao Yang99170c62015-07-06 11:10:37 -0700946
947 // Delete the entry that maps from frame_id to shadow_frame.
948 void RemoveDebuggerShadowFrameMapping(size_t frame_id)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700949 REQUIRES_SHARED(Locks::mutator_lock_);
Mingyao Yang99170c62015-07-06 11:10:37 -0700950
Ian Rogers62d6c772013-02-27 08:32:07 -0800951 std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700952 return tlsPtr_.instrumentation_stack;
jeffhaoe343b762011-12-05 16:36:44 -0800953 }
954
Mathieu Chartiere401d142015-04-22 13:56:20 -0700955 std::vector<ArtMethod*>* GetStackTraceSample() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700956 return tlsPtr_.stack_trace_sample;
Jeff Hao5ce4b172013-08-16 16:27:18 -0700957 }
958
Mathieu Chartiere401d142015-04-22 13:56:20 -0700959 void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700960 tlsPtr_.stack_trace_sample = sample;
Jeff Hao5ce4b172013-08-16 16:27:18 -0700961 }
962
963 uint64_t GetTraceClockBase() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700964 return tls64_.trace_clock_base;
Jeff Hao5ce4b172013-08-16 16:27:18 -0700965 }
966
967 void SetTraceClockBase(uint64_t clock_base) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700968 tls64_.trace_clock_base = clock_base;
Jeff Hao5ce4b172013-08-16 16:27:18 -0700969 }
970
Ian Rogers81d425b2012-09-27 16:03:43 -0700971 BaseMutex* GetHeldMutex(LockLevel level) const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700972 return tlsPtr_.held_mutexes[level];
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700973 }
974
Ian Rogers81d425b2012-09-27 16:03:43 -0700975 void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700976 tlsPtr_.held_mutexes[level] = mutex;
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700977 }
Elliott Hughesffb465f2012-03-01 18:46:05 -0800978
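  // Illustrative sketch (assumed, not this file's code): a lock
  // implementation can use the accessors above for hierarchy checking.
  //
  //   // On acquisition, at some lock level 'level_':
  //   DCHECK(self->GetHeldMutex(level_) == nullptr);
  //   self->SetHeldMutex(level_, this);
  //   // On release:
  //   self->SetHeldMutex(level_, nullptr);
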
Mathieu Chartier752a0e62013-06-27 11:03:27 -0700979 void RunCheckpointFunction();
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700980
Yu Lieac44242015-06-29 10:50:03 +0800981 bool PassActiveSuspendBarriers(Thread* self)
Mathieu Chartier90443472015-07-16 20:32:27 -0700982 REQUIRES(!Locks::thread_suspend_count_lock_);
Yu Lieac44242015-06-29 10:50:03 +0800983
984 void ClearSuspendBarrier(AtomicInteger* target)
Mathieu Chartier90443472015-07-16 20:32:27 -0700985 REQUIRES(Locks::thread_suspend_count_lock_);
Yu Lieac44242015-06-29 10:50:03 +0800986
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700987 bool ReadFlag(ThreadFlag flag) const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700988 return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700989 }
990
Jeff Hao9cec2472013-05-14 18:17:06 -0700991 bool TestAllFlags() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700992 return (tls32_.state_and_flags.as_struct.flags != 0);
Jeff Hao9cec2472013-05-14 18:17:06 -0700993 }
994
Ian Rogers8c1b5f72014-07-09 22:02:36 -0700995 void AtomicSetFlag(ThreadFlag flag) {
996 tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
997 }
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700998
Ian Rogers8c1b5f72014-07-09 22:02:36 -0700999 void AtomicClearFlag(ThreadFlag flag) {
1000 tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
1001 }
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001002
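  // Illustrative sketch: a simplified, assumed checkpoint handshake using
  // the flag helpers above (the real protocol also involves suspend counts
  // and barriers).
  //
  //   target->AtomicSetFlag(kCheckpointRequest);    // requesting thread
  //   if (target->ReadFlag(kCheckpointRequest)) {   // target, at a poll point
  //     target->RunCheckpointFunction();
  //     target->AtomicClearFlag(kCheckpointRequest);
  //   }
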
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001003 void ResetQuickAllocEntryPointsForThread();
1004
Ian Rogersdd7624d2014-03-14 17:43:00 -07001005 // Returns the remaining space in the TLAB.
1006 size_t TlabSize() const;
1007 // Doesn't check that there is room.
1008 mirror::Object* AllocTlab(size_t bytes);
Ian Rogers13735952014-10-08 12:43:28 -07001009 void SetTlab(uint8_t* start, uint8_t* end);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001010 bool HasTlab() const;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001011 uint8_t* GetTlabStart() {
1012 return tlsPtr_.thread_local_start;
1013 }
1014 uint8_t* GetTlabPos() {
1015 return tlsPtr_.thread_local_pos;
1016 }
Elliott Hughes5d96a712012-06-28 12:24:27 -07001017
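  // Illustrative sketch of how the TLAB accessors above are meant to be
  // used; AllocTlab itself does not check for room (see the comment above),
  // so a caller is assumed to consult TlabSize() first.
  //
  //   if (thread->TlabSize() >= bytes) {
  //     mirror::Object* obj = thread->AllocTlab(bytes);  // bumps thread_local_pos
  //     // ... initialize obj ...
  //   } else {
  //     // refill the TLAB (SetTlab) or take the slow allocation path
  //   }
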
Ian Rogersdd7624d2014-03-14 17:43:00 -07001018 // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1019 // equal to a valid pointer.
1020  // TODO: does this need to be atomic? I don't think so.
1021 void RemoveSuspendTrigger() {
1022 tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1023 }
1024
1025 // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1026 // The next time a suspend check is done, it will load from the value at this address
1027 // and trigger a SIGSEGV.
1028 void TriggerSuspend() {
1029 tlsPtr_.suspend_trigger = nullptr;
1030 }
1031
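  // Illustrative sketch of the implicit suspend check that TriggerSuspend()
  // arms: compiled code is assumed to emit a load through suspend_trigger at
  // poll points, which faults once the trigger is null and lets the SIGSEGV
  // handler perform the actual suspension.
  //
  //   uintptr_t* trigger = tlsPtr_.suspend_trigger;
  //   uintptr_t poll = *trigger;  // faults after TriggerSuspend()
  //   (void) poll;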
1032
1033 // Push an object onto the allocation stack.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001034 bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001035 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001036
1037 // Set the thread local allocation pointers to the given pointers.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001038 void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1039 StackReference<mirror::Object>* end);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001040
1041 // Resets the thread local allocation pointers.
1042 void RevokeThreadLocalAllocationStack();
1043
1044 size_t GetThreadLocalBytesAllocated() const {
Mathieu Chartier14cc9be2014-07-11 10:26:37 -07001045 return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001046 }
1047
1048 size_t GetThreadLocalObjectsAllocated() const {
1049 return tlsPtr_.thread_local_objects;
1050 }
1051
Ian Rogersdd7624d2014-03-14 17:43:00 -07001052 void* GetRosAllocRun(size_t index) const {
1053 return tlsPtr_.rosalloc_runs[index];
1054 }
1055
1056 void SetRosAllocRun(size_t index, void* run) {
1057 tlsPtr_.rosalloc_runs[index] = run;
1058 }
1059
Andreas Gampe2c2d2a02016-03-17 21:27:19 -07001060 bool ProtectStack(bool fatal_on_error = true);
Dave Allison648d7112014-07-25 16:15:27 -07001061 bool UnprotectStack();
1062
buzbee1452bee2015-03-06 14:43:04 -08001063 void SetMterpDefaultIBase(void* ibase) {
1064 tlsPtr_.mterp_default_ibase = ibase;
1065 }
1066
1067 void SetMterpCurrentIBase(void* ibase) {
1068 tlsPtr_.mterp_current_ibase = ibase;
1069 }
1070
1071 void SetMterpAltIBase(void* ibase) {
1072 tlsPtr_.mterp_alt_ibase = ibase;
1073 }
1074
1075 const void* GetMterpDefaultIBase() const {
1076 return tlsPtr_.mterp_default_ibase;
1077 }
1078
1079 const void* GetMterpCurrentIBase() const {
1080 return tlsPtr_.mterp_current_ibase;
1081 }
1082
1083 const void* GetMterpAltIBase() const {
1084 return tlsPtr_.mterp_alt_ibase;
1085 }
1086
Nicolas Geoffraye8e11272016-06-28 18:08:46 +01001087 // Notify that a signal is being handled. This is to protect us from doing recursive
1088 // NPE handling after a SIGSEGV.
Dave Allison648d7112014-07-25 16:15:27 -07001089 void NoteSignalBeingHandled() {
1090 if (tls32_.handling_signal_) {
1091 LOG(FATAL) << "Detected signal while processing a signal";
1092 }
1093 tls32_.handling_signal_ = true;
1094 }
1095
1096 void NoteSignalHandlerDone() {
1097 tls32_.handling_signal_ = false;
1098 }
1099
Dave Allison8ce6b902014-08-26 11:07:58 -07001100 jmp_buf* GetNestedSignalState() {
1101 return tlsPtr_.nested_signal_state;
1102 }
1103
Hiroshi Yamauchiee235822016-08-19 17:03:27 -07001104 bool IsTransitioningToRunnable() const {
1105 return tls32_.is_transitioning_to_runnable;
1106 }
1107
1108 void SetIsTransitioningToRunnable(bool value) {
1109 tls32_.is_transitioning_to_runnable = value;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001110 }
1111
Mathieu Chartierd0ad2ee2015-03-31 14:59:59 -07001112 void PushVerifier(verifier::MethodVerifier* verifier);
1113 void PopVerifier(verifier::MethodVerifier* verifier);
Mathieu Chartier12d625f2015-03-13 11:33:37 -07001114
Jeff Hao848f70a2014-01-15 13:49:50 -08001115 void InitStringEntryPoints();
1116
Mathieu Chartierdfe02f62016-02-01 20:15:11 -08001117 void ModifyDebugDisallowReadBarrier(int8_t delta) {
1118 debug_disallow_read_barrier_ += delta;
1119 }
1120
1121 uint8_t GetDebugDisallowReadBarrierCount() const {
1122 return debug_disallow_read_barrier_;
1123 }
1124
Calin Juravle97cbc922016-04-15 16:16:35 +01001125 // Returns true if the current thread is the jit sensitive thread.
1126 bool IsJitSensitiveThread() const {
1127 return this == jit_sensitive_thread_;
1128 }
1129
1130 // Returns true if StrictMode events are traced for the current thread.
Calin Juravleb2771b42016-04-07 17:09:25 +01001131 static bool IsSensitiveThread() {
1132 if (is_sensitive_thread_hook_ != nullptr) {
1133 return (*is_sensitive_thread_hook_)();
1134 }
1135 return false;
1136 }
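  // Illustrative sketch (assumed caller): StrictMode-style event code can
  // key off the predicate above; the surrounding logic is hypothetical.
  //
  //   if (Thread::IsSensitiveThread()) {
  //     // Record a StrictMode event for this operation.
  //   }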
1137
Ian Rogersdd7624d2014-03-14 17:43:00 -07001138 private:
Ian Rogers52673ff2012-06-27 23:25:34 -07001139 explicit Thread(bool daemon);
Mathieu Chartier90443472015-07-16 20:32:27 -07001140 ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
Elliott Hughesc0f09332012-03-26 13:27:06 -07001141 void Destroy();
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001142
Ian Rogers365c1022012-06-22 15:05:28 -07001143 void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
Elliott Hughes5fe594f2011-09-08 12:33:17 -07001144
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001145 template<bool kTransactionActive>
1146 void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
1147 jobject thread_name, jint thread_priority)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001148 REQUIRES_SHARED(Locks::mutator_lock_);
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001149
Ian Rogers62d6c772013-02-27 08:32:07 -08001150  // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
Hiroshi Yamauchi98810e32016-05-24 14:55:40 -07001151 // Dbg::ManageDeoptimization.
Ian Rogers474b6da2012-09-25 00:20:38 -07001152 ThreadState SetStateUnsafe(ThreadState new_state) {
1153 ThreadState old_state = GetState();
Mathieu Chartier8ac9c912015-10-01 15:58:41 -07001154 if (old_state == kRunnable && new_state != kRunnable) {
1155 // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1156 // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1157 // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1158 TransitionToSuspendedAndRunCheckpoints(new_state);
1159 // Since we transitioned to a suspended state, check the pass barrier requests.
1160 PassActiveSuspendBarriers();
1161 } else {
1162 tls32_.state_and_flags.as_struct.state = new_state;
Yu Lieac44242015-06-29 10:50:03 +08001163 }
Ian Rogersc747cff2012-08-31 18:20:08 -07001164 return old_state;
1165 }
Ian Rogersc747cff2012-08-31 18:20:08 -07001166
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001167 void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogers04d7aa92013-03-16 14:29:17 -07001168
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001169 void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
Nicolas Geoffraya73280d2016-02-15 13:05:16 +00001170 void DumpStack(std::ostream& os,
1171 bool dump_native_stack = true,
1172 BacktraceMap* backtrace_map = nullptr) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001173 REQUIRES(!Locks::thread_suspend_count_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001174 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughesd92bec42011-09-02 17:04:36 -07001175
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001176 // Out-of-line conveniences for debugging in gdb.
Brian Carlstrom7934ac22013-07-26 10:54:15 -07001177 static Thread* CurrentFromGdb(); // Like Thread::Current.
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001178 // Like Thread::Dump(std::cerr).
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001179 void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001180
Elliott Hughes93e74e82011-09-13 11:07:03 -07001181 static void* CreateCallback(void* arg);
1182
Ian Rogerscfaa4552012-11-26 21:00:08 -08001183 void HandleUncaughtExceptions(ScopedObjectAccess& soa)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001184 REQUIRES_SHARED(Locks::mutator_lock_);
1185 void RemoveFromThreadGroup(ScopedObjectAccess& soa) REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001186
Andreas Gampe449357d2015-06-01 22:29:51 -07001187 // Initialize a thread.
1188 //
1189  // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. If
1190  // Init succeeds, the thread takes ownership of it. If Init fails, it is the caller's
1191 // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1192 // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1193 // of false).
1194 bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
Mathieu Chartier90443472015-07-16 20:32:27 -07001195 REQUIRES(Locks::runtime_shutdown_lock_);
Ian Rogers5d76c432011-10-31 21:42:49 -07001196 void InitCardTable();
Ian Rogersb033c752011-07-20 12:22:35 -07001197 void InitCpu();
Alexei Zavjalov1efa0a92014-02-04 02:08:31 +07001198 void CleanupCpu();
Ian Rogers848871b2013-08-05 10:56:33 -07001199 void InitTlsEntryPoints();
Brian Carlstromcaabb1b2011-10-11 18:09:13 -07001200 void InitTid();
Brian Carlstromcaabb1b2011-10-11 18:09:13 -07001201 void InitPthreadKeySelf();
Ian Rogersf4d4da12014-11-11 16:10:33 -08001202 bool InitStackHwm();
Elliott Hughesbe759c62011-09-08 19:38:21 -07001203
Elliott Hughesd6a23bd2013-07-16 14:19:52 -07001204 void SetUpAlternateSignalStack();
1205 void TearDownAlternateSignalStack();
1206
Mathieu Chartier8ac9c912015-10-01 15:58:41 -07001207 ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1208 REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1209
1210 ALWAYS_INLINE void PassActiveSuspendBarriers()
1211 REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1212
Calin Juravle97cbc922016-04-15 16:16:35 +01001213 // Registers the current thread as the jit sensitive thread. Should be called just once.
1214 static void SetJitSensitiveThread() {
1215 if (jit_sensitive_thread_ == nullptr) {
1216 jit_sensitive_thread_ = Thread::Current();
1217 } else {
1218 LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1219 << Thread::Current()->GetTid();
1220 }
1221 }
1222
Calin Juravleb2771b42016-04-07 17:09:25 +01001223 static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1224 is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1225 }
1226
Hiroshi Yamauchi02e7f1a2016-10-03 15:32:01 -07001227 bool ModifySuspendCountInternal(Thread* self,
1228 int delta,
1229 AtomicInteger* suspend_barrier,
1230 bool for_debugger)
1231 REQUIRES(Locks::thread_suspend_count_lock_);
1232
Ian Rogers474b6da2012-09-25 00:20:38 -07001233  // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
1234 // change from being Suspended to Runnable without a suspend request occurring.
Chris Dearman59cde532013-12-04 18:53:49 -08001235 union PACKED(4) StateAndFlags {
1236 StateAndFlags() {}
Ian Rogersdf1ce912012-11-27 17:07:11 -08001237 struct PACKED(4) {
Ian Rogers30e173f2012-09-26 14:35:03 -07001238 // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
1239 // ThreadFlags for bit field meanings.
1240 volatile uint16_t flags;
1241  // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
1242  // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
1243  // operation. If a thread is suspended and a suspend_request is present, the thread may not
1244 // change to Runnable as a GC or other operation is in progress.
Ian Rogers01ae5802012-09-28 16:14:01 -07001245 volatile uint16_t state;
Ian Rogers30e173f2012-09-26 14:35:03 -07001246 } as_struct;
Ian Rogersb8e087e2014-07-09 21:12:06 -07001247 AtomicInteger as_atomic_int;
Ian Rogers01ae5802012-09-28 16:14:01 -07001248 volatile int32_t as_int;
Chris Dearman59cde532013-12-04 18:53:49 -08001249
1250 private:
1251  // gcc does not correctly handle assignments to structs with volatile members.
1252 // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
1253 DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
Ian Rogers474b6da2012-09-25 00:20:38 -07001254 };
Andreas Gampe575e78c2014-11-03 23:41:03 -08001255 static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
Ian Rogers474b6da2012-09-25 00:20:38 -07001256
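  // Illustrative sketch: because the state and flags share one 32-bit word,
  // a Suspended -> Runnable transition can be a single CAS that only
  // succeeds while no flags are set. A simplified, assumed version:
  //
  //   union StateAndFlags old_sf, new_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;
  //   if (old_sf.as_struct.flags != 0) { /* serve the pending request */ }
  //   new_sf.as_int = old_sf.as_int;
  //   new_sf.as_struct.state = kRunnable;
  //   bool done = tls32_.state_and_flags.as_atomic_int
  //                   .CompareExchangeWeakSequentiallyConsistent(old_sf.as_int,
  //                                                              new_sf.as_int);
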
Ian Rogersdd7624d2014-03-14 17:43:00 -07001257 static void ThreadExitCallback(void* arg);
Elliott Hughes5d96a712012-06-28 12:24:27 -07001258
Yu Lieac44242015-06-29 10:50:03 +08001259 // Maximum number of suspend barriers.
1260 static constexpr uint32_t kMaxSuspendBarriers = 3;
1261
Ian Rogersdd7624d2014-03-14 17:43:00 -07001262 // Has Thread::Startup been called?
1263 static bool is_started_;
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001264
Ian Rogersdd7624d2014-03-14 17:43:00 -07001265 // TLS key used to retrieve the Thread*.
1266 static pthread_key_t pthread_key_self_;
Ian Rogersa32a6fd2012-02-06 20:18:44 -08001267
Ian Rogersdd7624d2014-03-14 17:43:00 -07001268  // Used to notify threads that they should attempt to resume; they will suspend again if
1269 // their suspend count is > 0.
1270 static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
Dave Allisonb373e092014-02-20 16:06:36 -08001271
Calin Juravleb2771b42016-04-07 17:09:25 +01001272  // Hook passed by the framework that returns true
1273 // when StrictMode events are traced for the current thread.
1274 static bool (*is_sensitive_thread_hook_)();
Calin Juravle97cbc922016-04-15 16:16:35 +01001275 // Stores the jit sensitive thread (which for now is the UI thread).
1276 static Thread* jit_sensitive_thread_;
Calin Juravleb2771b42016-04-07 17:09:25 +01001277
Ian Rogersdd7624d2014-03-14 17:43:00 -07001278 /***********************************************************************************************/
1279  // Thread local storage. Fields are grouped by size to ease matching offsets between the
1280  // 32-bit and 64-bit layouts, accounting for pointer size differences. To encourage shorter
1281  // encoding, more frequently used values appear first if possible.
1282 /***********************************************************************************************/
Elliott Hughes6a607ad2012-07-13 20:40:00 -07001283
Zuo Wangf37a88b2014-07-10 04:26:41 -07001284 struct PACKED(4) tls_32bit_sized_values {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001285 // We have no control over the size of 'bool', but want our boolean fields
1286 // to be 4-byte quantities.
1287 typedef uint32_t bool32_t;
Ian Rogers22f454c2012-09-08 11:06:29 -07001288
Ian Rogersdd7624d2014-03-14 17:43:00 -07001289 explicit tls_32bit_sized_values(bool is_daemon) :
1290 suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
1291 daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001292 thread_exit_check_count(0), handling_signal_(false),
Hiroshi Yamauchiee235822016-08-19 17:03:27 -07001293 is_transitioning_to_runnable(false), ready_for_debug_invoke(false),
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -08001294 debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
1295 disable_thread_flip_count(0) {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001296 }
Dave Allisonb373e092014-02-20 16:06:36 -08001297
Ian Rogersdd7624d2014-03-14 17:43:00 -07001298 union StateAndFlags state_and_flags;
Andreas Gampe575e78c2014-11-03 23:41:03 -08001299 static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
1300 "Size of state_and_flags and int32 are different");
Dave Allisonb373e092014-02-20 16:06:36 -08001301
Ian Rogersdd7624d2014-03-14 17:43:00 -07001302 // A non-zero value is used to tell the current thread to enter a safe point
1303 // at the next poll.
1304 int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001305
Ian Rogersdd7624d2014-03-14 17:43:00 -07001306 // How much of 'suspend_count_' is by request of the debugger, used to set things right
1307 // when the debugger detaches. Must be <= suspend_count_.
1308 int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001309
Ian Rogersdd7624d2014-03-14 17:43:00 -07001310 // Thin lock thread id. This is a small integer used by the thin lock implementation.
1311 // This is not to be confused with the native thread's tid, nor is it the value returned
1312 // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1313 // important difference between this id and the ids visible to managed code is that these
1314 // ones get reused (to ensure that they fit in the number of bits available).
1315 uint32_t thin_lock_thread_id;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001316
Ian Rogersdd7624d2014-03-14 17:43:00 -07001317 // System thread id.
1318 uint32_t tid;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001319
Ian Rogersdd7624d2014-03-14 17:43:00 -07001320 // Is the thread a daemon?
1321 const bool32_t daemon;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001322
Ian Rogersdd7624d2014-03-14 17:43:00 -07001323 // A boolean telling us whether we're recursively throwing OOME.
1324 bool32_t throwing_OutOfMemoryError;
1325
1326 // A positive value implies we're in a region where thread suspension isn't expected.
1327 uint32_t no_thread_suspension;
1328
1329 // How many times has our pthread key's destructor been called?
1330 uint32_t thread_exit_check_count;
Sebastien Hertz9f102032014-05-23 08:59:42 +02001331
Dave Allison648d7112014-07-25 16:15:27 -07001332 // True if signal is being handled by this thread.
1333 bool32_t handling_signal_;
1334
Hiroshi Yamauchiee235822016-08-19 17:03:27 -07001335 // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
1336  // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
1337 // the rest of them.
1338 bool32_t is_transitioning_to_runnable;
Sebastien Hertz1558b572015-02-25 15:05:59 +01001339
1340 // True if the thread has been suspended by a debugger event. This is
1341 // used to invoke method from the debugger which is only allowed when
1342 // the thread is suspended by an event.
1343 bool32_t ready_for_debug_invoke;
Sebastien Hertz9d6bf692015-04-10 12:12:33 +02001344
1345 // True if the thread enters a method. This is used to detect method entry
1346 // event for the debugger.
1347 bool32_t debug_method_entry_;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001348
Hiroshi Yamauchi00370822015-08-18 14:47:25 -07001349 // True if the GC is in the marking phase. This is used for the CC collector only. This is
1350 // thread local so that we can simplify the logic to check for the fast path of read barriers of
1351 // GC roots.
1352 bool32_t is_gc_marking;
1353
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001354 // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
1355 // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
1356 // processing of the CC collector only. This is thread local so that we can enable/disable weak
1357 // ref access by using a checkpoint and avoid a race around the time weak ref access gets
1358 // disabled and concurrent reference processing begins (if weak ref access is disabled during a
1359 // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
1360 // ReferenceProcessor::EnableSlowPath().
1361 bool32_t weak_ref_access_enabled;
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -08001362
1363 // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1364  // levels of (nested) JNI critical sections the thread is in and is used to detect entry into
1365  // a nested JNI critical section.
1366 uint32_t disable_thread_flip_count;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001367 } tls32_;
1368
1369 struct PACKED(8) tls_64bit_sized_values {
Sebastien Hertz07474662015-08-25 15:12:33 +00001370 tls_64bit_sized_values() : trace_clock_base(0) {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001371 }
1372
1373 // The clock base used for tracing.
1374 uint64_t trace_clock_base;
1375
Ian Rogersdd7624d2014-03-14 17:43:00 -07001376 RuntimeStats stats;
1377 } tls64_;
1378
Andreas Gampe6aa13702015-10-28 10:57:25 -07001379 struct PACKED(sizeof(void*)) tls_ptr_sized_values {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001380 tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
Andreas Gampe449357d2015-06-01 22:29:51 -07001381 managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
1382 self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
Ian Rogersdd7624d2014-03-14 17:43:00 -07001383 stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001384 top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
Ian Rogersdd7624d2014-03-14 17:43:00 -07001385 instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
Sebastien Hertz07474662015-08-25 15:12:33 +00001386 stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
Mingyao Yang99170c62015-07-06 11:10:37 -07001387 frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
Vladimir Marko05846472016-09-14 12:49:57 +01001388 last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
1389 thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
1390 thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
1391 mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
1392 thread_local_alloc_stack_end(nullptr), nested_signal_state(nullptr),
1393 flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr) {
Mathieu Chartier12d625f2015-03-13 11:33:37 -07001394 std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001395 }
1396
1397 // The biased card table, see CardTable for details.
Ian Rogers13735952014-10-08 12:43:28 -07001398 uint8_t* card_table;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001399
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001400 // The pending exception or null.
Ian Rogersdd7624d2014-03-14 17:43:00 -07001401 mirror::Throwable* exception;
1402
1403 // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1404 // We leave extra space so there's room for the code that throws StackOverflowError.
Ian Rogers13735952014-10-08 12:43:28 -07001405 uint8_t* stack_end;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001406
1407 // The top of the managed stack often manipulated directly by compiler generated code.
1408 ManagedStack managed_stack;
1409
1410 // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
1411 // normally set to the address of itself.
1412 uintptr_t* suspend_trigger;
1413
1414  // Every thread may have an associated JNI environment.
1415 JNIEnvExt* jni_env;
1416
Andreas Gampe449357d2015-06-01 22:29:51 -07001417 // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1418 // created thread.
1419 JNIEnvExt* tmp_jni_env;
1420
Ian Rogersdd7624d2014-03-14 17:43:00 -07001421 // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1422 // is easy but getting the address of Thread::Current is hard. This field can be read off of
1423 // Thread::Current to give the address.
1424 Thread* self;
1425
1426 // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1427 // start up, until the thread is registered and the local opeer_ is used.
1428 mirror::Object* opeer;
1429 jobject jpeer;
1430
1431 // The "lowest addressable byte" of the stack.
Ian Rogers13735952014-10-08 12:43:28 -07001432 uint8_t* stack_begin;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001433
1434 // Size of the stack.
1435 size_t stack_size;
1436
Ian Rogersdd7624d2014-03-14 17:43:00 -07001437 // Pointer to previous stack trace captured by sampling profiler.
Mathieu Chartiere401d142015-04-22 13:56:20 -07001438 std::vector<ArtMethod*>* stack_trace_sample;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001439
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001440  // The next thread in the wait set this thread is part of, or null if not waiting.
Ian Rogersdd7624d2014-03-14 17:43:00 -07001441 Thread* wait_next;
1442
1443 // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1444 mirror::Object* monitor_enter_object;
1445
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001446 // Top of linked list of handle scopes or null for none.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001447 HandleScope* top_handle_scope;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001448
1449 // Needed to get the right ClassLoader in JNI_OnLoad, but also
1450 // useful for testing.
Ian Rogers68d8b422014-07-17 11:09:10 -07001451 jobject class_loader_override;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001452
1453 // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1454 Context* long_jump_context;
1455
1456 // Additional stack used by method instrumentation to store method and return pc values.
1457 // Stored as a pointer since std::deque is not PACKED.
1458 std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1459
1460 // JDWP invoke-during-breakpoint support.
1461 DebugInvokeReq* debug_invoke_req;
1462
1463 // JDWP single-stepping support.
1464 SingleStepControl* single_step_control;
1465
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001466  // For GC purposes, a shadow frame record stack that keeps track of:
1467 // 1) shadow frames under construction.
1468 // 2) deoptimization shadow frames.
1469 StackedShadowFrameRecord* stacked_shadow_frame_record;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001470
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001471 // Deoptimization return value record stack.
Sebastien Hertz07474662015-08-25 15:12:33 +00001472 DeoptimizationContextRecord* deoptimization_context_stack;
Andreas Gampe2a0d4ec2014-06-02 22:05:22 -07001473
Mingyao Yang99170c62015-07-06 11:10:37 -07001474  // For the debugger: a linked list that keeps the mapping from frame_id to shadow frame.
1475 // Shadow frames may be created before deoptimization happens so that the debugger can
1476 // set local values there first.
1477 FrameIdToShadowFrame* frame_id_to_shadow_frame;
1478
Ian Rogersdd7624d2014-03-14 17:43:00 -07001479 // A cached copy of the java.lang.Thread's name.
1480 std::string* name;
1481
1482 // A cached pthread_t for the pthread underlying this Thread*.
1483 pthread_t pthread_self;
1484
Ian Rogersdd7624d2014-03-14 17:43:00 -07001485 // If no_thread_suspension_ is > 0, what is causing that assertion.
1486 const char* last_no_thread_suspension_cause;
1487
Mathieu Chartier952e1e32016-06-13 14:04:02 -07001488  // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
1489 // requests another checkpoint, it goes to the checkpoint overflow list.
1490 Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001491
Yu Lieac44242015-06-29 10:50:03 +08001492  // Pending barriers that require passing, or null if non-pending. Installation is guarded by
1493 // Locks::thread_suspend_count_lock_.
1494 // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
1495 // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
1496 AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
1497
Ian Rogersdd7624d2014-03-14 17:43:00 -07001498 // Entrypoint function pointers.
1499 // TODO: move this to more of a global offset table model to avoid per-thread duplication.
Ian Rogersdd7624d2014-03-14 17:43:00 -07001500 JniEntryPoints jni_entrypoints;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001501 QuickEntryPoints quick_entrypoints;
1502
1503 // Thread-local allocation pointer.
Ian Rogers13735952014-10-08 12:43:28 -07001504 uint8_t* thread_local_start;
Hiroshi Yamauchi7e1ce282015-12-11 15:46:19 -08001505  // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8-byte aligned for
1506 // potentially better performance.
Ian Rogers13735952014-10-08 12:43:28 -07001507 uint8_t* thread_local_pos;
1508 uint8_t* thread_local_end;
Vladimir Marko05846472016-09-14 12:49:57 +01001509 size_t thread_local_objects;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001510
buzbee1452bee2015-03-06 14:43:04 -08001511 // Mterp jump table bases.
1512 void* mterp_current_ibase;
1513 void* mterp_default_ibase;
1514 void* mterp_alt_ibase;
1515
Mathieu Chartier0651d412014-04-29 14:37:57 -07001516 // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001517 void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
Ian Rogersdd7624d2014-03-14 17:43:00 -07001518
1519 // Thread-local allocation stack data/routines.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001520 StackReference<mirror::Object>* thread_local_alloc_stack_top;
1521 StackReference<mirror::Object>* thread_local_alloc_stack_end;
Chao-ying Fu9e369312014-05-21 11:20:52 -07001522
1523 // Support for Mutex lock hierarchy bug detection.
1524 BaseMutex* held_mutexes[kLockLevelCount];
Dave Allison8ce6b902014-08-26 11:07:58 -07001525
1526 // Recorded thread state for nested signals.
1527 jmp_buf* nested_signal_state;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001528
1529 // The function used for thread flip.
1530 Closure* flip_function;
Mathieu Chartier12d625f2015-03-13 11:33:37 -07001531
1532 // Current method verifier, used for root marking.
1533 verifier::MethodVerifier* method_verifier;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001534
1535 // Thread-local mark stack for the concurrent copying collector.
1536 gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001537 } tlsPtr_;
1538
1539 // Guards the 'interrupted_' and 'wait_monitor_' members.
1540 Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1541
1542 // Condition variable waited upon during a wait.
1543 ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001544 // Pointer to the monitor lock we're currently waiting on or null if not waiting.
Ian Rogersdd7624d2014-03-14 17:43:00 -07001545 Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1546
1547 // Thread "interrupted" status; stays raised until queried or thrown.
1548 bool interrupted_ GUARDED_BY(wait_mutex_);
1549
Mathieu Chartierdfe02f62016-02-01 20:15:11 -08001550  // Debug disable read barrier count; only checked in debug builds, and only by the runtime.
1551 uint8_t debug_disallow_read_barrier_ = 0;
1552
Mathieu Chartier3f7f03c2016-09-26 11:39:52 -07001553  // Note that this is not in the packed struct, so it may not be accessed during cross compilation.
1554 uintptr_t poison_object_cookie_ = 0;
1555
Mathieu Chartier952e1e32016-06-13 14:04:02 -07001556 // Pending extra checkpoints if checkpoint_function_ is already used.
1557 std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1558
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001559 friend class Dbg; // For SetStateUnsafe.
Mathieu Chartier15d34022014-02-26 17:16:38 -08001560 friend class gc::collector::SemiSpace; // For getting stack traces.
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001561 friend class Runtime; // For CreatePeer.
Ian Rogers5cf98192014-05-29 21:31:50 -07001562 friend class QuickExceptionHandler; // For dumping the stack.
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001563 friend class ScopedThreadStateChange;
Mathieu Chartier119c6bd2014-05-09 14:11:47 -07001564 friend class StubTest; // For accessing entrypoints.
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001565 friend class ThreadList; // For ~Thread and Destroy.
1566
Andreas Gampe4352b452014-06-04 18:59:01 -07001567 friend class EntrypointsOrderTest; // To test the order of tls entries.
1568
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001569 DISALLOW_COPY_AND_ASSIGN(Thread);
1570};
Ian Rogersbdb03912011-09-14 00:55:44 -07001571
Mathieu Chartier4e2cb092015-07-22 16:17:51 -07001572class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001573 public:
Mathieu Chartier268764d2016-09-13 12:09:38 -07001574  explicit ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
1575 if (kIsDebugBuild) {
1576 self_ = Thread::Current();
1577 old_cause_ = self_->StartAssertNoThreadSuspension(cause);
1578 } else {
1579 Roles::uninterruptible_.Acquire(); // No-op.
1580 }
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001581 }
Mathieu Chartier268764d2016-09-13 12:09:38 -07001582 ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
1583 if (kIsDebugBuild) {
1584 self_->EndAssertNoThreadSuspension(old_cause_);
1585 } else {
1586 Roles::uninterruptible_.Release(); // No-op.
1587 }
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001588 }
1589
1590 private:
Mathieu Chartier268764d2016-09-13 12:09:38 -07001591 Thread* self_;
1592 const char* old_cause_;
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001593};
1594
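// Illustrative usage sketch (hypothetical call site) for the scope above:
//
//   {
//     ScopedAssertNoThreadSuspension ants("Visiting roots");
//     // ... code that must not suspend; in debug builds a suspension
//     // attempt here trips the assertion machinery ...
//   }
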
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001595class ScopedStackedShadowFramePusher {
1596 public:
1597 ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
1598 : self_(self), type_(type) {
1599 self_->PushStackedShadowFrame(sf, type);
1600 }
1601 ~ScopedStackedShadowFramePusher() {
1602 self_->PopStackedShadowFrame(type_);
1603 }
1604
1605 private:
1606 Thread* const self_;
1607 const StackedShadowFrameType type_;
1608
1609 DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
1610};
1611
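// Illustrative usage sketch for the pusher above; the frame and type values
// are assumed for the example.
//
//   {
//     ScopedStackedShadowFramePusher pusher(
//         self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
//     // ... populate the frame; it is popped automatically on scope exit ...
//   }
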
Mathieu Chartierdfe02f62016-02-01 20:15:11 -08001612// Only works for debug builds.
1613class ScopedDebugDisallowReadBarriers {
1614 public:
1615 explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
1616 self_->ModifyDebugDisallowReadBarrier(1);
1617 }
1618 ~ScopedDebugDisallowReadBarriers() {
1619 self_->ModifyDebugDisallowReadBarrier(-1);
1620 }
1621
1622 private:
1623 Thread* const self_;
1624};
1625
Hiroshi Yamauchiee235822016-08-19 17:03:27 -07001626class ScopedTransitioningToRunnable : public ValueObject {
1627 public:
1628 explicit ScopedTransitioningToRunnable(Thread* self)
1629 : self_(self) {
1630 DCHECK_EQ(self, Thread::Current());
1631 if (kUseReadBarrier) {
1632 self_->SetIsTransitioningToRunnable(true);
1633 }
1634 }
1635
1636 ~ScopedTransitioningToRunnable() {
1637 if (kUseReadBarrier) {
1638 self_->SetIsTransitioningToRunnable(false);
1639 }
1640 }
1641
1642 private:
1643 Thread* const self_;
1644};
1645
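// Illustrative usage sketch (assumed call site) for the scope above; with
// read barriers enabled it marks the thread as transitioning for the CC
// collector while the scope is live.
//
//   ScopedTransitioningToRunnable str(Thread::Current());
//   // ... perform the suspended -> runnable transition ...
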
Elliott Hughes330304d2011-08-12 14:28:05 -07001646std::ostream& operator<<(std::ostream& os, const Thread& thread);
Sebastien Hertzf7958692015-06-09 14:09:14 +02001647std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& type);
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001648
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001649} // namespace art
1650
Brian Carlstromfc0e3212013-07-17 14:40:12 -07001651#endif // ART_RUNTIME_THREAD_H_