/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <atomic>
#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <memory>
#include <string>

#include "base/atomic.h"
#include "base/enums.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/safe_map.h"
#include "base/value_object.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "handle.h"
#include "handle_scope.h"
#include "interpreter/interpreter_cache.h"
#include "jvalue.h"
#include "managed_stack.h"
#include "offsets.h"
#include "read_barrier_config.h"
#include "reflective_handle_scope.h"
#include "runtime_globals.h"
#include "runtime_stats.h"
#include "thread_state.h"

class BacktraceMap;

namespace art {

namespace gc {
namespace accounting {
template<class T> class AtomicStack;
}  // namespace accounting
namespace collector {
class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace instrumentation {
struct InstrumentationStackFrame;
}  // namespace instrumentation

namespace mirror {
class Array;
class Class;
class ClassLoader;
class Object;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;
class StackTraceElement;
class String;
class Throwable;
}  // namespace mirror

namespace verifier {
class MethodVerifier;
class VerifierDeps;
}  // namespace verifier

class ArtMethod;
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DeoptimizationContextRecord;
class DexFile;
class FrameIdToShadowFrame;
class IsMarkedVisitor;
class JavaVMExt;
class JNIEnvExt;
class Monitor;
class RootVisitor;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class SingleStepControl;
class StackedShadowFrameRecord;
enum class SuspendReason : char;
class Thread;
class ThreadList;
enum VisitRootFlags : uint8_t;

// A piece of data that can be held in the CustomTls. The destructor will be called during thread
// shutdown. The thread the destructor is called on is not necessarily the same thread it was
// stored on.
class TLSData {
 public:
  virtual ~TLSData() {}
};
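
// A minimal sketch of how TLSData is meant to be used, assuming the custom-TLS accessors on
// Thread (the subclass and key below are hypothetical, for illustration only):
//
//   class MyToolData : public TLSData {
//    public:
//     ~MyToolData() override {}  // May run on a different thread at shutdown.
//     uint64_t events_seen = 0;
//   };
//
//   thread->SetCustomTLS("my-tool", new MyToolData());  // Thread owns and later deletes it.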

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest = 1,  // If set, implies that suspend_count_ > 0 and the Thread should enter the
                        // safepoint handler.
  kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
  kEmptyCheckpointRequest = 4,  // Request that the thread do an empty checkpoint and then continue.
  kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
};

enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame,
};

// The type of method that triggers deoptimization. It contains info on whether
// the deoptimized method should advance dex_pc.
enum class DeoptimizationMethodType {
  kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
  kDefault     // dex pc may or may not advance depending on other conditions.
};

// This should match RosAlloc::kNumThreadLocalSizeBrackets.
static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;

// Thread's stack layout for implicit stack overflow checks:
//
//   +---------------------+  <- highest address of stack memory
//   |                     |
//   .                     .  <- SP
//   |                     |
//   |                     |
//   +---------------------+  <- stack_end
//   |                     |
//   |      Gap            |
//   |                     |
//   +---------------------+  <- stack_begin
//   |                     |
//   |  Protected region   |
//   |                     |
//   +---------------------+  <- lowest address of stack memory
//
// The stack always grows down in memory. At the lowest address is a region of memory
// that is set mprotect(PROT_NONE). Any attempt to read/write to this region will
// result in a segmentation fault signal. At any point, the thread's SP will be somewhere
// between the stack_end and the highest address in stack memory. An implicit stack
// overflow check is a read of memory at a certain offset below the current SP (4K typically).
// If the thread's SP is below the stack_end address this will be a read into the protected
// region. If the SP is above the stack_end address, the thread is guaranteed to have
// at least 4K of space. Because stack overflow checks are only performed in generated code,
// if the thread makes a call out to a native function (through JNI), that native function
// might only have 4K of memory (if the SP is adjacent to stack_end).
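//
// As an illustrative sketch (the exact probe instruction and offset are architecture- and
// compiler-dependent), the implicit check that generated code performs is just a load below
// SP; with a 4K probe on arm64 it amounts to:
//
//   ldr wzr, [sp, #-0x1000]  // Read SP - 4K; faults in the protected region on overflow.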
172
Ian Rogersdd7624d2014-03-14 17:43:00 -0700173class Thread {
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700174 public:
Andreas Gampe7ea6f792014-07-14 16:21:44 -0700175 static const size_t kStackOverflowImplicitCheckSize;
Andreas Gampeb486a982017-06-01 13:45:54 -0700176 static constexpr bool kVerifyStack = kIsDebugBuild;
Dave Allisonf9439142014-03-27 15:10:22 -0700177
Elliott Hughes462c9442012-03-23 18:47:50 -0700178 // Creates a new native thread corresponding to the given managed peer.
179 // Used to implement Thread.start.
Ian Rogers52673ff2012-06-27 23:25:34 -0700180 static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
Carl Shapiro61e019d2011-07-14 16:53:09 -0700181
Elliott Hughes462c9442012-03-23 18:47:50 -0700182 // Attaches the calling native thread to the runtime, returning the new native peer.
183 // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
Mathieu Chartier664bebf2012-11-12 16:54:11 -0800184 static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
185 bool create_peer);
Andreas Gampe732b0ac2017-01-18 15:23:39 -0800186 // Attaches the calling native thread to the runtime, returning the new native peer.
187 static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
Carl Shapirob5573532011-07-12 18:22:59 -0700188
Brian Carlstromcaabb1b2011-10-11 18:09:13 -0700189 // Reset internal state of child thread after fork.
190 void InitAfterFork();
191
Ian Rogers6f3dbba2014-10-14 17:41:57 -0700192 // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
193 // high cost and so we favor passing self around when possible.
194 // TODO: mark as PURE so the compiler may coalesce and remove?
Ian Rogers02ed4c02013-09-06 13:10:04 -0700195 static Thread* Current();
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700196
Ian Rogers7b078e82014-09-10 14:44:24 -0700197 // On a runnable thread, check for pending thread suspension request and handle if pending.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700198 void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogers7b078e82014-09-10 14:44:24 -0700199
200 // Process pending thread suspension request and handle if pending.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700201 void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogers7b078e82014-09-10 14:44:24 -0700202
Hiroshi Yamauchi30493242016-11-03 13:06:52 -0700203 // Process a pending empty checkpoint if pending.
Hiroshi Yamauchia2224042017-02-08 16:35:45 -0800204 void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
205 void CheckEmptyCheckpointFromMutex();
Hiroshi Yamauchi30493242016-11-03 13:06:52 -0700206
Mathieu Chartier2b7c4d12014-05-19 10:52:16 -0700207 static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
Mathieu Chartierf5769e12017-01-10 15:54:41 -0800208 ObjPtr<mirror::Object> thread_peer)
Mathieu Chartier90443472015-07-16 20:32:27 -0700209 REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700210 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier2b7c4d12014-05-19 10:52:16 -0700211 static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
Mathieu Chartier90443472015-07-16 20:32:27 -0700212 REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700213 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughes8daa0922011-09-11 13:46:25 -0700214
Elliott Hughes28fa76d2012-04-09 17:31:46 -0700215 // Translates 172 to pAllocArrayFromCode and so on.
Andreas Gampe542451c2016-07-26 09:02:02 -0700216 template<PointerSize size_of_pointers>
Ian Rogersdd7624d2014-03-14 17:43:00 -0700217 static void DumpThreadOffset(std::ostream& os, uint32_t offset);
Elliott Hughes28fa76d2012-04-09 17:31:46 -0700218
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700219 // Dumps a one-line summary of thread state (used for operator<<).
220 void ShortDump(std::ostream& os) const;
221
222 // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
Nicolas Geoffraya73280d2016-02-15 13:05:16 +0000223 void Dump(std::ostream& os,
Nicolas Geoffray6ee49712018-03-30 14:39:05 +0000224 bool dump_native_stack = true,
Hiroshi Yamauchi13c16352017-01-31 10:15:08 -0800225 BacktraceMap* backtrace_map = nullptr,
226 bool force_dump_stack = false) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700227 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughesa0957642011-09-02 14:27:33 -0700228
Hiroshi Yamauchi02f365f2017-02-03 15:06:00 -0800229 void DumpJavaStack(std::ostream& os,
230 bool check_suspended = true,
231 bool dump_locks = true) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700232 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartierc751fdc2014-03-30 15:25:44 -0700233
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700234 // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
Elliott Hughesabbe07d2012-06-05 17:42:23 -0700235 // case we use 'tid' to identify the thread, and we'll include as much information as we can.
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700236 static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700237 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughesabbe07d2012-06-05 17:42:23 -0700238
Ian Rogers474b6da2012-09-25 00:20:38 -0700239 ThreadState GetState() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700240 DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
241 DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
242 return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
Dave Allison0aded082013-11-07 13:15:11 -0800243 }
244
Ian Rogers474b6da2012-09-25 00:20:38 -0700245 ThreadState SetState(ThreadState new_state);
Ian Rogers52673ff2012-06-27 23:25:34 -0700246
Mathieu Chartier90443472015-07-16 20:32:27 -0700247 int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700248 return tls32_.suspend_count;
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700249 }
Elliott Hughes038a8062011-09-18 14:12:41 -0700250
Alex Light88fd7202017-06-30 08:31:59 -0700251 int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
252 Locks::user_code_suspension_lock_) {
253 return tls32_.user_code_suspend_count;
254 }
255
Mathieu Chartier90443472015-07-16 20:32:27 -0700256 int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700257 return tls32_.debug_suspend_count;
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700258 }
259
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700260 bool IsSuspended() const {
Chris Dearman59cde532013-12-04 18:53:49 -0800261 union StateAndFlags state_and_flags;
Ian Rogersdd7624d2014-03-14 17:43:00 -0700262 state_and_flags.as_int = tls32_.state_and_flags.as_int;
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700263 return state_and_flags.as_struct.state != kRunnable &&
264 (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700265 }
266
Alex Light270db1c2019-12-03 12:20:01 +0000267 void DecrDefineClassCount() {
268 tls32_.define_class_counter--;
269 }
270
271 void IncrDefineClassCount() {
272 tls32_.define_class_counter++;
273 }
274 uint32_t GetDefineClassCount() const {
275 return tls32_.define_class_counter;
276 }
277
Hiroshi Yamauchi02e7f1a2016-10-03 15:32:01 -0700278 // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
279 // release thread_suspend_count_lock_ internally.
280 ALWAYS_INLINE
281 bool ModifySuspendCount(Thread* self,
282 int delta,
283 AtomicInteger* suspend_barrier,
Alex Light46f93402017-06-29 11:59:50 -0700284 SuspendReason reason)
Sebastien Hertz1c8f4ff2017-04-14 15:05:12 +0200285 WARN_UNUSED
Mathieu Chartier90443472015-07-16 20:32:27 -0700286 REQUIRES(Locks::thread_suspend_count_lock_);

  // Requests a checkpoint closure to run on another thread. The closure will be run when the
  // thread gets suspended. This will return true if the closure was added and will (eventually)
  // be executed. It returns false otherwise.
  //
  // Since multiple closures can be queued and some closures can delay other threads from running,
  // no closure should attempt to suspend another thread while running.
  // TODO: We should add some debug option that verifies this.
  bool RequestCheckpoint(Closure* function)
      REQUIRES(Locks::thread_suspend_count_lock_);

  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This
  // is due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread
  // to execute the checkpoint for us if it is Runnable. The suspend_state is the state that the
  // thread will go into while it is awaiting the checkpoint to be run.
  // NB: Passing ThreadState::kRunnable may cause the current thread to wait in a condition
  // variable while holding the mutator_lock_. Callers should ensure that this will not cause any
  // problems for the closure or the rest of the system.
  // NB: Since multiple closures can be queued and some closures can delay other threads from
  // running, no closure should attempt to suspend another thread while running.
  bool RequestSynchronousCheckpoint(Closure* function,
                                    ThreadState suspend_state = ThreadState::kWaiting)
      REQUIRES_SHARED(Locks::mutator_lock_)
      RELEASE(Locks::thread_list_lock_)
      REQUIRES(!Locks::thread_suspend_count_lock_);

  bool RequestEmptyCheckpoint()
      REQUIRES(Locks::thread_suspend_count_lock_);
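
  // A minimal usage sketch for RequestCheckpoint; the Closure subclass here is hypothetical and
  // the caller is assumed to hold thread_suspend_count_lock_ as required above:
  //
  //   class LogThreadClosure : public Closure {
  //    public:
  //     void Run(Thread* thread) override {
  //       // Runs when 'thread' acts on the checkpoint request; must not suspend other threads.
  //     }
  //   };
  //
  //   LogThreadClosure closure;
  //   if (!target->RequestCheckpoint(&closure)) {
  //     // Not added (e.g. the target was not runnable); caller must use another mechanism.
  //   }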

  void SetFlipFunction(Closure* function);
  Closure* GetFlipFunction();

  gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
    CHECK(kUseReadBarrier);
    return tlsPtr_.thread_local_mark_stack;
  }
  void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
    CHECK(kUseReadBarrier);
    tlsPtr_.thread_local_mark_stack = stack;
  }

  // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
  // the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
  ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
      REQUIRES(!Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_);

  // Transition from runnable into a state where mutator privileges are denied. Releases share of
  // mutator lock.
  ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);

  // Once called, thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
    Roles::uninterruptible_.Acquire();  // No-op.
    if (kIsDebugBuild) {
      CHECK(cause != nullptr);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
    Roles::uninterruptible_.Release();  // No-op.
  }

  // End region where no thread suspension is expected. Returns the current open region in case we
  // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
  // is larger than one.
  const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
    const char* ret = nullptr;
    if (kIsDebugBuild) {
      CHECK_EQ(tls32_.no_thread_suspension, 1u);
      tls32_.no_thread_suspension--;
      ret = tlsPtr_.last_no_thread_suspension_cause;
      tlsPtr_.last_no_thread_suspension_cause = nullptr;
    }
    Roles::uninterruptible_.Release();  // No-op.
    return ret;
  }
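
  // A usage sketch for the pair above (the runtime wraps this pattern in scoped helpers such as
  // ScopedAssertNoThreadSuspension; shown inline here only for illustration):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Walking the stack");
  //   ...  // Work that must not suspend; suspension points CHECK-fail in debug builds.
  //   self->EndAssertNoThreadSuspension(old_cause);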

  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  // Return true if thread suspension is allowable.
  bool IsThreadSuspensionAllowable() const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  size_t NumberOfHeldMutexes() const;

  bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the priority of this thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  int GetNativePriority() const;

  // Guaranteed to be non-zero.
  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
  ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }
  // GetPeer is not safe if called on another thread in the middle of the CC thread flip: the
  // thread's stack may not have been flipped yet, so the peer may be a from-space (stale)
  // reference. This function will explicitly mark/forward it.
  mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  bool IsAsyncExceptionPending() const {
    return tlsPtr_.async_exception != nullptr;
  }

  mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.exception;
  }

  void AssertPendingException() const;
  void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);

  // Set an exception that is asynchronously thrown from a different thread. This will be checked
  // periodically and might overwrite the current 'Exception'. This can only be called from a
  // checkpoint.
  //
  // The caller should also make sure that the thread has been deoptimized so that the exception
  // could be detected on back-edges.
  void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
  }

  // Move the current async-exception to the main exception. This should be called when the current
  // thread is ready to deal with any async exceptions. Returns true if there is an async exception
  // that needs to be dealt with, false otherwise.
  bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);

  // Find catch block and perform long jump to appropriate exception handler.
  NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    if (tlsPtr_.long_jump_context != nullptr) {
      ReleaseLongJumpContextInternal();
    }
    tlsPtr_.long_jump_context = context;
  }

  // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
  // abort the runtime iff abort_on_error is true.
  ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
                              bool check_suspended = true,
                              bool abort_on_error = true) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns whether the given exception was thrown by the current Java method being executed
  // (Note that this includes native Java methods).
  bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetTopOfStack(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
  }

  void SetTopOfStackTagged(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
  }

  // If 'msg' is null, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  // If 'msg' is null, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Roles::uninterruptible_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // Notify this thread's thread-group that this thread has started.
  // Note: the given thread-group is used as a fast path and verified in debug build. If the value
  // is null, the thread's thread-group is loaded from the peer.
  void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Convert a jobject into an ObjPtr<mirror::Object>.
  ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
  // Checks if the weak global ref has been cleared by the GC without decoding it.
  bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted();
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted();
  void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
  void SetInterrupted(bool i) {
    tls32_.interrupted.store(i, std::memory_order_seq_cst);
  }
  void Notify() REQUIRES(!wait_mutex_);

  ALWAYS_INLINE void PoisonObjectPointers() {
    ++poison_object_cookie_;
  }

  ALWAYS_INLINE static void PoisonObjectPointersIfDebug();

  ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
    return poison_object_cookie_;
  }

  // Parking for 0ns of relative time means an untimed park; negative times (which should already
  // be handled in Java code) return immediately.
  void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
  void Unpark();
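
  // Illustrative call shapes, mirroring the java.util.concurrent LockSupport semantics that this
  // implements (values shown are examples only):
  //
  //   self->Park(/* is_absolute= */ false, /* time= */ 0);        // Untimed park; waits for Unpark().
  //   self->Park(/* is_absolute= */ false, /* time= */ 1000000);  // Relative park of 1ms, in ns.
  //   peer_thread->Unpark();                                      // Wakes a parked thread.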

 private:
  void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
    wait_monitor_ = mon;
  }

  // Waiter linked-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

  jobject GetClassLoaderOverride() {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(jobject class_loader_override);

  // Create the internal representation of a stack trace, which is more time-
  // and space-efficient to compute than the StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(
      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      REQUIRES_SHARED(Locks::mutator_lock_);
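
  // A minimal sketch chaining the two methods above (non-transactional instantiation assumed):
  //
  //   jobject internal = soa.Self()->CreateInternalStackTrace<false>(soa);
  //   jobjectArray elements =
  //       Thread::InternalStackTraceToStackTraceElementArray(soa, internal);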

  jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool HasDebuggerShadowFrames() const {
    return tlsPtr_.frame_id_to_shadow_frame != nullptr;
  }

  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
      REQUIRES(Locks::mutator_lock_);

  void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kVerifyStack) {
      VerifyStackImpl();
    }
  }

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
  }

  static constexpr size_t IsGcMarkingSize() {
    return sizeof(tls32_.is_gc_marking);
  }

  // Deoptimize the Java stack.
  void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale = (pointer_size > kRuntimePointerSize) ?
        static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
    size_t shrink = (kRuntimePointerSize > pointer_size) ?
        static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }
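
  // Worked example of the scaling above, assuming a 64-bit runtime (kRuntimePointerSize ==
  // PointerSize::k64) computing offsets for a 32-bit target (pointer_size == PointerSize::k32):
  // scale == 1 and shrink == 2, so a tlsPtr_ member two pointer slots in (tls_ptr_offset == 16)
  // maps to base + 16 / 2 == base + 8 in the 32-bit layout, i.e. still two pointer slots in.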

 public:
  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
      size_t quick_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
  }

  static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
                                                          PointerSize pointer_size) {
    if (pointer_size == PointerSize::k32) {
      return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
          Uint32Value();
    } else {
      return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
          Uint32Value();
    }
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
  }

  // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
  template <PointerSize pointer_size>
  static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
    // The entry point list defines 30 ReadBarrierMarkRegX entry points.
    DCHECK_LT(reg, 30u);
    // The ReadBarrierMarkRegX entry points are ordered by increasing
    // register number in Thread::tlsPtr_.quick_entrypoints.
    return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
        + static_cast<size_t>(pointer_size) * reg;
  }
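
  // Worked example: with 64-bit pointers, ReadBarrierMarkEntryPointsOffset(5) is the offset of
  // pReadBarrierMarkReg00 plus 8 * 5, i.e. five pointer-sized entry-point slots past Reg00.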

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> SelfOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> PeerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
  }


  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> CardTableOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_pos));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_end));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_objects));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                rosalloc_runs));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_alloc_stack_top));
  }

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_alloc_stack_end));
  }

  // Size of stack less any space reserved for stack overflow.
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }
841
Andreas Gampe639b2b12019-01-08 10:32:50 -0800842 ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
Nicolas Geoffray535a3fb2014-07-22 15:17:38 +0100843
Ian Rogers13735952014-10-08 12:43:28 -0700844 uint8_t* GetStackEnd() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700845 return tlsPtr_.stack_end;
jeffhaod7521322012-11-21 15:38:24 -0800846 }
847
Ian Rogers932746a2011-09-22 18:57:50 -0700848 // Set the stack end to that to be used during a stack overflow
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700849 void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogers932746a2011-09-22 18:57:50 -0700850
851 // Set the stack end to that to be used during regular execution
Andreas Gampe639b2b12019-01-08 10:32:50 -0800852 ALWAYS_INLINE void ResetDefaultStackEnd();
Ian Rogers932746a2011-09-22 18:57:50 -0700853
Ian Rogers120f1c72012-09-28 17:17:10 -0700854 bool IsHandlingStackOverflow() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700855 return tlsPtr_.stack_end == tlsPtr_.stack_begin;
Ian Rogers120f1c72012-09-28 17:17:10 -0700856 }
857
Andreas Gampe542451c2016-07-26 09:02:02 -0700858 template<PointerSize pointer_size>
David Srbecky56de89a2018-10-01 15:32:20 +0100859 static constexpr ThreadOffset<pointer_size> StackEndOffset() {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700860 return ThreadOffsetFromTlsPtr<pointer_size>(
861 OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
Elliott Hughesbe759c62011-09-08 19:38:21 -0700862 }
863
Andreas Gampe542451c2016-07-26 09:02:02 -0700864 template<PointerSize pointer_size>
David Srbecky56de89a2018-10-01 15:32:20 +0100865 static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700866 return ThreadOffsetFromTlsPtr<pointer_size>(
867 OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
Elliott Hughesbe759c62011-09-08 19:38:21 -0700868 }
869
Andreas Gampe542451c2016-07-26 09:02:02 -0700870 template<PointerSize pointer_size>
David Srbecky56de89a2018-10-01 15:32:20 +0100871 static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700872 return ThreadOffsetFromTlsPtr<pointer_size>(
873 OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
Vladimir Marko2196c652017-11-30 16:16:07 +0000874 ManagedStack::TaggedTopQuickFrameOffset());
Elliott Hughesbe759c62011-09-08 19:38:21 -0700875 }
876
Ian Rogers0399dde2012-06-06 17:09:28 -0700877 const ManagedStack* GetManagedStack() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700878 return &tlsPtr_.managed_stack;
Ian Rogers0399dde2012-06-06 17:09:28 -0700879 }
880
881 // Linked list recording fragments of managed stack.
882 void PushManagedStackFragment(ManagedStack* fragment) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700883 tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
Ian Rogers0399dde2012-06-06 17:09:28 -0700884 }
885 void PopManagedStackFragment(const ManagedStack& fragment) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700886 tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
Ian Rogers0399dde2012-06-06 17:09:28 -0700887 }
888
Andreas Gampe513061a2017-06-01 09:17:34 -0700889 ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
890 ALWAYS_INLINE ShadowFrame* PopShadowFrame();
Logan Chienf7ad17e2012-03-15 03:10:03 +0800891
Andreas Gampe542451c2016-07-26 09:02:02 -0700892 template<PointerSize pointer_size>
David Srbecky56de89a2018-10-01 15:32:20 +0100893 static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700894 return ThreadOffsetFromTlsPtr<pointer_size>(
895 OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
896 ManagedStack::TopShadowFrameOffset());
TDYa127d668a062012-04-13 12:36:57 -0700897 }
898
Ian Rogers0399dde2012-06-06 17:09:28 -0700899  // Is the given obj in one of this thread's handle scopes?
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700900 bool HandleScopeContains(jobject obj) const;
Ian Rogers0399dde2012-06-06 17:09:28 -0700901
Hans Boehm0882af22017-08-31 15:21:57 -0700902 void HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700903 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogers0399dde2012-06-06 17:09:28 -0700904
Mathieu Chartiere8a3c572016-10-11 16:52:17 -0700905 BaseHandleScope* GetTopHandleScope() {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700906 return tlsPtr_.top_handle_scope;
Ian Rogers1f539342012-10-03 21:09:42 -0700907 }
908
Mathieu Chartiere8a3c572016-10-11 16:52:17 -0700909 void PushHandleScope(BaseHandleScope* handle_scope) {
Ian Rogers59c07062014-10-10 13:03:39 -0700910 DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700911 tlsPtr_.top_handle_scope = handle_scope;
912 }
913
Mathieu Chartiere8a3c572016-10-11 16:52:17 -0700914 BaseHandleScope* PopHandleScope() {
915 BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700916 DCHECK(handle_scope != nullptr);
917 tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
918 return handle_scope;
Ian Rogers1f539342012-10-03 21:09:42 -0700919 }
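  // These are normally exercised indirectly through StackHandleScope, which
  // pushes itself in its constructor and pops itself in its destructor:
  //
  //   StackHandleScope<1> hs(self);
  //   Handle<mirror::Object> h = hs.NewHandle(obj);
  //   // The GC can now visit and update h via HandleScopeVisitRoots.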
Brian Carlstrom40381fb2011-10-19 14:13:40 -0700920
Andreas Gampe542451c2016-07-26 09:02:02 -0700921 template<PointerSize pointer_size>
David Srbecky56de89a2018-10-01 15:32:20 +0100922 static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700923 return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
924 top_handle_scope));
Elliott Hughesbe759c62011-09-08 19:38:21 -0700925 }
926
Alex Light55eccdf2019-10-07 13:51:13 +0000927 BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
928 return tlsPtr_.top_reflective_handle_scope;
929 }
930
931 void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
932 DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
933 DCHECK_EQ(scope->GetThread(), this);
934 tlsPtr_.top_reflective_handle_scope = scope;
935 }
936
937 BaseReflectiveHandleScope* PopReflectiveHandleScope() {
938 BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
939 DCHECK(handle_scope != nullptr);
940 tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
941 return handle_scope;
942 }
943
Ian Rogersdd7624d2014-03-14 17:43:00 -0700944 DebugInvokeReq* GetInvokeReq() const {
945 return tlsPtr_.debug_invoke_req;
Elliott Hughes475fc232011-10-25 15:00:35 -0700946 }
947
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +0100948 SingleStepControl* GetSingleStepControl() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700949 return tlsPtr_.single_step_control;
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +0100950 }
951
Sebastien Hertz1558b572015-02-25 15:05:59 +0100952 // Indicates whether this thread is ready to invoke a method for debugging. This
953 // is only true if the thread has been suspended by a debug event.
954 bool IsReadyForDebugInvoke() const {
955 return tls32_.ready_for_debug_invoke;
956 }
957
958 void SetReadyForDebugInvoke(bool ready) {
959 tls32_.ready_for_debug_invoke = ready;
960 }
961
Sebastien Hertz9d6bf692015-04-10 12:12:33 +0200962 bool IsDebugMethodEntry() const {
963 return tls32_.debug_method_entry_;
964 }
965
966 void SetDebugMethodEntry() {
967 tls32_.debug_method_entry_ = true;
968 }
969
970 void ClearDebugMethodEntry() {
971 tls32_.debug_method_entry_ = false;
972 }
973
Hiroshi Yamauchi00370822015-08-18 14:47:25 -0700974 bool GetIsGcMarking() const {
975 CHECK(kUseReadBarrier);
976 return tls32_.is_gc_marking;
977 }
978
Mathieu Chartierfe814e82016-11-09 14:32:49 -0800979 void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
Hiroshi Yamauchi00370822015-08-18 14:47:25 -0700980
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700981 bool GetWeakRefAccessEnabled() const {
982 CHECK(kUseReadBarrier);
983 return tls32_.weak_ref_access_enabled;
984 }
985
986 void SetWeakRefAccessEnabled(bool enabled) {
987 CHECK(kUseReadBarrier);
988 tls32_.weak_ref_access_enabled = enabled;
989 }
990
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800991 uint32_t GetDisableThreadFlipCount() const {
992 CHECK(kUseReadBarrier);
993 return tls32_.disable_thread_flip_count;
994 }
995
996 void IncrementDisableThreadFlipCount() {
997 CHECK(kUseReadBarrier);
998 ++tls32_.disable_thread_flip_count;
999 }
1000
1001 void DecrementDisableThreadFlipCount() {
1002 CHECK(kUseReadBarrier);
1003 DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
1004 --tls32_.disable_thread_flip_count;
1005 }
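  // Sketch of the nesting discipline for this counter (JNI critical sections
  // effectively do this; shown only to illustrate the semantics):
  //
  //   self->IncrementDisableThreadFlipCount();
  //   ...  // Critical section: a pending thread flip must wait for us.
  //   self->DecrementDisableThreadFlipCount();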
1006
Alex Light185a4612018-10-04 15:54:25 -07001007  // Returns true if the thread is a runtime thread (e.g. from a ThreadPool).
Alex Lighte9f61032018-09-24 16:04:51 -07001008 bool IsRuntimeThread() const {
1009 return is_runtime_thread_;
Calin Juravleccd56952016-12-15 17:57:38 +00001010 }
1011
Alex Lighte9f61032018-09-24 16:04:51 -07001012 void SetIsRuntimeThread(bool is_runtime_thread) {
1013 is_runtime_thread_ = is_runtime_thread;
Calin Juravleccd56952016-12-15 17:57:38 +00001014 }
1015
Orion Hodson01ecfa12019-07-18 12:57:47 +01001016 uint32_t CorePlatformApiCookie() {
1017 return core_platform_api_cookie_;
1018 }
1019
1020 void SetCorePlatformApiCookie(uint32_t cookie) {
1021 core_platform_api_cookie_ = cookie;
1022 }
1023
Alex Lighte9f61032018-09-24 16:04:51 -07001024 // Returns true if the thread is allowed to load java classes.
1025 bool CanLoadClasses() const;
1026
Sebastien Hertz597c4f02015-01-26 17:37:14 +01001027  // Activates single step control for debugging. The thread takes
1028  // ownership of the given SingleStepControl*. It is deleted by a call
1029 // to DeactivateSingleStepControl or upon thread destruction.
1030 void ActivateSingleStepControl(SingleStepControl* ssc);
1031
1032 // Deactivates single step control for debugging.
1033 void DeactivateSingleStepControl();
1034
Sebastien Hertz1558b572015-02-25 15:05:59 +01001035  // Sets the debug invoke request for debugging. When the thread is resumed,
Sebastien Hertzcbc50642015-06-01 17:33:12 +02001036  // it executes the method described by this request, then sends the reply
1037  // before suspending itself. The thread takes ownership of the given
1038 // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
Sebastien Hertz1558b572015-02-25 15:05:59 +01001039 void SetDebugInvokeReq(DebugInvokeReq* req);
1040
1041  // Clears the debug invoke request for debugging. When the thread completes
Sebastien Hertzcbc50642015-06-01 17:33:12 +02001042 // method invocation, it deletes its debug invoke request and suspends
1043 // itself.
Sebastien Hertz1558b572015-02-25 15:05:59 +01001044 void ClearDebugInvokeReq();
Sebastien Hertz597c4f02015-01-26 17:37:14 +01001045
Sebastien Hertzfd3077e2014-04-23 10:32:43 +02001046 // Returns the fake exception used to activate deoptimization.
1047 static mirror::Throwable* GetDeoptimizationException() {
Mathieu Chartier0795f232016-09-27 18:43:30 -07001048 // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
1049 // represented by ObjPtr.
1050 return reinterpret_cast<mirror::Throwable*>(0x100);
Sebastien Hertzfd3077e2014-04-23 10:32:43 +02001051 }
1052
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001053  // Currently deoptimization invokes the verifier, which can trigger class
1054  // loading and execute Java code, so nested deoptimizations may happen. We
1055  // need to save the ongoing deoptimization shadow frames and return values
1056  // on stacks.
Nicolas Geoffray73be1e82015-09-17 15:22:56 +01001057 // 'from_code' denotes whether the deoptimization was explicitly made from
1058 // compiled code.
Mingyao Yang2ee17902017-08-30 11:37:08 -07001059 // 'method_type' contains info on whether deoptimization should advance
1060 // dex_pc.
Nicolas Geoffray73be1e82015-09-17 15:22:56 +01001061 void PushDeoptimizationContext(const JValue& return_value,
1062 bool is_reference,
Mingyao Yang2ee17902017-08-30 11:37:08 -07001063 ObjPtr<mirror::Throwable> exception,
Nicolas Geoffray73be1e82015-09-17 15:22:56 +01001064 bool from_code,
Mingyao Yang2ee17902017-08-30 11:37:08 -07001065 DeoptimizationMethodType method_type)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001066 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartierf5769e12017-01-10 15:54:41 -08001067 void PopDeoptimizationContext(JValue* result,
1068 ObjPtr<mirror::Throwable>* exception,
Mingyao Yang2ee17902017-08-30 11:37:08 -07001069 bool* from_code,
1070 DeoptimizationMethodType* method_type)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001071 REQUIRES_SHARED(Locks::mutator_lock_);
Sebastien Hertz07474662015-08-25 15:12:33 +00001072 void AssertHasDeoptimizationContext()
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001073 REQUIRES_SHARED(Locks::mutator_lock_);
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001074 void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
Andreas Gampe639bdd12015-06-03 11:22:45 -07001075 ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
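  // Sketch of the intended push/pop pairing (the deoptimization machinery
  // drives the real flow; the argument values here are illustrative):
  //
  //   self->PushDeoptimizationContext(return_value,
  //                                   /* is_reference= */ false,
  //                                   /* exception= */ nullptr,
  //                                   /* from_code= */ false,
  //                                   DeoptimizationMethodType::kDefault);
  //   ...
  //   JValue result;
  //   ObjPtr<mirror::Throwable> pending;
  //   bool from_code;
  //   DeoptimizationMethodType method_type;
  //   self->PopDeoptimizationContext(&result, &pending, &from_code, &method_type);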
Andreas Gampe2a0d4ec2014-06-02 22:05:22 -07001076
Mingyao Yang99170c62015-07-06 11:10:37 -07001077  // For the debugger: find the shadow frame that corresponds to a frame id,
1078  // or return null if there is none.
1079 ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001080 REQUIRES_SHARED(Locks::mutator_lock_);
Mingyao Yang99170c62015-07-06 11:10:37 -07001081  // For the debugger: find the bool array that keeps track of the updated vreg set
1082 // for a frame id.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001083 bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
Mingyao Yang99170c62015-07-06 11:10:37 -07001084  // For the debugger: find the shadow frame that corresponds to a frame id. If
1085 // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1086 ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1087 uint32_t num_vregs,
1088 ArtMethod* method,
1089 uint32_t dex_pc)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001090 REQUIRES_SHARED(Locks::mutator_lock_);
Mingyao Yang99170c62015-07-06 11:10:37 -07001091
1092 // Delete the entry that maps from frame_id to shadow_frame.
1093 void RemoveDebuggerShadowFrameMapping(size_t frame_id)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001094 REQUIRES_SHARED(Locks::mutator_lock_);
Mingyao Yang99170c62015-07-06 11:10:37 -07001095
Ian Rogers62d6c772013-02-27 08:32:07 -08001096 std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001097 return tlsPtr_.instrumentation_stack;
jeffhaoe343b762011-12-05 16:36:44 -08001098 }
1099
Mathieu Chartiere401d142015-04-22 13:56:20 -07001100 std::vector<ArtMethod*>* GetStackTraceSample() const {
Andreas Gampe2c19f5b2016-11-28 08:10:18 -08001101 DCHECK(!IsAotCompiler());
Nicolas Geoffray340dafa2016-11-18 16:03:10 +00001102 return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
Jeff Hao5ce4b172013-08-16 16:27:18 -07001103 }
1104
Mathieu Chartiere401d142015-04-22 13:56:20 -07001105 void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
Andreas Gampe2c19f5b2016-11-28 08:10:18 -08001106 DCHECK(!IsAotCompiler());
Nicolas Geoffray340dafa2016-11-18 16:03:10 +00001107 tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1108 }
1109
1110 verifier::VerifierDeps* GetVerifierDeps() const {
Andreas Gampe2c19f5b2016-11-28 08:10:18 -08001111 DCHECK(IsAotCompiler());
Nicolas Geoffray340dafa2016-11-18 16:03:10 +00001112 return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1113 }
1114
1115  // It is the responsibility of the caller to make sure the verifier_deps
1116 // entry in the thread is cleared before destruction of the actual VerifierDeps
1117 // object, or the thread.
1118 void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
Andreas Gampe2c19f5b2016-11-28 08:10:18 -08001119 DCHECK(IsAotCompiler());
Nicolas Geoffraye424c932016-11-23 12:52:01 +00001120 DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
Nicolas Geoffray340dafa2016-11-18 16:03:10 +00001121 tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
Jeff Hao5ce4b172013-08-16 16:27:18 -07001122 }
1123
1124 uint64_t GetTraceClockBase() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001125 return tls64_.trace_clock_base;
Jeff Hao5ce4b172013-08-16 16:27:18 -07001126 }
1127
1128 void SetTraceClockBase(uint64_t clock_base) {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001129 tls64_.trace_clock_base = clock_base;
Jeff Hao5ce4b172013-08-16 16:27:18 -07001130 }
1131
Ian Rogers81d425b2012-09-27 16:03:43 -07001132 BaseMutex* GetHeldMutex(LockLevel level) const {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001133 return tlsPtr_.held_mutexes[level];
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001134 }
1135
Ian Rogers81d425b2012-09-27 16:03:43 -07001136 void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001137 tlsPtr_.held_mutexes[level] = mutex;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001138 }
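  // Sketch of how the mutex implementation uses this bookkeeping
  // (illustrative; the real calls live in the Mutex code):
  //
  //   self->SetHeldMutex(level, this);     // On acquisition.
  //   self->SetHeldMutex(level, nullptr);  // On release.
  //   // Lock-hierarchy checks consult GetHeldMutex(level) to verify that no
  //   // mutex at a conflicting level is still held.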
Elliott Hughesffb465f2012-03-01 18:46:05 -08001139
Yu Lieac44242015-06-29 10:50:03 +08001140 void ClearSuspendBarrier(AtomicInteger* target)
Mathieu Chartier90443472015-07-16 20:32:27 -07001141 REQUIRES(Locks::thread_suspend_count_lock_);
Yu Lieac44242015-06-29 10:50:03 +08001142
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001143 bool ReadFlag(ThreadFlag flag) const {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001144 return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001145 }
1146
Jeff Hao9cec2472013-05-14 18:17:06 -07001147 bool TestAllFlags() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001148 return (tls32_.state_and_flags.as_struct.flags != 0);
Jeff Hao9cec2472013-05-14 18:17:06 -07001149 }
1150
Ian Rogers8c1b5f72014-07-09 22:02:36 -07001151 void AtomicSetFlag(ThreadFlag flag) {
Orion Hodson88591fe2018-03-06 13:35:43 +00001152 tls32_.state_and_flags.as_atomic_int.fetch_or(flag, std::memory_order_seq_cst);
Ian Rogers8c1b5f72014-07-09 22:02:36 -07001153 }
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001154
Ian Rogers8c1b5f72014-07-09 22:02:36 -07001155 void AtomicClearFlag(ThreadFlag flag) {
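    // Note: -1 ^ flag is ~flag, so this atomically ANDs out just the flag bit.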
Orion Hodson88591fe2018-03-06 13:35:43 +00001156 tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
Ian Rogers8c1b5f72014-07-09 22:02:36 -07001157 }
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001158
David Srbecky28f6cff2018-10-16 15:07:28 +01001159 bool UseMterp() const {
1160 return tls32_.use_mterp.load();
1161 }
1162
Mathieu Chartier5ace2012016-11-30 10:15:41 -08001163 void ResetQuickAllocEntryPointsForThread(bool is_marking);
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001164
Ian Rogersdd7624d2014-03-14 17:43:00 -07001165 // Returns the remaining space in the TLAB.
Mathieu Chartier6bc77742017-04-18 17:46:23 -07001166 size_t TlabSize() const {
1167 return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1168 }
1169
1170 // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1171 size_t TlabRemainingCapacity() const {
1172 return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1173 }
1174
1175 // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1176 void ExpandTlab(size_t bytes) {
1177 tlsPtr_.thread_local_end += bytes;
1178 DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1179 }
1180
Ian Rogersdd7624d2014-03-14 17:43:00 -07001181 // Doesn't check that there is room.
1182 mirror::Object* AllocTlab(size_t bytes);
Mathieu Chartier6bc77742017-04-18 17:46:23 -07001183 void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001184 bool HasTlab() const;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001185 uint8_t* GetTlabStart() {
1186 return tlsPtr_.thread_local_start;
1187 }
1188 uint8_t* GetTlabPos() {
1189 return tlsPtr_.thread_local_pos;
1190 }
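  // Fast-path sketch of how an allocator can use these accessors (the real
  // fast path lives in the allocation entrypoints; illustrative only):
  //
  //   mirror::Object* TryTlabAlloc(Thread* self, size_t bytes) {
  //     if (self->TlabSize() < bytes) {
  //       return nullptr;                // Fall back to slow path / new TLAB.
  //     }
  //     return self->AllocTlab(bytes);   // Bump allocation; no size check.
  //   }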
Elliott Hughes5d96a712012-06-28 12:24:27 -07001191
Ian Rogersdd7624d2014-03-14 17:43:00 -07001192 // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1193 // equal to a valid pointer.
1194  // TODO: does this need to be atomic? I don't think so.
1195 void RemoveSuspendTrigger() {
1196 tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1197 }
1198
1199 // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1200 // The next time a suspend check is done, it will load from the value at this address
1201 // and trigger a SIGSEGV.
1202 void TriggerSuspend() {
1203 tlsPtr_.suspend_trigger = nullptr;
1204 }
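  // The implicit suspend check emitted by the compiler is conceptually just a
  // load through this pointer (a sketch; actual codegen is per-architecture):
  //
  //   // Faults with SIGSEGV once TriggerSuspend() has nulled the trigger.
  //   uintptr_t ignored = *tlsPtr_.suspend_trigger;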
1205
1206
1207 // Push an object onto the allocation stack.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001208 bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001209 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001210
1211 // Set the thread local allocation pointers to the given pointers.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001212 void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1213 StackReference<mirror::Object>* end);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001214
1215 // Resets the thread local allocation pointers.
1216 void RevokeThreadLocalAllocationStack();
1217
1218 size_t GetThreadLocalBytesAllocated() const {
Mathieu Chartier14cc9be2014-07-11 10:26:37 -07001219 return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001220 }
1221
1222 size_t GetThreadLocalObjectsAllocated() const {
1223 return tlsPtr_.thread_local_objects;
1224 }
1225
Ian Rogersdd7624d2014-03-14 17:43:00 -07001226 void* GetRosAllocRun(size_t index) const {
1227 return tlsPtr_.rosalloc_runs[index];
1228 }
1229
1230 void SetRosAllocRun(size_t index, void* run) {
1231 tlsPtr_.rosalloc_runs[index] = run;
1232 }
1233
Andreas Gampe2c2d2a02016-03-17 21:27:19 -07001234 bool ProtectStack(bool fatal_on_error = true);
Dave Allison648d7112014-07-25 16:15:27 -07001235 bool UnprotectStack();
1236
buzbee1452bee2015-03-06 14:43:04 -08001237 void SetMterpCurrentIBase(void* ibase) {
1238 tlsPtr_.mterp_current_ibase = ibase;
1239 }
1240
buzbee1452bee2015-03-06 14:43:04 -08001241 const void* GetMterpCurrentIBase() const {
1242 return tlsPtr_.mterp_current_ibase;
1243 }
1244
Josh Gaoefd20cb2017-02-28 16:53:59 -08001245 bool HandlingSignal() const {
1246 return tls32_.handling_signal_;
Dave Allison648d7112014-07-25 16:15:27 -07001247 }
1248
Josh Gaoefd20cb2017-02-28 16:53:59 -08001249 void SetHandlingSignal(bool handling_signal) {
1250 tls32_.handling_signal_ = handling_signal;
Dave Allison8ce6b902014-08-26 11:07:58 -07001251 }
1252
Hiroshi Yamauchiee235822016-08-19 17:03:27 -07001253 bool IsTransitioningToRunnable() const {
1254 return tls32_.is_transitioning_to_runnable;
1255 }
1256
1257 void SetIsTransitioningToRunnable(bool value) {
1258 tls32_.is_transitioning_to_runnable = value;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001259 }
1260
Alex Light3dacdd62019-03-12 15:45:47 +00001261 uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1262 return --tls32_.force_interpreter_count;
1263 }
1264
1265 uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1266 return ++tls32_.force_interpreter_count;
1267 }
1268
1269 void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
1270 tls32_.force_interpreter_count = value;
1271 }
1272
1273 uint32_t ForceInterpreterCount() const {
1274 return tls32_.force_interpreter_count;
1275 }
1276
1277 bool IsForceInterpreter() const {
1278 return tls32_.force_interpreter_count != 0;
1279 }
1280
Vladimir Markobf121912019-06-04 13:49:05 +01001281 bool IncrementMakeVisiblyInitializedCounter() {
1282 tls32_.make_visibly_initialized_counter += 1u;
1283 return tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount;
1284 }
1285
1286 void ClearMakeVisiblyInitializedCounter() {
1287 tls32_.make_visibly_initialized_counter = 0u;
1288 }
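  // Sketch of the trigger pattern (the class linker drives this; the flush
  // helper named below is hypothetical):
  //
  //   if (self->IncrementMakeVisiblyInitializedCounter()) {
  //     // Counter reached kMakeVisiblyInitializedCounterTriggerCount (128):
  //     MakeInitializedClassesVisiblyInitialized(self);  // Hypothetical.
  //     self->ClearMakeVisiblyInitializedCounter();
  //   }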
1289
Mathieu Chartierd0ad2ee2015-03-31 14:59:59 -07001290 void PushVerifier(verifier::MethodVerifier* verifier);
1291 void PopVerifier(verifier::MethodVerifier* verifier);
Mathieu Chartier12d625f2015-03-13 11:33:37 -07001292
Jeff Hao848f70a2014-01-15 13:49:50 -08001293 void InitStringEntryPoints();
1294
Mathieu Chartierdfe02f62016-02-01 20:15:11 -08001295 void ModifyDebugDisallowReadBarrier(int8_t delta) {
1296 debug_disallow_read_barrier_ += delta;
1297 }
1298
1299 uint8_t GetDebugDisallowReadBarrierCount() const {
1300 return debug_disallow_read_barrier_;
1301 }
1302
Alex Light184f0752018-07-13 11:18:22 -07001303 // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
1304  // do not gain ownership of TLSData and must synchronize with SetCustomTLS themselves to prevent
1305 // it from being deleted.
1306 TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
Andreas Gampef26bf2d2017-01-13 16:47:14 -08001307
Alex Light184f0752018-07-13 11:18:22 -07001308 // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
1309 // will be run when the thread exits or when SetCustomTLS is called again with the same key.
1310 void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
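  // Usage sketch (MyTLSData and the key are hypothetical; the destructor runs
  // on thread exit or when SetCustomTLS replaces the entry for the same key):
  //
  //   class MyTLSData : public TLSData {
  //    public:
  //     ~MyTLSData() override { /* release per-thread resources */ }
  //   };
  //   self->SetCustomTLS("my-plugin-key", new MyTLSData);
  //   TLSData* data = self->GetCustomTLS("my-plugin-key");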
Andreas Gampef26bf2d2017-01-13 16:47:14 -08001311
Calin Juravle97cbc922016-04-15 16:16:35 +01001312 // Returns true if the current thread is the jit sensitive thread.
1313 bool IsJitSensitiveThread() const {
1314 return this == jit_sensitive_thread_;
1315 }
1316
Alex Lighte0b2ce42019-02-21 19:23:42 +00001317 bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);
1318
Calin Juravle97cbc922016-04-15 16:16:35 +01001319 // Returns true if StrictMode events are traced for the current thread.
Calin Juravleb2771b42016-04-07 17:09:25 +01001320 static bool IsSensitiveThread() {
1321 if (is_sensitive_thread_hook_ != nullptr) {
1322 return (*is_sensitive_thread_hook_)();
1323 }
1324 return false;
1325 }
1326
Mathieu Chartier3768ade2017-05-02 14:04:39 -07001327  // Sets the read barrier marking entrypoints to be non-null.
1328 void SetReadBarrierEntrypoints();
1329
Andreas Gampebad529d2017-02-13 18:52:10 -08001330 static jobject CreateCompileTimePeer(JNIEnv* env,
1331 const char* name,
1332 bool as_daemon,
1333 jobject thread_group)
1334 REQUIRES_SHARED(Locks::mutator_lock_);
1335
David Srbecky912f36c2018-09-08 12:22:58 +01001336 ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
1337 return &interpreter_cache_;
1338 }
1339
1340 // Clear all thread-local interpreter caches.
1341 //
1342 // Since the caches are keyed by memory pointer to dex instructions, this must be
1343 // called when any dex code is unloaded (before different code gets loaded at the
1344 // same memory location).
1345 //
1346  // If the presence of a cache entry implies some pre-conditions, this must also be
1347 // called if the pre-conditions might no longer hold true.
1348 static void ClearAllInterpreterCaches();
1349
1350 template<PointerSize pointer_size>
David Srbecky56de89a2018-10-01 15:32:20 +01001351 static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
David Srbecky912f36c2018-09-08 12:22:58 +01001352 return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
1353 }
1354
David Srbecky56de89a2018-10-01 15:32:20 +01001355 static constexpr int InterpreterCacheSizeLog2() {
David Srbecky912f36c2018-09-08 12:22:58 +01001356 return WhichPowerOf2(InterpreterCache::kSize);
1357 }
1358
Ian Rogersdd7624d2014-03-14 17:43:00 -07001359 private:
Ian Rogers52673ff2012-06-27 23:25:34 -07001360 explicit Thread(bool daemon);
Mathieu Chartier90443472015-07-16 20:32:27 -07001361 ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
Elliott Hughesc0f09332012-03-26 13:27:06 -07001362 void Destroy();
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001363
Alex Light4847a072019-12-12 16:13:47 -08001364 // Deletes and clears the tlsPtr_.jpeer field. Done in a way so that both it and opeer cannot be
1365 // observed to be set at the same time by instrumentation.
1366 void DeleteJPeer(JNIEnv* env);
1367
David Srbecky28f6cff2018-10-16 15:07:28 +01001368 void NotifyInTheadList()
1369 REQUIRES_SHARED(Locks::thread_list_lock_);
1370
Andreas Gampe732b0ac2017-01-18 15:23:39 -08001371 // Attaches the calling native thread to the runtime, returning the new native peer.
1372 // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1373 template <typename PeerAction>
1374 static Thread* Attach(const char* thread_name,
1375 bool as_daemon,
1376 PeerAction p);
1377
Ian Rogers365c1022012-06-22 15:05:28 -07001378 void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
Elliott Hughes5fe594f2011-09-08 12:33:17 -07001379
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001380 template<bool kTransactionActive>
Andreas Gampebad529d2017-02-13 18:52:10 -08001381 static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
1382 ObjPtr<mirror::Object> peer,
1383 jboolean thread_is_daemon,
1384 jobject thread_group,
1385 jobject thread_name,
1386 jint thread_priority)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001387 REQUIRES_SHARED(Locks::mutator_lock_);
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001388
Ian Rogers62d6c772013-02-27 08:32:07 -08001389  // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
Hiroshi Yamauchi98810e32016-05-24 14:55:40 -07001390 // Dbg::ManageDeoptimization.
Ian Rogers474b6da2012-09-25 00:20:38 -07001391 ThreadState SetStateUnsafe(ThreadState new_state) {
1392 ThreadState old_state = GetState();
Mathieu Chartier8ac9c912015-10-01 15:58:41 -07001393 if (old_state == kRunnable && new_state != kRunnable) {
1394 // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1395 // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1396 // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1397 TransitionToSuspendedAndRunCheckpoints(new_state);
1398 // Since we transitioned to a suspended state, check the pass barrier requests.
1399 PassActiveSuspendBarriers();
1400 } else {
1401 tls32_.state_and_flags.as_struct.state = new_state;
Yu Lieac44242015-06-29 10:50:03 +08001402 }
Ian Rogersc747cff2012-08-31 18:20:08 -07001403 return old_state;
1404 }
Ian Rogersc747cff2012-08-31 18:20:08 -07001405
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001406 void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogers04d7aa92013-03-16 14:29:17 -07001407
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001408 void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
Nicolas Geoffraya73280d2016-02-15 13:05:16 +00001409 void DumpStack(std::ostream& os,
Nicolas Geoffray6ee49712018-03-30 14:39:05 +00001410 bool dump_native_stack = true,
Hiroshi Yamauchi13c16352017-01-31 10:15:08 -08001411 BacktraceMap* backtrace_map = nullptr,
1412 bool force_dump_stack = false) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001413 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughesd92bec42011-09-02 17:04:36 -07001414
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001415 // Out-of-line conveniences for debugging in gdb.
Brian Carlstrom7934ac22013-07-26 10:54:15 -07001416 static Thread* CurrentFromGdb(); // Like Thread::Current.
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001417 // Like Thread::Dump(std::cerr).
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001418 void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001419
Elliott Hughes93e74e82011-09-13 11:07:03 -07001420 static void* CreateCallback(void* arg);
1421
Andreas Gampe513061a2017-06-01 09:17:34 -07001422 void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001423 REQUIRES_SHARED(Locks::mutator_lock_);
Andreas Gampe513061a2017-06-01 09:17:34 -07001424 void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
1425 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001426
Andreas Gampe449357d2015-06-01 22:29:51 -07001427 // Initialize a thread.
1428 //
1429  // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. If
1430  // Init succeeds, the thread takes ownership of it. If Init fails, it is the caller's
1431 // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1432 // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1433 // of false).
1434 bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
Mathieu Chartier90443472015-07-16 20:32:27 -07001435 REQUIRES(Locks::runtime_shutdown_lock_);
Ian Rogers5d76c432011-10-31 21:42:49 -07001436 void InitCardTable();
Ian Rogersb033c752011-07-20 12:22:35 -07001437 void InitCpu();
Alexei Zavjalov1efa0a92014-02-04 02:08:31 +07001438 void CleanupCpu();
Ian Rogers848871b2013-08-05 10:56:33 -07001439 void InitTlsEntryPoints();
Brian Carlstromcaabb1b2011-10-11 18:09:13 -07001440 void InitTid();
Brian Carlstromcaabb1b2011-10-11 18:09:13 -07001441 void InitPthreadKeySelf();
Ian Rogersf4d4da12014-11-11 16:10:33 -08001442 bool InitStackHwm();
Elliott Hughesbe759c62011-09-08 19:38:21 -07001443
Elliott Hughesd6a23bd2013-07-16 14:19:52 -07001444 void SetUpAlternateSignalStack();
1445 void TearDownAlternateSignalStack();
1446
Mathieu Chartier8ac9c912015-10-01 15:58:41 -07001447 ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1448 REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1449
1450 ALWAYS_INLINE void PassActiveSuspendBarriers()
1451 REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1452
Calin Juravle97cbc922016-04-15 16:16:35 +01001453 // Registers the current thread as the jit sensitive thread. Should be called just once.
1454 static void SetJitSensitiveThread() {
1455 if (jit_sensitive_thread_ == nullptr) {
1456 jit_sensitive_thread_ = Thread::Current();
1457 } else {
1458 LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1459 << Thread::Current()->GetTid();
1460 }
1461 }
1462
Calin Juravleb2771b42016-04-07 17:09:25 +01001463 static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1464 is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1465 }
1466
Hiroshi Yamauchi02e7f1a2016-10-03 15:32:01 -07001467 bool ModifySuspendCountInternal(Thread* self,
1468 int delta,
1469 AtomicInteger* suspend_barrier,
Alex Light46f93402017-06-29 11:59:50 -07001470 SuspendReason reason)
Sebastien Hertz1c8f4ff2017-04-14 15:05:12 +02001471 WARN_UNUSED
Hiroshi Yamauchi02e7f1a2016-10-03 15:32:01 -07001472 REQUIRES(Locks::thread_suspend_count_lock_);
1473
Alex Lightdf00a1e2017-11-01 09:29:53 -07001474 // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
1475 // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
1476 // the kCheckpointRequest flag is cleared.
Andreas Gampe0a855762016-10-26 13:43:14 -07001477 void RunCheckpointFunction();
Hiroshi Yamauchi30493242016-11-03 13:06:52 -07001478 void RunEmptyCheckpoint();
Andreas Gampe0a855762016-10-26 13:43:14 -07001479
1480 bool PassActiveSuspendBarriers(Thread* self)
1481 REQUIRES(!Locks::thread_suspend_count_lock_);
1482
1483 // Install the protected region for implicit stack checks.
1484 void InstallImplicitProtection();
1485
Andreas Gampe585da952016-12-02 14:52:29 -08001486 template <bool kPrecise>
1487 void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1488
Nicolas Geoffraye3f775b2019-12-04 14:41:52 +00001489 void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1490
Andreas Gampe2c19f5b2016-11-28 08:10:18 -08001491 static bool IsAotCompiler();
1492
Andreas Gampee5d23982019-01-08 10:34:26 -08001493 void ReleaseLongJumpContextInternal();
1494
Ian Rogers474b6da2012-09-25 00:20:38 -07001495  // 32 bits of atomically changed state and flags. Keeping it as 32 bits allows an atomic CAS to
1496 // change from being Suspended to Runnable without a suspend request occurring.
Chris Dearman59cde532013-12-04 18:53:49 -08001497 union PACKED(4) StateAndFlags {
1498 StateAndFlags() {}
Ian Rogersdf1ce912012-11-27 17:07:11 -08001499 struct PACKED(4) {
Ian Rogers30e173f2012-09-26 14:35:03 -07001500 // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
1501 // ThreadFlags for bit field meanings.
1502 volatile uint16_t flags;
1503 // Holds the ThreadState. May be changed non-atomically between Suspended (ie not Runnable)
1504 // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
1505 // operation. If a thread is suspended and a suspend_request is present, a thread may not
1506 // change to Runnable as a GC or other operation is in progress.
Ian Rogers01ae5802012-09-28 16:14:01 -07001507 volatile uint16_t state;
Ian Rogers30e173f2012-09-26 14:35:03 -07001508 } as_struct;
Ian Rogersb8e087e2014-07-09 21:12:06 -07001509 AtomicInteger as_atomic_int;
Ian Rogers01ae5802012-09-28 16:14:01 -07001510 volatile int32_t as_int;
Chris Dearman59cde532013-12-04 18:53:49 -08001511
1512 private:
1513 // gcc does not handle struct with volatile member assignments correctly.
1514 // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
1515 DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
Ian Rogers474b6da2012-09-25 00:20:38 -07001516 };
Andreas Gampe575e78c2014-11-03 23:41:03 -08001517 static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
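  // Sketch of the Suspended -> Runnable transition this layout enables (the
  // real implementation lives in thread-inl.h; memory ordering elided):
  //
  //   StateAndFlags old_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;
  //   StateAndFlags new_sf;
  //   new_sf.as_int = old_sf.as_int;
  //   new_sf.as_struct.state = kRunnable;
  //   bool done = tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(
  //       old_sf.as_int, new_sf.as_int);  // Fails if flags changed concurrently.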
Ian Rogers474b6da2012-09-25 00:20:38 -07001518
Ian Rogersdd7624d2014-03-14 17:43:00 -07001519 static void ThreadExitCallback(void* arg);
Elliott Hughes5d96a712012-06-28 12:24:27 -07001520
Yu Lieac44242015-06-29 10:50:03 +08001521 // Maximum number of suspend barriers.
1522 static constexpr uint32_t kMaxSuspendBarriers = 3;
1523
Ian Rogersdd7624d2014-03-14 17:43:00 -07001524 // Has Thread::Startup been called?
1525 static bool is_started_;
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001526
Ian Rogersdd7624d2014-03-14 17:43:00 -07001527 // TLS key used to retrieve the Thread*.
1528 static pthread_key_t pthread_key_self_;
Ian Rogersa32a6fd2012-02-06 20:18:44 -08001529
Ian Rogersdd7624d2014-03-14 17:43:00 -07001530 // Used to notify threads that they should attempt to resume, they will suspend again if
1531 // their suspend count is > 0.
1532 static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
Dave Allisonb373e092014-02-20 16:06:36 -08001533
Calin Juravleb2771b42016-04-07 17:09:25 +01001534  // Hook passed by the framework which returns true
1535 // when StrictMode events are traced for the current thread.
1536 static bool (*is_sensitive_thread_hook_)();
Calin Juravle97cbc922016-04-15 16:16:35 +01001537 // Stores the jit sensitive thread (which for now is the UI thread).
1538 static Thread* jit_sensitive_thread_;
Calin Juravleb2771b42016-04-07 17:09:25 +01001539
Vladimir Marko9f18fbc2019-07-31 15:06:12 +01001540 static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;
Vladimir Markobf121912019-06-04 13:49:05 +01001541
Ian Rogersdd7624d2014-03-14 17:43:00 -07001542 /***********************************************************************************************/
1543 // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
1544 // pointer size differences. To encourage shorter encoding, more frequently used values appear
1545 // first if possible.
1546 /***********************************************************************************************/
Elliott Hughes6a607ad2012-07-13 20:40:00 -07001547
Zuo Wangf37a88b2014-07-10 04:26:41 -07001548 struct PACKED(4) tls_32bit_sized_values {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001549 // We have no control over the size of 'bool', but want our boolean fields
1550 // to be 4-byte quantities.
1551 typedef uint32_t bool32_t;
Ian Rogers22f454c2012-09-08 11:06:29 -07001552
Vladimir Markobf121912019-06-04 13:49:05 +01001553 explicit tls_32bit_sized_values(bool is_daemon)
1554 : suspend_count(0),
1555 debug_suspend_count(0),
1556 thin_lock_thread_id(0),
1557 tid(0),
1558 daemon(is_daemon),
1559 throwing_OutOfMemoryError(false),
1560 no_thread_suspension(0),
1561 thread_exit_check_count(0),
1562 handling_signal_(false),
1563 is_transitioning_to_runnable(false),
1564 ready_for_debug_invoke(false),
1565 debug_method_entry_(false),
1566 is_gc_marking(false),
1567 weak_ref_access_enabled(true),
1568 disable_thread_flip_count(0),
1569 user_code_suspend_count(0),
1570 force_interpreter_count(0),
1571 use_mterp(0),
Alex Light270db1c2019-12-03 12:20:01 +00001572 make_visibly_initialized_counter(0),
1573 define_class_counter(0) {}
Dave Allisonb373e092014-02-20 16:06:36 -08001574
Ian Rogersdd7624d2014-03-14 17:43:00 -07001575 union StateAndFlags state_and_flags;
Andreas Gampe575e78c2014-11-03 23:41:03 -08001576 static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
1577 "Size of state_and_flags and int32 are different");
Dave Allisonb373e092014-02-20 16:06:36 -08001578
Ian Rogersdd7624d2014-03-14 17:43:00 -07001579 // A non-zero value is used to tell the current thread to enter a safe point
1580 // at the next poll.
1581 int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001582
Ian Rogersdd7624d2014-03-14 17:43:00 -07001583 // How much of 'suspend_count_' is by request of the debugger, used to set things right
1584 // when the debugger detaches. Must be <= suspend_count_.
1585 int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001586
Ian Rogersdd7624d2014-03-14 17:43:00 -07001587 // Thin lock thread id. This is a small integer used by the thin lock implementation.
1588 // This is not to be confused with the native thread's tid, nor is it the value returned
1589 // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1590 // important difference between this id and the ids visible to managed code is that these
1591 // ones get reused (to ensure that they fit in the number of bits available).
1592 uint32_t thin_lock_thread_id;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001593
Ian Rogersdd7624d2014-03-14 17:43:00 -07001594 // System thread id.
1595 uint32_t tid;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001596
Ian Rogersdd7624d2014-03-14 17:43:00 -07001597 // Is the thread a daemon?
1598 const bool32_t daemon;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001599
Ian Rogersdd7624d2014-03-14 17:43:00 -07001600 // A boolean telling us whether we're recursively throwing OOME.
1601 bool32_t throwing_OutOfMemoryError;
1602
1603 // A positive value implies we're in a region where thread suspension isn't expected.
1604 uint32_t no_thread_suspension;
1605
1606 // How many times has our pthread key's destructor been called?
1607 uint32_t thread_exit_check_count;
Sebastien Hertz9f102032014-05-23 08:59:42 +02001608
Dave Allison648d7112014-07-25 16:15:27 -07001609 // True if signal is being handled by this thread.
1610 bool32_t handling_signal_;
1611
Hiroshi Yamauchiee235822016-08-19 17:03:27 -07001612 // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
1613  // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
1614 // the rest of them.
1615 bool32_t is_transitioning_to_runnable;
Sebastien Hertz1558b572015-02-25 15:05:59 +01001616
1617 // True if the thread has been suspended by a debugger event. This is
1618 // used to invoke method from the debugger which is only allowed when
1619 // the thread is suspended by an event.
1620 bool32_t ready_for_debug_invoke;
Sebastien Hertz9d6bf692015-04-10 12:12:33 +02001621
1622 // True if the thread enters a method. This is used to detect method entry
1623 // event for the debugger.
1624 bool32_t debug_method_entry_;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001625
Hiroshi Yamauchi00370822015-08-18 14:47:25 -07001626 // True if the GC is in the marking phase. This is used for the CC collector only. This is
1627 // thread local so that we can simplify the logic to check for the fast path of read barriers of
1628 // GC roots.
1629 bool32_t is_gc_marking;
1630
Nicolas Geoffray365719c2017-03-08 13:11:50 +00001631 // Thread "interrupted" status; stays raised until queried or thrown.
1632 Atomic<bool32_t> interrupted;
1633
Charles Mungeraa31f492018-11-01 18:57:38 +00001634 AtomicInteger park_state_;
1635
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001636 // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
1637 // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
1638 // processing of the CC collector only. This is thread local so that we can enable/disable weak
1639 // ref access by using a checkpoint and avoid a race around the time weak ref access gets
1640 // disabled and concurrent reference processing begins (if weak ref access is disabled during a
1641 // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
1642 // ReferenceProcessor::EnableSlowPath().
1643 bool32_t weak_ref_access_enabled;
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -08001644
1645 // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1646 // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
1647 // critical section enter.
1648 uint32_t disable_thread_flip_count;
Alex Light88fd7202017-06-30 08:31:59 -07001649
Alex Lightcea42152018-09-18 22:51:55 +00001650 // How much of 'suspend_count_' is by request of user code, used to distinguish threads
1651 // suspended by the runtime from those suspended by user code.
Alex Light88fd7202017-06-30 08:31:59 -07001652 // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
1653 // told that AssertHeld should be good enough.
1654 int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
David Srbecky28f6cff2018-10-16 15:07:28 +01001655
Alex Light3dacdd62019-03-12 15:45:47 +00001656 // Count of how many times this thread has been forced to interpreter. If this is not 0 the
1657 // thread must remain in interpreted code as much as possible.
1658 uint32_t force_interpreter_count;
1659
David Srbecky28f6cff2018-10-16 15:07:28 +01001660 // True if everything is in the ideal state for fast interpretation.
1661 // False if we need to switch to the C++ interpreter to handle special cases.
1662 std::atomic<bool32_t> use_mterp;
Vladimir Markobf121912019-06-04 13:49:05 +01001663
1664 // Counter for calls to initialize a class that's initialized but not visibly initialized.
1665 // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
1666 // make initialized classes visibly initialized. This is needed because we usually make
1667 // classes visibly initialized in batches but we do not want to be stuck with a class
1668 // initialized but not visibly initialized for a long time even if no more classes are
1669 // being initialized anymore.
1670 uint32_t make_visibly_initialized_counter;
Alex Light270db1c2019-12-03 12:20:01 +00001671
1672 // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
1673 // for threads to be done with class-definition work.
1674 uint32_t define_class_counter;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001675 } tls32_;
1676
1677 struct PACKED(8) tls_64bit_sized_values {
Sebastien Hertz07474662015-08-25 15:12:33 +00001678 tls_64bit_sized_values() : trace_clock_base(0) {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001679 }
1680
1681 // The clock base used for tracing.
1682 uint64_t trace_clock_base;
1683
Ian Rogersdd7624d2014-03-14 17:43:00 -07001684 RuntimeStats stats;
1685 } tls64_;
1686
Andreas Gampe6aa13702015-10-28 10:57:25 -07001687 struct PACKED(sizeof(void*)) tls_ptr_sized_values {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001688 tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
Andreas Gampe449357d2015-06-01 22:29:51 -07001689 managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
1690 self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
Nicolas Geoffray340dafa2016-11-18 16:03:10 +00001691 deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr),
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001692 top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
Ian Rogersdd7624d2014-03-14 17:43:00 -07001693 instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
Sebastien Hertz07474662015-08-25 15:12:33 +00001694 stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
Mingyao Yang99170c62015-07-06 11:10:37 -07001695 frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
Vladimir Marko05846472016-09-14 12:49:57 +01001696 last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
Roland Levillaine71b3542017-01-16 14:58:23 +00001697 thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
Mathieu Chartier6bc77742017-04-18 17:46:23 -07001698 thread_local_limit(nullptr),
David Srbecky776f3f72018-10-15 18:03:55 +01001699 thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
Josh Gaoefd20cb2017-02-28 16:53:59 -08001700 thread_local_alloc_stack_end(nullptr),
Alex Light848574c2017-09-25 16:59:39 -07001701 flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
Alex Light55eccdf2019-10-07 13:51:13 +00001702 async_exception(nullptr), top_reflective_handle_scope(nullptr) {
Mathieu Chartier12d625f2015-03-13 11:33:37 -07001703 std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001704 }
1705
1706 // The biased card table, see CardTable for details.
Ian Rogers13735952014-10-08 12:43:28 -07001707 uint8_t* card_table;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001708
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001709 // The pending exception or null.
Ian Rogersdd7624d2014-03-14 17:43:00 -07001710 mirror::Throwable* exception;
1711
1712 // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1713 // We leave extra space so there's room for the code that throws StackOverflowError.
Ian Rogers13735952014-10-08 12:43:28 -07001714 uint8_t* stack_end;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001715
1716  // The top of the managed stack, often manipulated directly by compiler-generated code.
1717 ManagedStack managed_stack;
1718
1719 // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
1720 // normally set to the address of itself.
1721 uintptr_t* suspend_trigger;
1722
1723 // Every thread may have an associated JNI environment
1724 JNIEnvExt* jni_env;
1725
Andreas Gampe449357d2015-06-01 22:29:51 -07001726 // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1727 // created thread.
1728 JNIEnvExt* tmp_jni_env;
1729
Ian Rogersdd7624d2014-03-14 17:43:00 -07001730 // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1731 // is easy but getting the address of Thread::Current is hard. This field can be read off of
1732 // Thread::Current to give the address.
1733 Thread* self;
1734
1735 // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1736 // start up, until the thread is registered and the local opeer_ is used.
1737 mirror::Object* opeer;
1738 jobject jpeer;
1739
1740 // The "lowest addressable byte" of the stack.
Ian Rogers13735952014-10-08 12:43:28 -07001741 uint8_t* stack_begin;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001742
1743 // Size of the stack.
1744 size_t stack_size;
1745
Nicolas Geoffray340dafa2016-11-18 16:03:10 +00001746 // Sampling profiler and AOT verification cannot happen on the same run, so we share
1747 // the same entry for the stack trace and the verifier deps.
1748 union DepsOrStackTraceSample {
1749 DepsOrStackTraceSample() {
1750 verifier_deps = nullptr;
1751 stack_trace_sample = nullptr;
1752 }
1753 // Pointer to previous stack trace captured by sampling profiler.
1754 std::vector<ArtMethod*>* stack_trace_sample;
1755 // When doing AOT verification, per-thread VerifierDeps.
1756 verifier::VerifierDeps* verifier_deps;
1757 } deps_or_stack_trace_sample;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001758
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001759 // The next thread in the wait set this thread is part of or null if not waiting.
Ian Rogersdd7624d2014-03-14 17:43:00 -07001760 Thread* wait_next;
1761
1762 // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1763 mirror::Object* monitor_enter_object;
1764
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001765 // Top of linked list of handle scopes or null for none.
Mathieu Chartiere8a3c572016-10-11 16:52:17 -07001766 BaseHandleScope* top_handle_scope;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001767
1768 // Needed to get the right ClassLoader in JNI_OnLoad, but also
1769 // useful for testing.
Ian Rogers68d8b422014-07-17 11:09:10 -07001770 jobject class_loader_override;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001771
1772 // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1773 Context* long_jump_context;
1774
1775 // Additional stack used by method instrumentation to store method and return pc values.
1776 // Stored as a pointer since std::deque is not PACKED.
1777 std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1778
1779 // JDWP invoke-during-breakpoint support.
1780 DebugInvokeReq* debug_invoke_req;
1781
1782 // JDWP single-stepping support.
1783 SingleStepControl* single_step_control;
1784
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001785  // For GC purposes, a shadow frame record stack that keeps track of:
1786 // 1) shadow frames under construction.
1787 // 2) deoptimization shadow frames.
1788 StackedShadowFrameRecord* stacked_shadow_frame_record;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001789
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001790 // Deoptimization return value record stack.
Sebastien Hertz07474662015-08-25 15:12:33 +00001791 DeoptimizationContextRecord* deoptimization_context_stack;
Andreas Gampe2a0d4ec2014-06-02 22:05:22 -07001792
Mingyao Yang99170c62015-07-06 11:10:37 -07001793 // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
1794 // Shadow frames may be created before deoptimization happens so that the debugger can
1795 // set local values there first.
1796 FrameIdToShadowFrame* frame_id_to_shadow_frame;
1797
Ian Rogersdd7624d2014-03-14 17:43:00 -07001798 // A cached copy of the java.lang.Thread's name.
1799 std::string* name;
1800
1801 // A cached pthread_t for the pthread underlying this Thread*.
1802 pthread_t pthread_self;
1803
Ian Rogersdd7624d2014-03-14 17:43:00 -07001804 // If no_thread_suspension_ is > 0, what is causing that assertion.
1805 const char* last_no_thread_suspension_cause;
1806
Mathieu Chartier952e1e32016-06-13 14:04:02 -07001807  // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
1808 // requests another checkpoint, it goes to the checkpoint overflow list.
1809 Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001810
Yu Lieac44242015-06-29 10:50:03 +08001811  // Pending barriers that require passing, or NULL if non-pending. Installation is guarded by
1812 // Locks::thread_suspend_count_lock_.
1813 // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
1814 // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
1815 AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
1816
Roland Levillaine71b3542017-01-16 14:58:23 +00001817 // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
1818 uint8_t* thread_local_start;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001819
Hiroshi Yamauchi7e1ce282015-12-11 15:46:19 -08001820 // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
1821 // potentially better performance.
Ian Rogers13735952014-10-08 12:43:28 -07001822 uint8_t* thread_local_pos;
1823 uint8_t* thread_local_end;
Igor Murashkinaf1e2992016-10-12 17:44:50 -07001824
Mathieu Chartier6bc77742017-04-18 17:46:23 -07001825  // Thread local limit is how far we can expand the thread local buffer; it is greater than or
1826  // equal to thread_local_end.
1827 uint8_t* thread_local_limit;
1828
Vladimir Marko05846472016-09-14 12:49:57 +01001829 size_t thread_local_objects;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001830
Roland Levillaine71b3542017-01-16 14:58:23 +00001831 // Entrypoint function pointers.
1832 // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1833 JniEntryPoints jni_entrypoints;
1834 QuickEntryPoints quick_entrypoints;
1835
David Srbecky776f3f72018-10-15 18:03:55 +01001836 // Mterp jump table base.
buzbee1452bee2015-03-06 14:43:04 -08001837 void* mterp_current_ibase;
buzbee1452bee2015-03-06 14:43:04 -08001838
Mathieu Chartier0651d412014-04-29 14:37:57 -07001839 // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001840 void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
Ian Rogersdd7624d2014-03-14 17:43:00 -07001841
1842 // Thread-local allocation stack data/routines.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001843 StackReference<mirror::Object>* thread_local_alloc_stack_top;
1844 StackReference<mirror::Object>* thread_local_alloc_stack_end;
Chao-ying Fu9e369312014-05-21 11:20:52 -07001845
1846 // Support for Mutex lock hierarchy bug detection.
1847 BaseMutex* held_mutexes[kLockLevelCount];
Dave Allison8ce6b902014-08-26 11:07:58 -07001848
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001849 // The function used for thread flip.
1850 Closure* flip_function;
Mathieu Chartier12d625f2015-03-13 11:33:37 -07001851
1852 // Current method verifier, used for root marking.
1853 verifier::MethodVerifier* method_verifier;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001854
1855 // Thread-local mark stack for the concurrent copying collector.
1856 gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
Alex Light848574c2017-09-25 16:59:39 -07001857
1858 // The pending async-exception or null.
1859 mirror::Throwable* async_exception;
Alex Light55eccdf2019-10-07 13:51:13 +00001860
1861 // Top of the linked-list for reflective-handle scopes or null if none.
1862 BaseReflectiveHandleScope* top_reflective_handle_scope;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001863 } tlsPtr_;
1864
Alex Lightbb68fda2018-10-01 13:21:47 -07001865 // Small thread-local cache to be used from the interpreter.
1866 // It is keyed by dex instruction pointer.
1867  // The value is opcode-dependent (e.g. a field offset).
1868 InterpreterCache interpreter_cache_;

  // All fields below this line should not be accessed by native code. This means these fields
  // can be modified, rearranged, added or removed without having to modify asm_support.h.

  // Guards the 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or null if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Count of scopes that currently disallow read barriers; checked only in debug builds and
  // only in the runtime.
  uint8_t debug_disallow_read_barrier_ = 0;

  // Note that this is not in the packed struct, so it must not be accessed during cross
  // compilation.
  uintptr_t poison_object_cookie_ = 0;

  // Pending extra checkpoints if checkpoint_function_ is already used.
  std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Custom TLS field that can be used by plugins or the runtime. Should not be accessed
  // directly by compiled code or entrypoints.
  SafeMap<std::string, std::unique_ptr<TLSData>> custom_tls_ GUARDED_BY(Locks::custom_tls_lock_);

#ifndef __BIONIC__
  __attribute__((tls_model("initial-exec")))
  static thread_local Thread* self_tls_;
#endif

  // True if the thread is some form of runtime thread (e.g., GC or JIT).
  bool is_runtime_thread_;

  // Set during execution of JNI methods that get field and method IDs as part of determining
  // if the caller is allowed to access all fields and methods in the Core Platform API.
  uint32_t core_platform_api_cookie_ = 0;

  friend class Dbg;  // For SetStateUnsafe.
  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedThreadStateChange;
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread and Destroy.

  friend class EntrypointsOrderTest;  // To test the order of tls entries.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
 public:
  ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause, bool enabled = true)
      ACQUIRE(Roles::uninterruptible_)
      : enabled_(enabled) {
    if (!enabled_) {
      return;
    }
    if (kIsDebugBuild) {
      self_ = Thread::Current();
      old_cause_ = self_->StartAssertNoThreadSuspension(cause);
    } else {
      Roles::uninterruptible_.Acquire();  // No-op.
    }
  }
  ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
    if (!enabled_) {
      return;
    }
    if (kIsDebugBuild) {
      self_->EndAssertNoThreadSuspension(old_cause_);
    } else {
      Roles::uninterruptible_.Release();  // No-op.
    }
  }

 private:
  Thread* self_;
  const bool enabled_;
  const char* old_cause_;
};
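
// A minimal usage sketch (the cause string and surrounding code are hypothetical):
//
//   {
//     ScopedAssertNoThreadSuspension sants("Visiting thread roots");
//     // Code here must not reach a suspension point; in debug builds doing so
//     // aborts with the given cause, in release builds the scope is free.
//   }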

class ScopedAllowThreadSuspension {
 public:
  ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      self_ = Thread::Current();
      old_cause_ = self_->EndAssertNoThreadSuspension();
    } else {
      Roles::uninterruptible_.Release();  // No-op.
    }
  }
  ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
    } else {
      Roles::uninterruptible_.Acquire();  // No-op.
    }
  }

 private:
  Thread* self_;
  const char* old_cause_;
};
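
// Usage sketch: briefly re-allow suspension inside an outer uninterruptible
// region (hypothetical caller; the checkpoint motivation is an assumption):
//
//   ScopedAssertNoThreadSuspension sants("Long uninterruptible walk");
//   // ...
//   {
//     ScopedAllowThreadSuspension ats;  // e.g. let a pending suspend request run
//   }                                   // uninterruptible again from here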

class ScopedStackedShadowFramePusher {
 public:
  ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
      : self_(self), type_(type) {
    self_->PushStackedShadowFrame(sf, type);
  }
  ~ScopedStackedShadowFramePusher() {
    self_->PopStackedShadowFrame(type_);
  }

 private:
  Thread* const self_;
  const StackedShadowFrameType type_;

  DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
};
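
// Usage sketch (the frame and the enumerator shown are assumptions for illustration):
//
//   ShadowFrame* frame = ...;  // e.g. a frame built for deoptimization
//   ScopedStackedShadowFramePusher pusher(
//       self, frame, StackedShadowFrameType::kDeoptimizationShadowFrame);
//   // 'frame' stays linked on the thread until 'pusher' goes out of scope.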

// Only works for debug builds.
class ScopedDebugDisallowReadBarriers {
 public:
  explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
    self_->ModifyDebugDisallowReadBarrier(1);
  }
  ~ScopedDebugDisallowReadBarriers() {
    self_->ModifyDebugDisallowReadBarrier(-1);
  }

 private:
  Thread* const self_;
};
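
// Since the scope increments debug_disallow_read_barrier_ on entry and decrements it on
// exit, scopes may nest (illustrative):
//
//   {
//     ScopedDebugDisallowReadBarriers no_rb_outer(self);
//     {
//       ScopedDebugDisallowReadBarriers no_rb_inner(self);  // count == 2
//     }  // count == 1
//   }  // count == 0, read barriers allowed again (debug builds only)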

class ScopedTransitioningToRunnable : public ValueObject {
 public:
  explicit ScopedTransitioningToRunnable(Thread* self)
      : self_(self) {
    DCHECK_EQ(self, Thread::Current());
    if (kUseReadBarrier) {
      self_->SetIsTransitioningToRunnable(true);
    }
  }

  ~ScopedTransitioningToRunnable() {
    if (kUseReadBarrier) {
      self_->SetIsTransitioningToRunnable(false);
    }
  }

 private:
  Thread* const self_;
};

class ThreadLifecycleCallback {
 public:
  virtual ~ThreadLifecycleCallback() {}

  virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
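
// Sketch of a concrete callback (hypothetical class name; registering it through
// RuntimeCallbacks::AddThreadLifecycleCallback is an assumption about the surrounding
// runtime API):
//
//   class LoggingThreadCallback : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "Thread starting: " << *self;
//     }
//     void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "Thread dying: " << *self;
//     }
//   };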
2034
Alex Lightb7c640d2019-03-20 15:52:13 -07002035// Store an exception from the thread and suppress it for the duration of this object.
2036class ScopedExceptionStorage {
2037 public:
2038 explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
Alex Light79d6c802019-06-27 15:50:11 +00002039 void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
Alex Lightb7c640d2019-03-20 15:52:13 -07002040 ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);
2041
2042 private:
2043 Thread* self_;
2044 StackHandleScope<1> hs_;
Alex Light79d6c802019-06-27 15:50:11 +00002045 MutableHandle<mirror::Throwable> excp_;
Alex Lightb7c640d2019-03-20 15:52:13 -07002046};
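
// Usage sketch (illustrative; it assumes the destructor makes the stored exception
// pending again unless SuppressOldException() was called):
//
//   {
//     ScopedExceptionStorage ses(self);  // stash and clear the pending exception
//     // ... run code that must start with no exception pending ...
//   }  // the original exception is re-raised here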

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& type);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_