/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <setjmp.h>

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <memory>
#include <string>

#include "arch/context.h"
#include "arch/instruction_set.h"
#include "base/atomic.h"
#include "base/enums.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "handle_scope.h"
#include "instrumentation.h"
#include "jvalue.h"
#include "managed_stack.h"
#include "offsets.h"
#include "read_barrier_config.h"
#include "runtime_stats.h"
#include "suspend_reason.h"
#include "thread_state.h"

class BacktraceMap;

namespace art {

namespace gc {
namespace accounting {
template<class T> class AtomicStack;
}  // namespace accounting
namespace collector {
class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace mirror {
class Array;
class Class;
class ClassLoader;
class Object;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;
class StackTraceElement;
class String;
class Throwable;
}  // namespace mirror

namespace verifier {
class MethodVerifier;
class VerifierDeps;
}  // namespace verifier

class ArtMethod;
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DeoptimizationContextRecord;
class DexFile;
class FrameIdToShadowFrame;
class JavaVMExt;
class JNIEnvExt;
class Monitor;
class RootVisitor;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class SingleStepControl;
class StackedShadowFrameRecord;
class Thread;
class ThreadList;
enum VisitRootFlags : uint8_t;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest = 1,  // If set, implies that suspend_count_ > 0 and the Thread should enter
                        // the safepoint handler.
  kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
  kEmptyCheckpointRequest = 4,  // Request that the thread do an empty checkpoint and then continue.
  kActiveSuspendBarrier = 8,  // Register that at least one suspend barrier needs to be passed.
};
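
// The flag values are distinct bits, so several requests can be pending at the same time.
// A minimal sketch of how a flags word might be tested (illustrative only; the real checks
// live in the Thread methods below and in generated code):
//
//   uint16_t flags = ...;  // Snapshot of state_and_flags.as_struct.flags.
//   if ((flags & (kCheckpointRequest | kEmptyCheckpointRequest)) != 0) {
//     // Some checkpoint work was requested; run it before continuing.
//   }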

enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame,
};

// The type of method that triggers deoptimization. It contains info on whether
// the deoptimized method should advance dex_pc.
enum class DeoptimizationMethodType {
  kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
  kDefault     // dex pc may or may not advance depending on other conditions.
};

// This should match RosAlloc::kNumThreadLocalSizeBrackets.
static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;

// Thread's stack layout for implicit stack overflow checks:
//
//   +---------------------+  <- highest address of stack memory
//   |                     |
//   .                     .  <- SP
//   |                     |
//   |                     |
//   +---------------------+  <- stack_end
//   |                     |
//   |  Gap                |
//   |                     |
//   +---------------------+  <- stack_begin
//   |                     |
//   | Protected region    |
//   |                     |
//   +---------------------+  <- lowest address of stack memory
//
// The stack always grows down in memory. At the lowest address is a region of memory
// that is set mprotect(PROT_NONE). Any attempt to read or write to this region will
// result in a segmentation fault signal. At any point, the thread's SP will be somewhere
// between stack_end and the highest address in stack memory. An implicit stack
// overflow check is a read of memory at a certain offset below the current SP (typically 4K).
// If the thread's SP is below the stack_end address, this will be a read into the protected
// region. If the SP is above the stack_end address, the thread is guaranteed to have
// at least 4K of space. Because stack overflow checks are only performed in generated code,
// if the thread makes a call out to a native function (through JNI), that native function
// might only have 4K of memory (if the SP is adjacent to stack_end).
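//
// A minimal sketch of what the generated probe amounts to (illustrative only; the real
// check is emitted per-architecture by the compiler back end, and kProbeOffset here is a
// stand-in for the real constant):
//
//   volatile uint8_t* probe = reinterpret_cast<uint8_t*>(current_sp) - kProbeOffset;
//   uint8_t unused = *probe;  // Faults in the protected region if the stack is too deep.
//
// The SIGSEGV handler recognizes a fault in the protected region and raises a
// StackOverflowError rather than crashing the process.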

class Thread {
 public:
  static const size_t kStackOverflowImplicitCheckSize;
  static constexpr bool kVerifyStack = kIsDebugBuild;

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);
  // Attaches the calling native thread to the runtime, returning the new native peer.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
  // high cost and so we favor passing self around when possible.
  // TODO: mark as PURE so the compiler may coalesce and remove?
  static Thread* Current();

  // On a runnable thread, check for a pending thread suspension request and handle it if present.
  void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);

  // Process a pending thread suspension request, if any.
  void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);

  // Process a pending empty checkpoint, if any.
  void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
  void CheckEmptyCheckpointFromMutex();

  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                   ObjPtr<mirror::Object> thread_peer)
      REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
      REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  template<PointerSize size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os,
            bool dump_native_stack = true,
            BacktraceMap* backtrace_map = nullptr,
            bool force_dump_stack = false) const
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os,
                     bool check_suspended = true,
                     bool dump_locks = true) const
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
                                               Locks::user_code_suspension_lock_) {
    return tls32_.user_code_suspend_count;
  }

  int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
    return tls32_.debug_suspend_count;
  }

  bool IsSuspended() const {
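    // Copy the packed state-and-flags word once so the state and flag tests below read one
    // consistent snapshot (a reading of the intent here; the field may be updated concurrently
    // by other threads).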
    union StateAndFlags state_and_flags;
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
  // release thread_suspend_count_lock_ internally.
  ALWAYS_INLINE
  bool ModifySuspendCount(Thread* self,
                          int delta,
                          AtomicInteger* suspend_barrier,
                          SuspendReason reason)
      WARN_UNUSED
      REQUIRES(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function)
      REQUIRES(Locks::thread_suspend_count_lock_);

  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This
  // is because Thread::Current() needs to go to sleep to allow the targeted thread to execute the
  // checkpoint for us if it is Runnable.
  bool RequestSynchronousCheckpoint(Closure* function)
      REQUIRES_SHARED(Locks::mutator_lock_)
      RELEASE(Locks::thread_list_lock_)
      REQUIRES(!Locks::thread_suspend_count_lock_);
  bool RequestEmptyCheckpoint()
      REQUIRES(Locks::thread_suspend_count_lock_);

  void SetFlipFunction(Closure* function);
  Closure* GetFlipFunction();

  gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
    CHECK(kUseReadBarrier);
    return tlsPtr_.thread_local_mark_stack;
  }
  void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
    CHECK(kUseReadBarrier);
    tlsPtr_.thread_local_mark_stack = stack;
  }

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share
  // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state, acquiring a share of the mutator_lock_.
  ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
      REQUIRES(!Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_);

  // Transition from runnable into a state where mutator privileges are denied. Releases the
  // share of the mutator lock.
  ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);

  // Once called, thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
    Roles::uninterruptible_.Acquire();  // No-op.
    if (kIsDebugBuild) {
      CHECK(cause != nullptr);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
    Roles::uninterruptible_.Release();  // No-op.
  }
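
  // Typical paired usage (a sketch; callers normally go through a scoped helper such as
  // ScopedAssertNoThreadSuspension rather than calling these directly):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   ...  // Code that must not transition out of the runnable state.
  //   self->EndAssertNoThreadSuspension(old_cause);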

  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  // Return true if thread suspension is allowable.
  bool IsThreadSuspensionAllowable() const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  size_t NumberOfHeldMutexes() const;

  bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);
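  // For illustration only: a plausible mapping (the actual table lives in the
  // platform-specific implementation and may differ) sends kMinThreadPriority (1) to a
  // positive nice value such as 19, kNormThreadPriority (5) to 0, and kMaxThreadPriority
  // (10) to a negative nice value.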

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  // Guaranteed to be non-zero.
  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
  mirror::String* GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }
  // GetPeer is not safe if called on another thread in the middle of the CC thread flip: the
  // thread's stack may not have been flipped yet, in which case the peer may be a from-space
  // (stale) ref. This function will explicitly mark/forward it.
  mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  bool IsAsyncExceptionPending() const {
    return tlsPtr_.async_exception != nullptr;
  }

  mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.exception;
  }

  void AssertPendingException() const;
  void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);

  // Set an exception that is asynchronously thrown from a different thread. This will be checked
  // periodically and might overwrite the current 'Exception'. This can only be called from a
  // checkpoint.
  //
  // The caller should also make sure that the thread has been deoptimized so that the exception
  // could be detected on back-edges.
  void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
  }

  // Move the current async-exception to the main exception. This should be called when the current
  // thread is ready to deal with any async exceptions. Returns true if there is an async exception
  // that needs to be dealt with, false otherwise.
  bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);

  // Find the catch block and perform a long jump to the appropriate exception handler.
  NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    if (tlsPtr_.long_jump_context != nullptr) {
      // Each QuickExceptionHandler gets a long jump context and uses
      // it for doing the long jump, after finding catch blocks/doing deoptimization.
      // Both finding catch blocks and deoptimization can trigger another
      // exception, e.g. as a result of class loading. So there can be nested
      // cases of exception handling, with multiple contexts in use.
      // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
      // for reuse, so there is no need to allocate a new one each time a context is
      // requested. Since we only keep one context for reuse, delete the existing one,
      // as the passed-in context has not yet been used for a long jump.
      delete tlsPtr_.long_jump_context;
    }
    tlsPtr_.long_jump_context = context;
  }

  // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
  // abort the runtime iff abort_on_error is true.
  ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
                              bool check_suspended = true,
                              bool abort_on_error = true) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns whether the given exception was thrown by the current Java method being executed
  // (note that this includes native Java methods).
  bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetTopOfStack(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
  }

  void SetTopOfStackTagged(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
  }

  // If 'msg' is null, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  // If 'msg' is null, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Roles::uninterruptible_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // Notify this thread's thread-group that this thread has started.
  // Note: the given thread-group is used as a fast path and verified in debug build. If the value
  // is null, the thread's thread-group is loaded from the peer.
  void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Convert a jobject into an Object*.
  ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
  // Checks if the weak global ref has been cleared by the GC without decoding it.
  bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted();
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted();
  void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
  void SetInterrupted(bool i) {
    tls32_.interrupted.StoreSequentiallyConsistent(i);
  }
  void Notify() REQUIRES(!*wait_mutex_);

  ALWAYS_INLINE void PoisonObjectPointers() {
    ++poison_object_cookie_;
  }

  ALWAYS_INLINE static void PoisonObjectPointersIfDebug();

  ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
    return poison_object_cookie_;
  }

 private:
  void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
    wait_monitor_ = mon;
  }

  // Waiter link-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

  jobject GetClassLoaderOverride() {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(jobject class_loader_override);

  // Create the internal representation of a stack trace, which is more time- and space-efficient
  // to compute than the StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(
      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      REQUIRES_SHARED(Locks::mutator_lock_);

  jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool HasDebuggerShadowFrames() const {
    return tlsPtr_.frame_id_to_shadow_frame != nullptr;
  }

  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kVerifyStack) {
      VerifyStackImpl();
    }
  }

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> InterruptedOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

  template<PointerSize pointer_size>
  static ThreadOffset<pointer_size> IsGcMarkingOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
  }

  static constexpr size_t IsGcMarkingSize() {
    return sizeof(tls32_.is_gc_marking);
  }

  // Deoptimize the Java stack.
  void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);

Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 663 | private: |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 664 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 665 | static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) { |
| 666 | size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_); |
| 667 | size_t scale; |
| 668 | size_t shrink; |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 669 | if (pointer_size == kRuntimePointerSize) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 670 | scale = 1; |
| 671 | shrink = 1; |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 672 | } else if (pointer_size > kRuntimePointerSize) { |
| 673 | scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 674 | shrink = 1; |
| 675 | } else { |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 676 | DCHECK_GT(kRuntimePointerSize, pointer_size); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 677 | scale = 1; |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 678 | shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 679 | } |
| 680 | return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink)); |
Ian Rogers | 07ec8e1 | 2012-12-01 01:26:51 -0800 | [diff] [blame] | 681 | } |
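// Worked example of the scaling above (illustrative numbers, not normative): if the
// runtime was built with 64-bit pointers (kRuntimePointerSize == PointerSize::k64) and
// offsets are requested for a 32-bit target (pointer_size == PointerSize::k32), then
// shrink == 2, so a member at offset 16 in the 64-bit tls_ptr_sized_values layout (its
// third pointer-sized slot) is reported at base + (16 * 1) / 2 == base + 8. This is
// only sound because tls_ptr_sized_values holds exclusively pointer-sized members.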
| 682 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 683 | public: |
Jeff Hao | 848f70a | 2014-01-15 13:49:50 -0800 | [diff] [blame] | 684 | static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset, |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 685 | PointerSize pointer_size) { |
| 686 | if (pointer_size == PointerSize::k32) { |
| 687 | return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset). |
| 688 | Uint32Value(); |
Jeff Hao | 848f70a | 2014-01-15 13:49:50 -0800 | [diff] [blame] | 689 | } else { |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 690 | return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset). |
| 691 | Uint32Value(); |
Jeff Hao | 848f70a | 2014-01-15 13:49:50 -0800 | [diff] [blame] | 692 | } |
| 693 | } |
| 694 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 695 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 696 | static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) { |
| 697 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 698 | OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 699 | } |
| 700 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 701 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 702 | static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) { |
| 703 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 704 | OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 705 | } |
| 706 | |
Roland Levillain | 97c4646 | 2017-05-11 14:04:03 +0100 | [diff] [blame] | 707 | // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`. |
| 708 | template <PointerSize pointer_size> |
| 709 | static int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) { |
| 710 | // The entry point list defines 30 ReadBarrierMarkRegX entry points. |
| 711 | DCHECK_LT(reg, 30u); |
| 712 | // The ReadBarrierMarkRegX entry points are ordered by increasing |
| 713 | // register number in Thread::tlsPtr_.quick_entrypoints. |
| 714 | return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value() |
| 715 | + static_cast<size_t>(pointer_size) * reg; |
| 716 | } |
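// Illustrative arithmetic for the expression above: with pointer_size == PointerSize::k64
// and reg == 5, the result is the offset of pReadBarrierMarkReg00 plus 8 * 5 == 40 bytes,
// i.e. the slot of pReadBarrierMarkReg05. This relies on the contiguous ordering that the
// comment and DCHECK above assume.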
| 717 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 718 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 719 | static ThreadOffset<pointer_size> SelfOffset() { |
| 720 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self)); |
| 721 | } |
| 722 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 723 | template<PointerSize pointer_size> |
buzbee | 1452bee | 2015-03-06 14:43:04 -0800 | [diff] [blame] | 724 | static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() { |
| 725 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 726 | OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase)); |
| 727 | } |
| 728 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 729 | template<PointerSize pointer_size> |
buzbee | 1452bee | 2015-03-06 14:43:04 -0800 | [diff] [blame] | 730 | static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() { |
| 731 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 732 | OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase)); |
| 733 | } |
| 734 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 735 | template<PointerSize pointer_size> |
buzbee | 1452bee | 2015-03-06 14:43:04 -0800 | [diff] [blame] | 736 | static ThreadOffset<pointer_size> MterpAltIBaseOffset() { |
| 737 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 738 | OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase)); |
| 739 | } |
| 740 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 741 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 742 | static ThreadOffset<pointer_size> ExceptionOffset() { |
| 743 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception)); |
| 744 | } |
| 745 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 746 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 747 | static ThreadOffset<pointer_size> PeerOffset() { |
| 748 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer)); |
| 749 | } |
| 750 | |
| 751 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 752 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 753 | static ThreadOffset<pointer_size> CardTableOffset() { |
| 754 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table)); |
| 755 | } |
| 756 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 757 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 758 | static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() { |
| 759 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 760 | OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger)); |
Dave Allison | b373e09 | 2014-02-20 16:06:36 -0800 | [diff] [blame] | 761 | } |
| 762 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 763 | template<PointerSize pointer_size> |
Hiroshi Yamauchi | e01a520 | 2015-03-19 12:35:04 -0700 | [diff] [blame] | 764 | static ThreadOffset<pointer_size> ThreadLocalPosOffset() { |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 765 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 766 | thread_local_pos)); |
Hiroshi Yamauchi | e01a520 | 2015-03-19 12:35:04 -0700 | [diff] [blame] | 767 | } |
| 768 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 769 | template<PointerSize pointer_size> |
Hiroshi Yamauchi | e01a520 | 2015-03-19 12:35:04 -0700 | [diff] [blame] | 770 | static ThreadOffset<pointer_size> ThreadLocalEndOffset() { |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 771 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 772 | thread_local_end)); |
Hiroshi Yamauchi | e01a520 | 2015-03-19 12:35:04 -0700 | [diff] [blame] | 773 | } |
| 774 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 775 | template<PointerSize pointer_size> |
Hiroshi Yamauchi | e01a520 | 2015-03-19 12:35:04 -0700 | [diff] [blame] | 776 | static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() { |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 777 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 778 | thread_local_objects)); |
Hiroshi Yamauchi | e01a520 | 2015-03-19 12:35:04 -0700 | [diff] [blame] | 779 | } |
| 780 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 781 | template<PointerSize pointer_size> |
Hiroshi Yamauchi | dc412b6 | 2015-10-15 12:26:57 -0700 | [diff] [blame] | 782 | static ThreadOffset<pointer_size> RosAllocRunsOffset() { |
| 783 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 784 | rosalloc_runs)); |
| 785 | } |
| 786 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 787 | template<PointerSize pointer_size> |
Hiroshi Yamauchi | dc412b6 | 2015-10-15 12:26:57 -0700 | [diff] [blame] | 788 | static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() { |
| 789 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 790 | thread_local_alloc_stack_top)); |
| 791 | } |
| 792 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 793 | template<PointerSize pointer_size> |
Hiroshi Yamauchi | dc412b6 | 2015-10-15 12:26:57 -0700 | [diff] [blame] | 794 | static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() { |
| 795 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 796 | thread_local_alloc_stack_end)); |
| 797 | } |
| 798 | |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 799 | // Size of the stack, less any space reserved for stack overflow handling. |
jeffhao | d752132 | 2012-11-21 15:38:24 -0800 | [diff] [blame] | 800 | size_t GetStackSize() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 801 | return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin); |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 802 | } |
| 803 | |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 804 | uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const { |
Nicolas Geoffray | 535a3fb | 2014-07-22 15:17:38 +0100 | [diff] [blame] | 805 | if (implicit_overflow_check) { |
| 806 | // The interpreter needs the extra overflow bytes that stack_end does |
| 807 | // not include. |
| 808 | return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA); |
| 809 | } else { |
| 810 | return tlsPtr_.stack_end; |
| 811 | } |
| 812 | } |
| 813 | |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 814 | uint8_t* GetStackEnd() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 815 | return tlsPtr_.stack_end; |
jeffhao | d752132 | 2012-11-21 15:38:24 -0800 | [diff] [blame] | 816 | } |
| 817 | |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 818 | // Set the stack end to the value to be used while handling a stack overflow. |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 819 | void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_); |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 820 | |
| 821 | // Set the stack end to the value to be used during regular execution. |
Dave Allison | b090a18 | 2014-08-14 17:02:48 +0000 | [diff] [blame] | 822 | void ResetDefaultStackEnd() { |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 823 | // Our stacks grow down, so we want stack_end_ to be near stack_begin_, while reserving |
| 824 | // enough room to throw a StackOverflowError. |
Dave Allison | b090a18 | 2014-08-14 17:02:48 +0000 | [diff] [blame] | 825 | tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA); |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 826 | } |
| 827 | |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 828 | bool IsHandlingStackOverflow() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 829 | return tlsPtr_.stack_end == tlsPtr_.stack_begin; |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 830 | } |
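// Rough picture of the layout the stack-end helpers above maintain (the stack grows
// down; a sketch, not normative):
//
//   stack_begin + stack_size -> highest address, where frames start
//   stack_end                -> lowest address ordinary frames may reach
//   stack_begin              -> stack_end - GetStackOverflowReservedBytes(kRuntimeISA)
//
// The gap between stack_begin and stack_end is headroom for throwing StackOverflowError;
// SetStackEndForStackOverflow() lowers stack_end to stack_begin while that headroom is
// in use, which is exactly what IsHandlingStackOverflow() tests.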
| 831 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 832 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 833 | static ThreadOffset<pointer_size> StackEndOffset() { |
| 834 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 835 | OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end)); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 836 | } |
| 837 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 838 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 839 | static ThreadOffset<pointer_size> JniEnvOffset() { |
| 840 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 841 | OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env)); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 842 | } |
| 843 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 844 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 845 | static ThreadOffset<pointer_size> TopOfManagedStackOffset() { |
| 846 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 847 | OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) + |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 848 | ManagedStack::TaggedTopQuickFrameOffset()); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 849 | } |
| 850 | |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 851 | const ManagedStack* GetManagedStack() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 852 | return &tlsPtr_.managed_stack; |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 853 | } |
| 854 | |
| 855 | // Linked list recording fragments of the managed stack. |
| 856 | void PushManagedStackFragment(ManagedStack* fragment) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 857 | tlsPtr_.managed_stack.PushManagedStackFragment(fragment); |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 858 | } |
| 859 | void PopManagedStackFragment(const ManagedStack& fragment) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 860 | tlsPtr_.managed_stack.PopManagedStackFragment(fragment); |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 861 | } |
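// Typical pairing (a sketch of how a caller brackets a region that runs managed code on
// a fresh fragment; the actual bookkeeping lives in ManagedStack):
//
//   ManagedStack fragment;
//   self->PushManagedStackFragment(&fragment);  // save the current managed stack state
//   ...                                         // managed frames are pushed and popped
//   self->PopManagedStackFragment(fragment);    // restore the saved state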
| 862 | |
Andreas Gampe | 513061a | 2017-06-01 09:17:34 -0700 | [diff] [blame] | 863 | ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame); |
| 864 | ALWAYS_INLINE ShadowFrame* PopShadowFrame(); |
Logan Chien | f7ad17e | 2012-03-15 03:10:03 +0800 | [diff] [blame] | 865 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 866 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 867 | static ThreadOffset<pointer_size> TopShadowFrameOffset() { |
| 868 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 869 | OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) + |
| 870 | ManagedStack::TopShadowFrameOffset()); |
TDYa127 | d668a06 | 2012-04-13 12:36:57 -0700 | [diff] [blame] | 871 | } |
| 872 | |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 873 | // Is the given obj in this thread's stack indirect reference table (handle scope)? |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 874 | bool HandleScopeContains(jobject obj) const; |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 875 | |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 876 | void HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 877 | REQUIRES_SHARED(Locks::mutator_lock_); |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 878 | |
Mathieu Chartier | e8a3c57 | 2016-10-11 16:52:17 -0700 | [diff] [blame] | 879 | BaseHandleScope* GetTopHandleScope() { |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 880 | return tlsPtr_.top_handle_scope; |
Ian Rogers | 1f53934 | 2012-10-03 21:09:42 -0700 | [diff] [blame] | 881 | } |
| 882 | |
Mathieu Chartier | e8a3c57 | 2016-10-11 16:52:17 -0700 | [diff] [blame] | 883 | void PushHandleScope(BaseHandleScope* handle_scope) { |
Ian Rogers | 59c0706 | 2014-10-10 13:03:39 -0700 | [diff] [blame] | 884 | DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope); |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 885 | tlsPtr_.top_handle_scope = handle_scope; |
| 886 | } |
| 887 | |
Mathieu Chartier | e8a3c57 | 2016-10-11 16:52:17 -0700 | [diff] [blame] | 888 | BaseHandleScope* PopHandleScope() { |
| 889 | BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope; |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 890 | DCHECK(handle_scope != nullptr); |
| 891 | tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink(); |
| 892 | return handle_scope; |
Ian Rogers | 1f53934 | 2012-10-03 21:09:42 -0700 | [diff] [blame] | 893 | } |
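// A minimal usage sketch (hypothetical caller; in practice StackHandleScope performs
// this pairing automatically from its constructor and destructor):
//
//   BaseHandleScope* scope = ...;  // constructed so that GetLink() == the current top
//   self->PushHandleScope(scope);  // scope becomes the new top, visited during GC
//   ...                            // create and use handles within the scope
//   BaseHandleScope* popped = self->PopHandleScope();
//   DCHECK_EQ(popped, scope);      // the previous top is restored via scope->GetLink()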
Brian Carlstrom | 40381fb | 2011-10-19 14:13:40 -0700 | [diff] [blame] | 894 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 895 | template<PointerSize pointer_size> |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 896 | static ThreadOffset<pointer_size> TopHandleScopeOffset() { |
| 897 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 898 | top_handle_scope)); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 899 | } |
| 900 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 901 | DebugInvokeReq* GetInvokeReq() const { |
| 902 | return tlsPtr_.debug_invoke_req; |
Elliott Hughes | 475fc23 | 2011-10-25 15:00:35 -0700 | [diff] [blame] | 903 | } |
| 904 | |
Sebastien Hertz | 61b7f1b | 2013-11-15 15:59:30 +0100 | [diff] [blame] | 905 | SingleStepControl* GetSingleStepControl() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 906 | return tlsPtr_.single_step_control; |
Sebastien Hertz | 61b7f1b | 2013-11-15 15:59:30 +0100 | [diff] [blame] | 907 | } |
| 908 | |
Sebastien Hertz | 1558b57 | 2015-02-25 15:05:59 +0100 | [diff] [blame] | 909 | // Indicates whether this thread is ready to invoke a method for debugging. This |
| 910 | // is only true if the thread has been suspended by a debug event. |
| 911 | bool IsReadyForDebugInvoke() const { |
| 912 | return tls32_.ready_for_debug_invoke; |
| 913 | } |
| 914 | |
| 915 | void SetReadyForDebugInvoke(bool ready) { |
| 916 | tls32_.ready_for_debug_invoke = ready; |
| 917 | } |
| 918 | |
Sebastien Hertz | 9d6bf69 | 2015-04-10 12:12:33 +0200 | [diff] [blame] | 919 | bool IsDebugMethodEntry() const { |
| 920 | return tls32_.debug_method_entry_; |
| 921 | } |
| 922 | |
| 923 | void SetDebugMethodEntry() { |
| 924 | tls32_.debug_method_entry_ = true; |
| 925 | } |
| 926 | |
| 927 | void ClearDebugMethodEntry() { |
| 928 | tls32_.debug_method_entry_ = false; |
| 929 | } |
| 930 | |
Hiroshi Yamauchi | 0037082 | 2015-08-18 14:47:25 -0700 | [diff] [blame] | 931 | bool GetIsGcMarking() const { |
| 932 | CHECK(kUseReadBarrier); |
| 933 | return tls32_.is_gc_marking; |
| 934 | } |
| 935 | |
Mathieu Chartier | fe814e8 | 2016-11-09 14:32:49 -0800 | [diff] [blame] | 936 | void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking); |
Hiroshi Yamauchi | 0037082 | 2015-08-18 14:47:25 -0700 | [diff] [blame] | 937 | |
Hiroshi Yamauchi | 0b71357 | 2015-06-16 18:29:23 -0700 | [diff] [blame] | 938 | bool GetWeakRefAccessEnabled() const { |
| 939 | CHECK(kUseReadBarrier); |
| 940 | return tls32_.weak_ref_access_enabled; |
| 941 | } |
| 942 | |
| 943 | void SetWeakRefAccessEnabled(bool enabled) { |
| 944 | CHECK(kUseReadBarrier); |
| 945 | tls32_.weak_ref_access_enabled = enabled; |
| 946 | } |
| 947 | |
Hiroshi Yamauchi | 20a0be0 | 2016-02-19 15:44:06 -0800 | [diff] [blame] | 948 | uint32_t GetDisableThreadFlipCount() const { |
| 949 | CHECK(kUseReadBarrier); |
| 950 | return tls32_.disable_thread_flip_count; |
| 951 | } |
| 952 | |
| 953 | void IncrementDisableThreadFlipCount() { |
| 954 | CHECK(kUseReadBarrier); |
| 955 | ++tls32_.disable_thread_flip_count; |
| 956 | } |
| 957 | |
| 958 | void DecrementDisableThreadFlipCount() { |
| 959 | CHECK(kUseReadBarrier); |
| 960 | DCHECK_GT(tls32_.disable_thread_flip_count, 0U); |
| 961 | --tls32_.disable_thread_flip_count; |
| 962 | } |
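// Hedged sketch of the intended pairing (callers bracket regions where the concurrent
// copying collector must not flip this thread, e.g. while raw pointers into the heap
// are held):
//
//   self->IncrementDisableThreadFlipCount();  // the flip is now blocked for this thread
//   ...                                       // code that must not observe a thread flip
//   self->DecrementDisableThreadFlipCount();  // DCHECKs that the count was non-zero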
| 963 | |
Calin Juravle | ccd5695 | 2016-12-15 17:57:38 +0000 | [diff] [blame] | 964 | // Returns true if the thread is allowed to call into Java. |
| 965 | bool CanCallIntoJava() const { |
| 966 | return can_call_into_java_; |
| 967 | } |
| 968 | |
| 969 | void SetCanCallIntoJava(bool can_call_into_java) { |
| 970 | can_call_into_java_ = can_call_into_java; |
| 971 | } |
| 972 | |
Sebastien Hertz | 597c4f0 | 2015-01-26 17:37:14 +0100 | [diff] [blame] | 973 | // Activates single step control for debugging. The thread takes ownership |
| 974 | // of the given SingleStepControl*. It is deleted by a call |
| 975 | // to DeactivateSingleStepControl or upon thread destruction. |
| 976 | void ActivateSingleStepControl(SingleStepControl* ssc); |
| 977 | |
| 978 | // Deactivates single step control for debugging. |
| 979 | void DeactivateSingleStepControl(); |
| 980 | |
Sebastien Hertz | 1558b57 | 2015-02-25 15:05:59 +0100 | [diff] [blame] | 981 | // Sets debug invoke request for debugging. When the thread is resumed, |
Sebastien Hertz | cbc5064 | 2015-06-01 17:33:12 +0200 | [diff] [blame] | 982 | // it executes the method described by this request, then sends the reply |
| 983 | // before suspending itself. The thread takes ownership of the given |
| 984 | // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq. |
Sebastien Hertz | 1558b57 | 2015-02-25 15:05:59 +0100 | [diff] [blame] | 985 | void SetDebugInvokeReq(DebugInvokeReq* req); |
| 986 | |
| 987 | // Clears debug invoke request for debugging. When the thread completes the |
Sebastien Hertz | cbc5064 | 2015-06-01 17:33:12 +0200 | [diff] [blame] | 988 | // method invocation, it deletes its debug invoke request and suspends |
| 989 | // itself. |
Sebastien Hertz | 1558b57 | 2015-02-25 15:05:59 +0100 | [diff] [blame] | 990 | void ClearDebugInvokeReq(); |
Sebastien Hertz | 597c4f0 | 2015-01-26 17:37:14 +0100 | [diff] [blame] | 991 | |
Sebastien Hertz | fd3077e | 2014-04-23 10:32:43 +0200 | [diff] [blame] | 992 | // Returns the fake exception used to activate deoptimization. |
| 993 | static mirror::Throwable* GetDeoptimizationException() { |
Mathieu Chartier | 0795f23 | 2016-09-27 18:43:30 -0700 | [diff] [blame] | 994 | // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be |
| 995 | // represented by ObjPtr. |
| 996 | return reinterpret_cast<mirror::Throwable*>(0x100); |
Sebastien Hertz | fd3077e | 2014-04-23 10:32:43 +0200 | [diff] [blame] | 997 | } |
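// Illustrative use of the sentinel (a sketch; the real call sites live in the runtime
// sources, and GetException() is declared elsewhere in this class):
//
//   if (self->GetException() == Thread::GetDeoptimizationException()) {
//     JValue result;
//     self->DeoptimizeWithDeoptimizationException(&result);  // declared above
//   }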
| 998 | |
Mingyao Yang | 1f2d3ba | 2015-05-18 12:12:50 -0700 | [diff] [blame] | 999 | // Currently deoptimization invokes the verifier, which can trigger class loading |
| 1000 | // and execute Java code, so there might be nested deoptimizations happening. |
| 1001 | // We need to save the ongoing deoptimization shadow frames and return |
| 1002 | // values on stacks. |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1003 | // 'from_code' denotes whether the deoptimization was explicitly made from |
| 1004 | // compiled code. |
Mingyao Yang | 2ee1790 | 2017-08-30 11:37:08 -0700 | [diff] [blame] | 1005 | // 'method_type' contains info on whether deoptimization should advance |
| 1006 | // dex_pc. |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1007 | void PushDeoptimizationContext(const JValue& return_value, |
| 1008 | bool is_reference, |
Mingyao Yang | 2ee1790 | 2017-08-30 11:37:08 -0700 | [diff] [blame] | 1009 | ObjPtr<mirror::Throwable> exception, |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1010 | bool from_code, |
Mingyao Yang | 2ee1790 | 2017-08-30 11:37:08 -0700 | [diff] [blame] | 1011 | DeoptimizationMethodType method_type) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1012 | REQUIRES_SHARED(Locks::mutator_lock_); |
Mathieu Chartier | f5769e1 | 2017-01-10 15:54:41 -0800 | [diff] [blame] | 1013 | void PopDeoptimizationContext(JValue* result, |
| 1014 | ObjPtr<mirror::Throwable>* exception, |
Mingyao Yang | 2ee1790 | 2017-08-30 11:37:08 -0700 | [diff] [blame] | 1015 | bool* from_code, |
| 1016 | DeoptimizationMethodType* method_type) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1017 | REQUIRES_SHARED(Locks::mutator_lock_); |
Sebastien Hertz | 0747466 | 2015-08-25 15:12:33 +0000 | [diff] [blame] | 1018 | void AssertHasDeoptimizationContext() |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1019 | REQUIRES_SHARED(Locks::mutator_lock_); |
Mingyao Yang | 1f2d3ba | 2015-05-18 12:12:50 -0700 | [diff] [blame] | 1020 | void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type); |
Andreas Gampe | 639bdd1 | 2015-06-03 11:22:45 -0700 | [diff] [blame] | 1021 | ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true); |
Andreas Gampe | 2a0d4ec | 2014-06-02 22:05:22 -0700 | [diff] [blame] | 1022 | |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1023 | // For debugger, find the shadow frame that corresponds to a frame id, |
| 1024 | // or return null if there is none. |
| 1025 | ShadowFrame* FindDebuggerShadowFrame(size_t frame_id) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1026 | REQUIRES_SHARED(Locks::mutator_lock_); |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1027 | // For debugger, find the bool array that keeps track of the updated vreg set |
| 1028 | // for a frame id. |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1029 | bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_); |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1030 | // For debugger, find the shadow frame that corresponds to a frame id. If |
| 1031 | // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame. |
| 1032 | ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id, |
| 1033 | uint32_t num_vregs, |
| 1034 | ArtMethod* method, |
| 1035 | uint32_t dex_pc) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1036 | REQUIRES_SHARED(Locks::mutator_lock_); |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1037 | |
| 1038 | // Delete the entry that maps from frame_id to shadow_frame. |
| 1039 | void RemoveDebuggerShadowFrameMapping(size_t frame_id) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1040 | REQUIRES_SHARED(Locks::mutator_lock_); |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1041 | |
Ian Rogers | 62d6c77 | 2013-02-27 08:32:07 -0800 | [diff] [blame] | 1042 | std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1043 | return tlsPtr_.instrumentation_stack; |
jeffhao | e343b76 | 2011-12-05 16:36:44 -0800 | [diff] [blame] | 1044 | } |
| 1045 | |
Mathieu Chartier | e401d14 | 2015-04-22 13:56:20 -0700 | [diff] [blame] | 1046 | std::vector<ArtMethod*>* GetStackTraceSample() const { |
Andreas Gampe | 2c19f5b | 2016-11-28 08:10:18 -0800 | [diff] [blame] | 1047 | DCHECK(!IsAotCompiler()); |
Nicolas Geoffray | 340dafa | 2016-11-18 16:03:10 +0000 | [diff] [blame] | 1048 | return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample; |
Jeff Hao | 5ce4b17 | 2013-08-16 16:27:18 -0700 | [diff] [blame] | 1049 | } |
| 1050 | |
Mathieu Chartier | e401d14 | 2015-04-22 13:56:20 -0700 | [diff] [blame] | 1051 | void SetStackTraceSample(std::vector<ArtMethod*>* sample) { |
Andreas Gampe | 2c19f5b | 2016-11-28 08:10:18 -0800 | [diff] [blame] | 1052 | DCHECK(!IsAotCompiler()); |
Nicolas Geoffray | 340dafa | 2016-11-18 16:03:10 +0000 | [diff] [blame] | 1053 | tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample; |
| 1054 | } |
| 1055 | |
| 1056 | verifier::VerifierDeps* GetVerifierDeps() const { |
Andreas Gampe | 2c19f5b | 2016-11-28 08:10:18 -0800 | [diff] [blame] | 1057 | DCHECK(IsAotCompiler()); |
Nicolas Geoffray | 340dafa | 2016-11-18 16:03:10 +0000 | [diff] [blame] | 1058 | return tlsPtr_.deps_or_stack_trace_sample.verifier_deps; |
| 1059 | } |
| 1060 | |
| 1061 | // It is the responsibility of the caller to make sure the verifier_deps |
| 1062 | // entry in the thread is cleared before destruction of the actual VerifierDeps |
| 1063 | // object, or the thread. |
| 1064 | void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) { |
Andreas Gampe | 2c19f5b | 2016-11-28 08:10:18 -0800 | [diff] [blame] | 1065 | DCHECK(IsAotCompiler()); |
Nicolas Geoffray | e424c93 | 2016-11-23 12:52:01 +0000 | [diff] [blame] | 1066 | DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr); |
Nicolas Geoffray | 340dafa | 2016-11-18 16:03:10 +0000 | [diff] [blame] | 1067 | tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps; |
Jeff Hao | 5ce4b17 | 2013-08-16 16:27:18 -0700 | [diff] [blame] | 1068 | } |
| 1069 | |
| 1070 | uint64_t GetTraceClockBase() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1071 | return tls64_.trace_clock_base; |
Jeff Hao | 5ce4b17 | 2013-08-16 16:27:18 -0700 | [diff] [blame] | 1072 | } |
| 1073 | |
| 1074 | void SetTraceClockBase(uint64_t clock_base) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1075 | tls64_.trace_clock_base = clock_base; |
Jeff Hao | 5ce4b17 | 2013-08-16 16:27:18 -0700 | [diff] [blame] | 1076 | } |
| 1077 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 1078 | BaseMutex* GetHeldMutex(LockLevel level) const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1079 | return tlsPtr_.held_mutexes[level]; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1080 | } |
| 1081 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 1082 | void SetHeldMutex(LockLevel level, BaseMutex* mutex) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1083 | tlsPtr_.held_mutexes[level] = mutex; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1084 | } |
Elliott Hughes | ffb465f | 2012-03-01 18:46:05 -0800 | [diff] [blame] | 1085 | |
Yu Li | eac4424 | 2015-06-29 10:50:03 +0800 | [diff] [blame] | 1086 | void ClearSuspendBarrier(AtomicInteger* target) |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 1087 | REQUIRES(Locks::thread_suspend_count_lock_); |
Yu Li | eac4424 | 2015-06-29 10:50:03 +0800 | [diff] [blame] | 1088 | |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1089 | bool ReadFlag(ThreadFlag flag) const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1090 | return (tls32_.state_and_flags.as_struct.flags & flag) != 0; |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1091 | } |
| 1092 | |
Jeff Hao | 9cec247 | 2013-05-14 18:17:06 -0700 | [diff] [blame] | 1093 | bool TestAllFlags() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1094 | return (tls32_.state_and_flags.as_struct.flags != 0); |
Jeff Hao | 9cec247 | 2013-05-14 18:17:06 -0700 | [diff] [blame] | 1095 | } |
| 1096 | |
Ian Rogers | 8c1b5f7 | 2014-07-09 22:02:36 -0700 | [diff] [blame] | 1097 | void AtomicSetFlag(ThreadFlag flag) { |
Orion Hodson | 4131d10 | 2018-01-03 14:04:42 +0000 | [diff] [blame] | 1098 | tls32_.state_and_flags.as_atomic_int.FetchAndBitwiseOrSequentiallyConsistent(flag); |
Ian Rogers | 8c1b5f7 | 2014-07-09 22:02:36 -0700 | [diff] [blame] | 1099 | } |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1100 | |
Ian Rogers | 8c1b5f7 | 2014-07-09 22:02:36 -0700 | [diff] [blame] | 1101 | void AtomicClearFlag(ThreadFlag flag) { |
Orion Hodson | 4131d10 | 2018-01-03 14:04:42 +0000 | [diff] [blame] | 1102 | tls32_.state_and_flags.as_atomic_int.FetchAndBitwiseAndSequentiallyConsistent(-1 ^ flag); |
Ian Rogers | 8c1b5f7 | 2014-07-09 22:02:36 -0700 | [diff] [blame] | 1103 | } |
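// Note on the mask above: -1 has all bits set, so (-1 ^ flag) == ~flag. E.g. clearing a
// flag whose value is 0x4 ANDs the word with 0xfffffffb, leaving every other flag bit
// and the packed state halfword untouched.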
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1104 | |
Mathieu Chartier | 5ace201 | 2016-11-30 10:15:41 -0800 | [diff] [blame] | 1105 | void ResetQuickAllocEntryPointsForThread(bool is_marking); |
Hiroshi Yamauchi | 3b4c189 | 2013-09-12 21:33:12 -0700 | [diff] [blame] | 1106 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1107 | // Returns the remaining space in the TLAB. |
Mathieu Chartier | 6bc7774 | 2017-04-18 17:46:23 -0700 | [diff] [blame] | 1108 | size_t TlabSize() const { |
| 1109 | return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos; |
| 1110 | } |
| 1111 | |
| 1112 | // Returns the remaining space in the TLAB if we were to expand it to maximum capacity. |
| 1113 | size_t TlabRemainingCapacity() const { |
| 1114 | return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos; |
| 1115 | } |
| 1116 | |
| 1117 | // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so. |
| 1118 | void ExpandTlab(size_t bytes) { |
| 1119 | tlsPtr_.thread_local_end += bytes; |
| 1120 | DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit); |
| 1121 | } |
| 1122 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1123 | // Doesn't check that there is room. |
| 1124 | mirror::Object* AllocTlab(size_t bytes); |
Mathieu Chartier | 6bc7774 | 2017-04-18 17:46:23 -0700 | [diff] [blame] | 1125 | void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1126 | bool HasTlab() const; |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 1127 | uint8_t* GetTlabStart() { |
| 1128 | return tlsPtr_.thread_local_start; |
| 1129 | } |
| 1130 | uint8_t* GetTlabPos() { |
| 1131 | return tlsPtr_.thread_local_pos; |
| 1132 | } |
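// Bump-pointer allocation sketch (an assumed shape for AllocTlab, which is defined
// outside this header; shown only to illustrate the pos/end/objects bookkeeping):
//
//   mirror::Object* Thread::AllocTlab(size_t bytes) {
//     uint8_t* pos = tlsPtr_.thread_local_pos;  // caller ensured TlabSize() >= bytes
//     tlsPtr_.thread_local_pos = pos + bytes;
//     ++tlsPtr_.thread_local_objects;
//     return reinterpret_cast<mirror::Object*>(pos);
//   }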
Elliott Hughes | 5d96a71 | 2012-06-28 12:24:27 -0700 | [diff] [blame] | 1133 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1134 | // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value |
| 1135 | // equal to a valid pointer. |
| 1136 | // TODO: does this need to be atomic? I don't think so. |
| 1137 | void RemoveSuspendTrigger() { |
| 1138 | tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger); |
| 1139 | } |
| 1140 | |
| 1141 | // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer. |
| 1142 | // The next time a suspend check is done, it will load from the value at this address |
| 1143 | // and trigger a SIGSEGV. |
| 1144 | void TriggerSuspend() { |
| 1145 | tlsPtr_.suspend_trigger = nullptr; |
| 1146 | } |
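// How the trigger is consumed (a sketch; on architectures with implicit suspend checks
// the compiler emits a periodic poll equivalent to):
//
//   *reinterpret_cast<volatile uintptr_t*>(tlsPtr_.suspend_trigger);
//
// After RemoveSuspendTrigger() the slot points at itself, so the load is harmless; after
// TriggerSuspend() it is null, the load faults, and the SIGSEGV handler performs the
// actual suspend check.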
| 1147 | |
| 1148 | |
| 1149 | // Push an object onto the allocation stack. |
Mathieu Chartier | cb535da | 2015-01-23 13:50:03 -0800 | [diff] [blame] | 1150 | bool PushOnThreadLocalAllocationStack(mirror::Object* obj) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1151 | REQUIRES_SHARED(Locks::mutator_lock_); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1152 | |
| 1153 | // Set the thread local allocation pointers to the given pointers. |
Mathieu Chartier | cb535da | 2015-01-23 13:50:03 -0800 | [diff] [blame] | 1154 | void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start, |
| 1155 | StackReference<mirror::Object>* end); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1156 | |
| 1157 | // Resets the thread local allocation pointers. |
| 1158 | void RevokeThreadLocalAllocationStack(); |
| 1159 | |
| 1160 | size_t GetThreadLocalBytesAllocated() const { |
Mathieu Chartier | 14cc9be | 2014-07-11 10:26:37 -0700 | [diff] [blame] | 1161 | return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1162 | } |
| 1163 | |
| 1164 | size_t GetThreadLocalObjectsAllocated() const { |
| 1165 | return tlsPtr_.thread_local_objects; |
| 1166 | } |
| 1167 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1168 | void* GetRosAllocRun(size_t index) const { |
| 1169 | return tlsPtr_.rosalloc_runs[index]; |
| 1170 | } |
| 1171 | |
| 1172 | void SetRosAllocRun(size_t index, void* run) { |
| 1173 | tlsPtr_.rosalloc_runs[index] = run; |
| 1174 | } |
| 1175 | |
Andreas Gampe | 2c2d2a0 | 2016-03-17 21:27:19 -0700 | [diff] [blame] | 1176 | bool ProtectStack(bool fatal_on_error = true); |
Dave Allison | 648d711 | 2014-07-25 16:15:27 -0700 | [diff] [blame] | 1177 | bool UnprotectStack(); |
| 1178 | |
buzbee | 1452bee | 2015-03-06 14:43:04 -0800 | [diff] [blame] | 1179 | void SetMterpDefaultIBase(void* ibase) { |
| 1180 | tlsPtr_.mterp_default_ibase = ibase; |
| 1181 | } |
| 1182 | |
| 1183 | void SetMterpCurrentIBase(void* ibase) { |
| 1184 | tlsPtr_.mterp_current_ibase = ibase; |
| 1185 | } |
| 1186 | |
| 1187 | void SetMterpAltIBase(void* ibase) { |
| 1188 | tlsPtr_.mterp_alt_ibase = ibase; |
| 1189 | } |
| 1190 | |
| 1191 | const void* GetMterpDefaultIBase() const { |
| 1192 | return tlsPtr_.mterp_default_ibase; |
| 1193 | } |
| 1194 | |
| 1195 | const void* GetMterpCurrentIBase() const { |
| 1196 | return tlsPtr_.mterp_current_ibase; |
| 1197 | } |
| 1198 | |
| 1199 | const void* GetMterpAltIBase() const { |
| 1200 | return tlsPtr_.mterp_alt_ibase; |
| 1201 | } |
| 1202 | |
Josh Gao | efd20cb | 2017-02-28 16:53:59 -0800 | [diff] [blame] | 1203 | bool HandlingSignal() const { |
| 1204 | return tls32_.handling_signal_; |
Dave Allison | 648d711 | 2014-07-25 16:15:27 -0700 | [diff] [blame] | 1205 | } |
| 1206 | |
Josh Gao | efd20cb | 2017-02-28 16:53:59 -0800 | [diff] [blame] | 1207 | void SetHandlingSignal(bool handling_signal) { |
| 1208 | tls32_.handling_signal_ = handling_signal; |
Dave Allison | 8ce6b90 | 2014-08-26 11:07:58 -0700 | [diff] [blame] | 1209 | } |
| 1210 | |
Hiroshi Yamauchi | ee23582 | 2016-08-19 17:03:27 -0700 | [diff] [blame] | 1211 | bool IsTransitioningToRunnable() const { |
| 1212 | return tls32_.is_transitioning_to_runnable; |
| 1213 | } |
| 1214 | |
| 1215 | void SetIsTransitioningToRunnable(bool value) { |
| 1216 | tls32_.is_transitioning_to_runnable = value; |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 1217 | } |
| 1218 | |
Mathieu Chartier | d0ad2ee | 2015-03-31 14:59:59 -0700 | [diff] [blame] | 1219 | void PushVerifier(verifier::MethodVerifier* verifier); |
| 1220 | void PopVerifier(verifier::MethodVerifier* verifier); |
Mathieu Chartier | 12d625f | 2015-03-13 11:33:37 -0700 | [diff] [blame] | 1221 | |
Jeff Hao | 848f70a | 2014-01-15 13:49:50 -0800 | [diff] [blame] | 1222 | void InitStringEntryPoints(); |
| 1223 | |
Mathieu Chartier | dfe02f6 | 2016-02-01 20:15:11 -0800 | [diff] [blame] | 1224 | void ModifyDebugDisallowReadBarrier(int8_t delta) { |
| 1225 | debug_disallow_read_barrier_ += delta; |
| 1226 | } |
| 1227 | |
| 1228 | uint8_t GetDebugDisallowReadBarrierCount() const { |
| 1229 | return debug_disallow_read_barrier_; |
| 1230 | } |
| 1231 | |
Alex Light | 092a404 | 2017-07-12 08:46:44 -0700 | [diff] [blame] | 1232 | void* GetCustomTLS() const REQUIRES(Locks::thread_list_lock_) { |
Andreas Gampe | f26bf2d | 2017-01-13 16:47:14 -0800 | [diff] [blame] | 1233 | return custom_tls_; |
| 1234 | } |
| 1235 | |
Alex Light | 092a404 | 2017-07-12 08:46:44 -0700 | [diff] [blame] | 1236 | void SetCustomTLS(void* data) REQUIRES(Locks::thread_list_lock_) { |
Andreas Gampe | f26bf2d | 2017-01-13 16:47:14 -0800 | [diff] [blame] | 1237 | custom_tls_ = data; |
| 1238 | } |
| 1239 | |
Calin Juravle | 97cbc92 | 2016-04-15 16:16:35 +0100 | [diff] [blame] | 1240 | // Returns true if the current thread is the jit sensitive thread. |
| 1241 | bool IsJitSensitiveThread() const { |
| 1242 | return this == jit_sensitive_thread_; |
| 1243 | } |
| 1244 | |
| 1245 | // Returns true if StrictMode events are traced for the current thread. |
Calin Juravle | b2771b4 | 2016-04-07 17:09:25 +0100 | [diff] [blame] | 1246 | static bool IsSensitiveThread() { |
| 1247 | if (is_sensitive_thread_hook_ != nullptr) { |
| 1248 | return (*is_sensitive_thread_hook_)(); |
| 1249 | } |
| 1250 | return false; |
| 1251 | } |
| 1252 | |
Mathieu Chartier | 3768ade | 2017-05-02 14:04:39 -0700 | [diff] [blame] | 1253 | // Sets the read barrier marking entrypoints to be non-null. |
| 1254 | void SetReadBarrierEntrypoints(); |
| 1255 | |
Andreas Gampe | bad529d | 2017-02-13 18:52:10 -0800 | [diff] [blame] | 1256 | static jobject CreateCompileTimePeer(JNIEnv* env, |
| 1257 | const char* name, |
| 1258 | bool as_daemon, |
| 1259 | jobject thread_group) |
| 1260 | REQUIRES_SHARED(Locks::mutator_lock_); |
| 1261 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1262 | private: |
Ian Rogers | 52673ff | 2012-06-27 23:25:34 -0700 | [diff] [blame] | 1263 | explicit Thread(bool daemon); |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 1264 | ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_); |
Elliott Hughes | c0f0933 | 2012-03-26 13:27:06 -0700 | [diff] [blame] | 1265 | void Destroy(); |
Carl Shapiro | 0e5d75d | 2011-07-06 18:28:37 -0700 | [diff] [blame] | 1266 | |
Andreas Gampe | 732b0ac | 2017-01-18 15:23:39 -0800 | [diff] [blame] | 1267 | // Attaches the calling native thread to the runtime, returning the new native peer. |
| 1268 | // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls. |
| 1269 | template <typename PeerAction> |
| 1270 | static Thread* Attach(const char* thread_name, |
| 1271 | bool as_daemon, |
| 1272 | PeerAction p); |
| 1273 | |
Ian Rogers | 365c102 | 2012-06-22 15:05:28 -0700 | [diff] [blame] | 1274 | void CreatePeer(const char* name, bool as_daemon, jobject thread_group); |
Elliott Hughes | 5fe594f | 2011-09-08 12:33:17 -0700 | [diff] [blame] | 1275 | |
Sebastien Hertz | d2fe10a | 2014-01-15 10:20:56 +0100 | [diff] [blame] | 1276 | template<bool kTransactionActive> |
Andreas Gampe | bad529d | 2017-02-13 18:52:10 -0800 | [diff] [blame] | 1277 | static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa, |
| 1278 | ObjPtr<mirror::Object> peer, |
| 1279 | jboolean thread_is_daemon, |
| 1280 | jobject thread_group, |
| 1281 | jobject thread_name, |
| 1282 | jint thread_priority) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1283 | REQUIRES_SHARED(Locks::mutator_lock_); |
Sebastien Hertz | d2fe10a | 2014-01-15 10:20:56 +0100 | [diff] [blame] | 1284 | |
Ian Rogers | 62d6c77 | 2013-02-27 08:32:07 -0800 | [diff] [blame] | 1285 | // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and |
Hiroshi Yamauchi | 98810e3 | 2016-05-24 14:55:40 -0700 | [diff] [blame] | 1286 | // Dbg::ManageDeoptimization. |
Ian Rogers | 474b6da | 2012-09-25 00:20:38 -0700 | [diff] [blame] | 1287 | ThreadState SetStateUnsafe(ThreadState new_state) { |
| 1288 | ThreadState old_state = GetState(); |
Mathieu Chartier | 8ac9c91 | 2015-10-01 15:58:41 -0700 | [diff] [blame] | 1289 | if (old_state == kRunnable && new_state != kRunnable) { |
| 1290 | // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in |
| 1291 | // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA |
| 1292 | // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock. |
| 1293 | TransitionToSuspendedAndRunCheckpoints(new_state); |
| 1294 | // Since we transitioned to a suspended state, check for and pass any pending suspend barriers. |
| 1295 | PassActiveSuspendBarriers(); |
| 1296 | } else { |
| 1297 | tls32_.state_and_flags.as_struct.state = new_state; |
Yu Li | eac4424 | 2015-06-29 10:50:03 +0800 | [diff] [blame] | 1298 | } |
Ian Rogers | c747cff | 2012-08-31 18:20:08 -0700 | [diff] [blame] | 1299 | return old_state; |
| 1300 | } |
Ian Rogers | c747cff | 2012-08-31 18:20:08 -0700 | [diff] [blame] | 1301 | |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1302 | void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_); |
Ian Rogers | 04d7aa9 | 2013-03-16 14:29:17 -0700 | [diff] [blame] | 1303 | |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1304 | void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_); |
Nicolas Geoffray | a73280d | 2016-02-15 13:05:16 +0000 | [diff] [blame] | 1305 | void DumpStack(std::ostream& os, |
| 1306 | bool dump_native_stack = true, |
Hiroshi Yamauchi | 13c1635 | 2017-01-31 10:15:08 -0800 | [diff] [blame] | 1307 | BacktraceMap* backtrace_map = nullptr, |
| 1308 | bool force_dump_stack = false) const |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 1309 | REQUIRES(!Locks::thread_suspend_count_lock_) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1310 | REQUIRES_SHARED(Locks::mutator_lock_); |
Elliott Hughes | d92bec4 | 2011-09-02 17:04:36 -0700 | [diff] [blame] | 1311 | |
Elliott Hughes | accd83d | 2011-10-17 14:25:58 -0700 | [diff] [blame] | 1312 | // Out-of-line conveniences for debugging in gdb. |
Brian Carlstrom | 7934ac2 | 2013-07-26 10:54:15 -0700 | [diff] [blame] | 1313 | static Thread* CurrentFromGdb(); // Like Thread::Current. |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1314 | // Like Thread::Dump(std::cerr). |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1315 | void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_); |
Elliott Hughes | accd83d | 2011-10-17 14:25:58 -0700 | [diff] [blame] | 1316 | |
Elliott Hughes | 93e74e8 | 2011-09-13 11:07:03 -0700 | [diff] [blame] | 1317 | static void* CreateCallback(void* arg); |
| 1318 | |
Andreas Gampe | 513061a | 2017-06-01 09:17:34 -0700 | [diff] [blame] | 1319 | void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1320 | REQUIRES_SHARED(Locks::mutator_lock_); |
Andreas Gampe | 513061a | 2017-06-01 09:17:34 -0700 | [diff] [blame] | 1321 | void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa) |
| 1322 | REQUIRES_SHARED(Locks::mutator_lock_); |
Elliott Hughes | accd83d | 2011-10-17 14:25:58 -0700 | [diff] [blame] | 1323 | |
Andreas Gampe | 449357d | 2015-06-01 22:29:51 -0700 | [diff] [blame] | 1324 | // Initialize a thread. |
| 1325 | // |
| 1326 | // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. If |
| 1327 | // Init succeeds, the thread takes ownership of it. If Init fails, it is the caller's |
| 1328 | // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to |
| 1329 | // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value |
| 1330 | // of false). |
| 1331 | bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr) |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 1332 | REQUIRES(Locks::runtime_shutdown_lock_); |
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 1333 | void InitCardTable(); |
Ian Rogers | b033c75 | 2011-07-20 12:22:35 -0700 | [diff] [blame] | 1334 | void InitCpu(); |
Alexei Zavjalov | 1efa0a9 | 2014-02-04 02:08:31 +0700 | [diff] [blame] | 1335 | void CleanupCpu(); |
Ian Rogers | 848871b | 2013-08-05 10:56:33 -0700 | [diff] [blame] | 1336 | void InitTlsEntryPoints(); |
Brian Carlstrom | caabb1b | 2011-10-11 18:09:13 -0700 | [diff] [blame] | 1337 | void InitTid(); |
Brian Carlstrom | caabb1b | 2011-10-11 18:09:13 -0700 | [diff] [blame] | 1338 | void InitPthreadKeySelf(); |
Ian Rogers | f4d4da1 | 2014-11-11 16:10:33 -0800 | [diff] [blame] | 1339 | bool InitStackHwm(); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 1340 | |
Elliott Hughes | d6a23bd | 2013-07-16 14:19:52 -0700 | [diff] [blame] | 1341 | void SetUpAlternateSignalStack(); |
| 1342 | void TearDownAlternateSignalStack(); |
| 1343 | |
Mathieu Chartier | 8ac9c91 | 2015-10-01 15:58:41 -0700 | [diff] [blame] | 1344 | ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) |
| 1345 | REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_); |
| 1346 | |
| 1347 | ALWAYS_INLINE void PassActiveSuspendBarriers() |
| 1348 | REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_); |
| 1349 | |
Calin Juravle | 97cbc92 | 2016-04-15 16:16:35 +0100 | [diff] [blame] | 1350 | // Registers the current thread as the jit sensitive thread. Should be called just once. |
| 1351 | static void SetJitSensitiveThread() { |
| 1352 | if (jit_sensitive_thread_ == nullptr) { |
| 1353 | jit_sensitive_thread_ = Thread::Current(); |
| 1354 | } else { |
| 1355 | LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:" |
| 1356 | << Thread::Current()->GetTid(); |
| 1357 | } |
| 1358 | } |
| 1359 | |
Calin Juravle | b2771b4 | 2016-04-07 17:09:25 +0100 | [diff] [blame] | 1360 | static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) { |
| 1361 | is_sensitive_thread_hook_ = is_sensitive_thread_hook; |
| 1362 | } |
| 1363 | |
Hiroshi Yamauchi | 02e7f1a | 2016-10-03 15:32:01 -0700 | [diff] [blame] | 1364 | bool ModifySuspendCountInternal(Thread* self, |
| 1365 | int delta, |
| 1366 | AtomicInteger* suspend_barrier, |
Alex Light | 46f9340 | 2017-06-29 11:59:50 -0700 | [diff] [blame] | 1367 | SuspendReason reason) |
Sebastien Hertz | 1c8f4ff | 2017-04-14 15:05:12 +0200 | [diff] [blame] | 1368 | WARN_UNUSED |
Hiroshi Yamauchi | 02e7f1a | 2016-10-03 15:32:01 -0700 | [diff] [blame] | 1369 | REQUIRES(Locks::thread_suspend_count_lock_); |
| 1370 | |
Alex Light | df00a1e | 2017-11-01 09:29:53 -0700 | [diff] [blame] | 1371 | // Runs a single checkpoint function. If there are no more pending checkpoint functions it will |
| 1372 | // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until |
| 1373 | // the kCheckpointRequest flag is cleared. |
Andreas Gampe | 0a85576 | 2016-10-26 13:43:14 -0700 | [diff] [blame] | 1374 | void RunCheckpointFunction(); |
Hiroshi Yamauchi | 3049324 | 2016-11-03 13:06:52 -0700 | [diff] [blame] | 1375 | void RunEmptyCheckpoint(); |
Andreas Gampe | 0a85576 | 2016-10-26 13:43:14 -0700 | [diff] [blame] | 1376 | |
| 1377 | bool PassActiveSuspendBarriers(Thread* self) |
| 1378 | REQUIRES(!Locks::thread_suspend_count_lock_); |
| 1379 | |
| 1380 | // Install the protected region for implicit stack checks. |
| 1381 | void InstallImplicitProtection(); |
| 1382 | |
Andreas Gampe | 585da95 | 2016-12-02 14:52:29 -0800 | [diff] [blame] | 1383 | template <bool kPrecise> |
| 1384 | void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_); |
| 1385 | |
Andreas Gampe | 2c19f5b | 2016-11-28 08:10:18 -0800 | [diff] [blame] | 1386 | static bool IsAotCompiler(); |
| 1387 | |
Ian Rogers | 474b6da | 2012-09-25 00:20:38 -0700 | [diff] [blame] | 1388 | // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to |
| 1389 | // change from being Suspended to Runnable without a suspend request occurring. |
Chris Dearman | 59cde53 | 2013-12-04 18:53:49 -0800 | [diff] [blame] | 1390 | union PACKED(4) StateAndFlags { |
| 1391 | StateAndFlags() {} |
Ian Rogers | df1ce91 | 2012-11-27 17:07:11 -0800 | [diff] [blame] | 1392 | struct PACKED(4) { |
Ian Rogers | 30e173f | 2012-09-26 14:35:03 -0700 | [diff] [blame] | 1393 | // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See |
| 1394 | // ThreadFlags for bit field meanings. |
| 1395 | volatile uint16_t flags; |
| 1396 | // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable) |
| 1397 | // transitions. Changing to Runnable requires that the suspend_request be part of the atomic |
| 1398 | // operation. If a thread is suspended and a suspend_request is present, a thread may not |
| 1399 | // change to Runnable as a GC or other operation is in progress. |
Ian Rogers | 01ae580 | 2012-09-28 16:14:01 -0700 | [diff] [blame] | 1400 | volatile uint16_t state; |
Ian Rogers | 30e173f | 2012-09-26 14:35:03 -0700 | [diff] [blame] | 1401 | } as_struct; |
Ian Rogers | b8e087e | 2014-07-09 21:12:06 -0700 | [diff] [blame] | 1402 | AtomicInteger as_atomic_int; |
Ian Rogers | 01ae580 | 2012-09-28 16:14:01 -0700 | [diff] [blame] | 1403 | volatile int32_t as_int; |
Chris Dearman | 59cde53 | 2013-12-04 18:53:49 -0800 | [diff] [blame] | 1404 | |
| 1405 | private: |
| 1406 | // gcc does not handle assignments to structs with volatile members correctly. |
| 1407 | // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409 |
| 1408 | DISALLOW_COPY_AND_ASSIGN(StateAndFlags); |
Ian Rogers | 474b6da | 2012-09-25 00:20:38 -0700 | [diff] [blame] | 1409 | }; |
Andreas Gampe | 575e78c | 2014-11-03 23:41:03 -0800 | [diff] [blame] | 1410 | static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size"); |
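// Why the packing matters (an illustrative sketch, not the actual transition code): to
// become Runnable, a thread compare-and-sets the whole 32-bit word through as_atomic_int,
// so observing that no suspend request is pending in `flags` and installing the new
// `state` happen as a single atomic step:
//
//   int32_t old_word = ...;  // flags == 0, state == kSuspended, read beforehand
//   int32_t new_word = ...;  // same flags, state == kRunnable
//   state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(old_word, new_word);
//     // method name assumed from ART's Atomic<> naming convention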
Ian Rogers | 474b6da | 2012-09-25 00:20:38 -0700 | [diff] [blame] | 1411 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1412 | static void ThreadExitCallback(void* arg); |
Elliott Hughes | 5d96a71 | 2012-06-28 12:24:27 -0700 | [diff] [blame] | 1413 | |
Yu Li | eac4424 | 2015-06-29 10:50:03 +0800 | [diff] [blame] | 1414 | // Maximum number of suspend barriers. |
| 1415 | static constexpr uint32_t kMaxSuspendBarriers = 3; |
| 1416 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1417 | // Has Thread::Startup been called? |
| 1418 | static bool is_started_; |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1419 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1420 | // TLS key used to retrieve the Thread*. |
| 1421 | static pthread_key_t pthread_key_self_; |
Ian Rogers | a32a6fd | 2012-02-06 20:18:44 -0800 | [diff] [blame] | 1422 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1423 | // Used to notify threads that they should attempt to resume; they will suspend again if |
| 1424 | // their suspend count is > 0. |
| 1425 | static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_); |
Dave Allison | b373e09 | 2014-02-20 16:06:36 -0800 | [diff] [blame] | 1426 | |
Calin Juravle | b2771b4 | 2016-04-07 17:09:25 +0100 | [diff] [blame] | 1427 | // Hook passed by the framework which returns true |
| 1428 | // when StrictMode events are traced for the current thread. |
| 1429 | static bool (*is_sensitive_thread_hook_)(); |
Calin Juravle | 97cbc92 | 2016-04-15 16:16:35 +0100 | [diff] [blame] | 1430 | // Stores the jit sensitive thread (which for now is the UI thread). |
| 1431 | static Thread* jit_sensitive_thread_; |
Calin Juravle | b2771b4 | 2016-04-07 17:09:25 +0100 | [diff] [blame] | 1432 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1433 | /***********************************************************************************************/ |
| 1434 | // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for |
| 1435 | // pointer size differences. To encourage shorter encoding, more frequently used values appear |
| 1436 | // first if possible. |
| 1437 | /***********************************************************************************************/ |
Elliott Hughes | 6a607ad | 2012-07-13 20:40:00 -0700 | [diff] [blame] | 1438 | |
Zuo Wang | f37a88b | 2014-07-10 04:26:41 -0700 | [diff] [blame] | 1439 | struct PACKED(4) tls_32bit_sized_values { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1440 | // We have no control over the size of 'bool', but want our boolean fields |
| 1441 | // to be 4-byte quantities. |
| 1442 | typedef uint32_t bool32_t; |
Ian Rogers | 22f454c | 2012-09-08 11:06:29 -0700 | [diff] [blame] | 1443 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1444 | explicit tls_32bit_sized_values(bool is_daemon) : |
| 1445 | suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0), |
| 1446 | daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0), |
Mingyao Yang | 1f2d3ba | 2015-05-18 12:12:50 -0700 | [diff] [blame] | 1447 | thread_exit_check_count(0), handling_signal_(false), |
Hiroshi Yamauchi | ee23582 | 2016-08-19 17:03:27 -0700 | [diff] [blame] | 1448 | is_transitioning_to_runnable(false), ready_for_debug_invoke(false), |
Hiroshi Yamauchi | 20a0be0 | 2016-02-19 15:44:06 -0800 | [diff] [blame] | 1449 | debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true), |
Alex Light | 88fd720 | 2017-06-30 08:31:59 -0700 | [diff] [blame] | 1450 | disable_thread_flip_count(0), user_code_suspend_count(0) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1451 | } |
Dave Allison | b373e09 | 2014-02-20 16:06:36 -0800 | [diff] [blame] | 1452 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1453 | union StateAndFlags state_and_flags; |
Andreas Gampe | 575e78c | 2014-11-03 23:41:03 -0800 | [diff] [blame] | 1454 | static_assert(sizeof(union StateAndFlags) == sizeof(int32_t), |
| 1455 | "Size of state_and_flags and int32 are different"); |
Dave Allison | b373e09 | 2014-02-20 16:06:36 -0800 | [diff] [blame] | 1456 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1457 | // A non-zero value is used to tell the current thread to enter a safe point |
| 1458 | // at the next poll. |
| 1459 | int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_); |
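
// A conceptual sketch of how the safe point reacts (assumed flow; the real
// logic lives in Thread::CheckSuspend and must hold the lock named above):
//
//   while (self->GetSuspendCount() > 0) {
//     // Park on resume_cond_ until a resume drops the count to zero.
//     Thread::resume_cond_->Wait(self);
//   }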
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1460 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1461 | // How much of 'suspend_count' is by request of the debugger, used to set things right
| 1462 | // when the debugger detaches. Must be <= suspend_count.
| 1463 | int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_); |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 1464 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1465 | // Thin lock thread id. This is a small integer used by the thin lock implementation. |
| 1466 | // This is not to be confused with the native thread's tid, nor is it the value returned |
| 1467 | // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One |
| 1468 | // important difference between this id and the ids visible to managed code is that these |
| 1469 | // ones get reused (to ensure that they fit in the number of bits available). |
| 1470 | uint32_t thin_lock_thread_id; |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 1471 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1472 | // System thread id. |
| 1473 | uint32_t tid; |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 1474 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1475 | // Is the thread a daemon? |
| 1476 | const bool32_t daemon; |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 1477 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1478 | // A boolean telling us whether we're recursively throwing OOME. |
| 1479 | bool32_t throwing_OutOfMemoryError; |
| 1480 | |
| 1481 | // A positive value implies we're in a region where thread suspension isn't expected. |
| 1482 | uint32_t no_thread_suspension; |
| 1483 | |
| 1484 | // How many times has our pthread key's destructor been called? |
| 1485 | uint32_t thread_exit_check_count; |
Sebastien Hertz | 9f10203 | 2014-05-23 08:59:42 +0200 | [diff] [blame] | 1486 | |
Dave Allison | 648d711 | 2014-07-25 16:15:27 -0700 | [diff] [blame] | 1487 | // True if signal is being handled by this thread. |
| 1488 | bool32_t handling_signal_; |
| 1489 | |
Hiroshi Yamauchi | ee23582 | 2016-08-19 17:03:27 -0700 | [diff] [blame] | 1490 | // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the |
| 1491 | // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
| 1492 | // the rest of them. |
| 1493 | bool32_t is_transitioning_to_runnable; |
Sebastien Hertz | 1558b57 | 2015-02-25 15:05:59 +0100 | [diff] [blame] | 1494 | |
| 1495 | // True if the thread has been suspended by a debugger event. This is |
| 1496 | // used to invoke a method from the debugger, which is only allowed when
| 1497 | // the thread is suspended by an event. |
| 1498 | bool32_t ready_for_debug_invoke; |
Sebastien Hertz | 9d6bf69 | 2015-04-10 12:12:33 +0200 | [diff] [blame] | 1499 | |
| 1500 | // True while the thread is entering a method. This is used to detect the method
| 1501 | // entry event for the debugger.
| 1502 | bool32_t debug_method_entry_; |
Hiroshi Yamauchi | 0b71357 | 2015-06-16 18:29:23 -0700 | [diff] [blame] | 1503 | |
Hiroshi Yamauchi | 0037082 | 2015-08-18 14:47:25 -0700 | [diff] [blame] | 1504 | // True if the GC is in the marking phase. This is used for the CC collector only. It is
| 1505 | // thread local so that we can simplify the logic that checks the read-barrier fast path
| 1506 | // for GC roots.
| 1507 | bool32_t is_gc_marking; |
| 1508 | |
Nicolas Geoffray | 365719c | 2017-03-08 13:11:50 +0000 | [diff] [blame] | 1509 | // Thread "interrupted" status; stays raised until queried or thrown. |
| 1510 | Atomic<bool32_t> interrupted; |
| 1511 | |
Hiroshi Yamauchi | 0b71357 | 2015-06-16 18:29:23 -0700 | [diff] [blame] | 1512 | // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system |
| 1513 | // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference |
| 1514 | // processing of the CC collector only. This is thread local so that we can enable/disable weak |
| 1515 | // ref access by using a checkpoint and avoid a race around the time weak ref access gets |
| 1516 | // disabled and concurrent reference processing begins (if weak ref access is disabled during a |
| 1517 | // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and |
| 1518 | // ReferenceProcessor::EnableSlowPath(). |
| 1519 | bool32_t weak_ref_access_enabled; |
Hiroshi Yamauchi | 20a0be0 | 2016-02-19 15:44:06 -0800 | [diff] [blame] | 1520 | |
| 1521 | // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many |
| 1522 | // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI |
| 1523 | // critical section enter. |
| 1524 | uint32_t disable_thread_flip_count; |
Alex Light | 88fd720 | 2017-06-30 08:31:59 -0700 | [diff] [blame] | 1525 | |
| 1526 | // How much of 'suspend_count' is by request of user code, used to distinguish threads
| 1527 | // suspended by the runtime from those suspended by user code.
| 1528 | // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but the thread-safety
| 1529 | // analysis cannot be told that AssertHeld should be good enough.
| 1530 | int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1531 | } tls32_; |
| 1532 | |
| 1533 | struct PACKED(8) tls_64bit_sized_values { |
Sebastien Hertz | 0747466 | 2015-08-25 15:12:33 +0000 | [diff] [blame] | 1534 | tls_64bit_sized_values() : trace_clock_base(0) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1535 | } |
| 1536 | |
| 1537 | // The clock base used for tracing. |
| 1538 | uint64_t trace_clock_base; |
| 1539 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1540 | RuntimeStats stats; |
| 1541 | } tls64_; |
| 1542 | |
Andreas Gampe | 6aa1370 | 2015-10-28 10:57:25 -0700 | [diff] [blame] | 1543 | struct PACKED(sizeof(void*)) tls_ptr_sized_values { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1544 | tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr), |
Andreas Gampe | 449357d | 2015-06-01 22:29:51 -0700 | [diff] [blame] | 1545 | managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr), |
| 1546 | self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0), |
Nicolas Geoffray | 340dafa | 2016-11-18 16:03:10 +0000 | [diff] [blame] | 1547 | deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr), |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 1548 | top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr), |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1549 | instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr), |
Sebastien Hertz | 0747466 | 2015-08-25 15:12:33 +0000 | [diff] [blame] | 1550 | stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr), |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1551 | frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0), |
Vladimir Marko | 0584647 | 2016-09-14 12:49:57 +0100 | [diff] [blame] | 1552 | last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr), |
Roland Levillain | e71b354 | 2017-01-16 14:58:23 +0000 | [diff] [blame] | 1553 | thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr), |
Mathieu Chartier | 6bc7774 | 2017-04-18 17:46:23 -0700 | [diff] [blame] | 1554 | thread_local_limit(nullptr), |
Vladimir Marko | 0584647 | 2016-09-14 12:49:57 +0100 | [diff] [blame] | 1555 | thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), |
| 1556 | mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr), |
Josh Gao | efd20cb | 2017-02-28 16:53:59 -0800 | [diff] [blame] | 1557 | thread_local_alloc_stack_end(nullptr), |
Alex Light | 848574c | 2017-09-25 16:59:39 -0700 | [diff] [blame] | 1558 | flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr), |
| 1559 | async_exception(nullptr) { |
Mathieu Chartier | 12d625f | 2015-03-13 11:33:37 -0700 | [diff] [blame] | 1560 | std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1561 | } |
| 1562 | |
| 1563 | // The biased card table, see CardTable for details. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1564 | uint8_t* card_table; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1565 | |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1566 | // The pending exception or null. |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1567 | mirror::Throwable* exception; |
| 1568 | |
| 1569 | // The end of this thread's stack. This is the lowest safely-addressable address on the stack. |
| 1570 | // We leave extra space so there's room for the code that throws StackOverflowError. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1571 | uint8_t* stack_end; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1572 | |
| 1573 | // The top of the managed stack often manipulated directly by compiler generated code. |
| 1574 | ManagedStack managed_stack; |
| 1575 | |
| 1576 | // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is |
| 1577 | // normally set to the address of itself. |
| 1578 | uintptr_t* suspend_trigger; |
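
// A sketch of the implicit suspend check (illustrative only; the poll is
// emitted by the compiler and the resulting fault is routed by the SIGSEGV
// handler to a suspend check):
//
//   // Compiled code at a poll point:
//   uintptr_t poll = *tlsPtr_.suspend_trigger;  // Faults once the runtime
//                                               // has written nullptr here.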
| 1579 | |
| 1580 | // Every thread may have an associated JNI environment.
| 1581 | JNIEnvExt* jni_env; |
| 1582 | |
Andreas Gampe | 449357d | 2015-06-01 22:29:51 -0700 | [diff] [blame] | 1583 | // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the |
| 1584 | // created thread. |
| 1585 | JNIEnvExt* tmp_jni_env; |
| 1586 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1587 | // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current |
| 1588 | // is easy but getting the address of Thread::Current is hard. This field can be read off of |
| 1589 | // Thread::Current to give the address. |
| 1590 | Thread* self; |
| 1591 | |
| 1592 | // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread |
| 1593 | // start up, until the thread is registered and the local opeer_ is used. |
| 1594 | mirror::Object* opeer; |
| 1595 | jobject jpeer; |
| 1596 | |
| 1597 | // The "lowest addressable byte" of the stack. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1598 | uint8_t* stack_begin; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1599 | |
| 1600 | // Size of the stack. |
| 1601 | size_t stack_size; |
| 1602 | |
Nicolas Geoffray | 340dafa | 2016-11-18 16:03:10 +0000 | [diff] [blame] | 1603 | // The sampling profiler and AOT verification cannot happen in the same run, so we share
| 1604 | // the same entry for the stack trace and the verifier deps. |
| 1605 | union DepsOrStackTraceSample { |
| 1606 | DepsOrStackTraceSample() { |
| 1607 | verifier_deps = nullptr; |
| 1608 | stack_trace_sample = nullptr; |
| 1609 | } |
| 1610 | // Pointer to previous stack trace captured by sampling profiler. |
| 1611 | std::vector<ArtMethod*>* stack_trace_sample; |
| 1612 | // When doing AOT verification, per-thread VerifierDeps. |
| 1613 | verifier::VerifierDeps* verifier_deps; |
| 1614 | } deps_or_stack_trace_sample; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1615 | |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1616 | // The next thread in the wait set this thread is part of or null if not waiting. |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1617 | Thread* wait_next; |
| 1618 | |
| 1619 | // If we're blocked in MonitorEnter, this is the object we're trying to lock. |
| 1620 | mirror::Object* monitor_enter_object; |
| 1621 | |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1622 | // Top of linked list of handle scopes or null for none. |
Mathieu Chartier | e8a3c57 | 2016-10-11 16:52:17 -0700 | [diff] [blame] | 1623 | BaseHandleScope* top_handle_scope; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1624 | |
| 1625 | // Needed to get the right ClassLoader in JNI_OnLoad, but also |
| 1626 | // useful for testing. |
Ian Rogers | 68d8b42 | 2014-07-17 11:09:10 -0700 | [diff] [blame] | 1627 | jobject class_loader_override; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1628 | |
| 1629 | // Thread local, lazily allocated, long jump context. Used to deliver exceptions. |
| 1630 | Context* long_jump_context; |
| 1631 | |
| 1632 | // Additional stack used by method instrumentation to store method and return pc values. |
| 1633 | // Stored as a pointer since std::deque is not PACKED. |
| 1634 | std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack; |
| 1635 | |
| 1636 | // JDWP invoke-during-breakpoint support. |
| 1637 | DebugInvokeReq* debug_invoke_req; |
| 1638 | |
| 1639 | // JDWP single-stepping support. |
| 1640 | SingleStepControl* single_step_control; |
| 1641 | |
Mingyao Yang | 1f2d3ba | 2015-05-18 12:12:50 -0700 | [diff] [blame] | 1642 | // For GC purposes, a shadow frame record stack that keeps track of:
| 1643 | // 1) shadow frames under construction. |
| 1644 | // 2) deoptimization shadow frames. |
| 1645 | StackedShadowFrameRecord* stacked_shadow_frame_record; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1646 | |
Mingyao Yang | 1f2d3ba | 2015-05-18 12:12:50 -0700 | [diff] [blame] | 1647 | // Deoptimization return value record stack. |
Sebastien Hertz | 0747466 | 2015-08-25 15:12:33 +0000 | [diff] [blame] | 1648 | DeoptimizationContextRecord* deoptimization_context_stack; |
Andreas Gampe | 2a0d4ec | 2014-06-02 22:05:22 -0700 | [diff] [blame] | 1649 | |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1650 | // For the debugger, a linked list that keeps the mapping from frame_id to shadow frame.
| 1651 | // Shadow frames may be created before deoptimization happens so that the debugger can |
| 1652 | // set local values there first. |
| 1653 | FrameIdToShadowFrame* frame_id_to_shadow_frame; |
| 1654 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1655 | // A cached copy of the java.lang.Thread's name. |
| 1656 | std::string* name; |
| 1657 | |
| 1658 | // A cached pthread_t for the pthread underlying this Thread*. |
| 1659 | pthread_t pthread_self; |
| 1660 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1661 | // If no_thread_suspension is > 0, what is causing that assertion.
| 1662 | const char* last_no_thread_suspension_cause; |
| 1663 | |
Mathieu Chartier | 952e1e3 | 2016-06-13 14:04:02 -0700 | [diff] [blame] | 1664 | // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
| 1665 | // requests another checkpoint, it goes to the checkpoint overflow list.
| 1666 | Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_); |
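
// Roughly how a request lands here (a sketch; Thread::RequestCheckpoint does
// this under Locks::thread_suspend_count_lock_ plus a state compare-and-swap):
//
//   if (tlsPtr_.checkpoint_function == nullptr) {
//     tlsPtr_.checkpoint_function = closure;     // Becomes the pending checkpoint.
//   } else {
//     checkpoint_overflow_.push_back(closure);   // Runs after the pending one.
//   }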
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1667 | |
Yu Li | eac4424 | 2015-06-29 10:50:03 +0800 | [diff] [blame] | 1668 | // Pending barriers that require passing, or null if non-pending. Installation is guarded by
| 1669 | // Locks::thread_suspend_count_lock_.
| 1670 | // They work effectively as art::Barrier, but are implemented directly using AtomicInteger and
| 1671 | // futex to avoid the additional cost of the mutex and condition variable used in art::Barrier.
| 1672 | AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers]; |
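
// Conceptually, each barrier counts the threads that still have to pass it and
// the requester waits for the count to reach zero (a sketch of the idea, not
// the exact code in PassActiveSuspendBarriers):
//
//   if (barrier->FetchAndSubSequentiallyConsistent(1) == 1) {
//     // Last thread through wakes the requester blocked on the futex.
//     futex(barrier->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
//   }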
| 1673 | |
Roland Levillain | e71b354 | 2017-01-16 14:58:23 +0000 | [diff] [blame] | 1674 | // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM. |
| 1675 | uint8_t* thread_local_start; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1676 | |
Hiroshi Yamauchi | 7e1ce28 | 2015-12-11 15:46:19 -0800 | [diff] [blame] | 1677 | // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for |
| 1678 | // potentially better performance. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1679 | uint8_t* thread_local_pos; |
| 1680 | uint8_t* thread_local_end; |
Igor Murashkin | af1e299 | 2016-10-12 17:44:50 -0700 | [diff] [blame] | 1681 | |
Mathieu Chartier | 6bc7774 | 2017-04-18 17:46:23 -0700 | [diff] [blame] | 1682 | // Thread local limit is how far we can expand the thread local buffer; it is greater than or
| 1683 | // equal to thread_local_end.
| 1684 | uint8_t* thread_local_limit; |
| 1685 | |
Vladimir Marko | 0584647 | 2016-09-14 12:49:57 +0100 | [diff] [blame] | 1686 | size_t thread_local_objects; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1687 | |
Roland Levillain | e71b354 | 2017-01-16 14:58:23 +0000 | [diff] [blame] | 1688 | // Entrypoint function pointers. |
| 1689 | // TODO: move this to more of a global offset table model to avoid per-thread duplication. |
| 1690 | JniEntryPoints jni_entrypoints; |
| 1691 | QuickEntryPoints quick_entrypoints; |
| 1692 | |
buzbee | 1452bee | 2015-03-06 14:43:04 -0800 | [diff] [blame] | 1693 | // Mterp jump table bases. |
| 1694 | void* mterp_current_ibase; |
| 1695 | void* mterp_default_ibase; |
| 1696 | void* mterp_alt_ibase; |
| 1697 | |
Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1698 | // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread. |
Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1699 | void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread]; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1700 | |
| 1701 | // Thread-local allocation stack data/routines. |
Mathieu Chartier | cb535da | 2015-01-23 13:50:03 -0800 | [diff] [blame] | 1702 | StackReference<mirror::Object>* thread_local_alloc_stack_top; |
| 1703 | StackReference<mirror::Object>* thread_local_alloc_stack_end; |
Chao-ying Fu | 9e36931 | 2014-05-21 11:20:52 -0700 | [diff] [blame] | 1704 | |
| 1705 | // Support for Mutex lock hierarchy bug detection. |
| 1706 | BaseMutex* held_mutexes[kLockLevelCount]; |
Dave Allison | 8ce6b90 | 2014-08-26 11:07:58 -0700 | [diff] [blame] | 1707 | |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 1708 | // The function used for thread flip. |
| 1709 | Closure* flip_function; |
Mathieu Chartier | 12d625f | 2015-03-13 11:33:37 -0700 | [diff] [blame] | 1710 | |
| 1711 | // Current method verifier, used for root marking. |
| 1712 | verifier::MethodVerifier* method_verifier; |
Hiroshi Yamauchi | 0b71357 | 2015-06-16 18:29:23 -0700 | [diff] [blame] | 1713 | |
| 1714 | // Thread-local mark stack for the concurrent copying collector. |
| 1715 | gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack; |
Alex Light | 848574c | 2017-09-25 16:59:39 -0700 | [diff] [blame] | 1716 | |
| 1717 | // The pending async-exception or null. |
| 1718 | mirror::Throwable* async_exception; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1719 | } tlsPtr_; |
| 1720 | |
Nicolas Geoffray | 365719c | 2017-03-08 13:11:50 +0000 | [diff] [blame] | 1721 | // Guards the 'wait_cond_' and 'wait_monitor_' members.
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1722 | Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER; |
| 1723 | |
| 1724 | // Condition variable waited upon during a wait. |
| 1725 | ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_); |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1726 | // Pointer to the monitor lock we're currently waiting on or null if not waiting. |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1727 | Monitor* wait_monitor_ GUARDED_BY(wait_mutex_); |
| 1728 | |
Mathieu Chartier | dfe02f6 | 2016-02-01 20:15:11 -0800 | [diff] [blame] | 1729 | // Debug disable read barrier count; it is only checked in debug builds, and only in the runtime.
| 1730 | uint8_t debug_disallow_read_barrier_ = 0; |
| 1731 | |
Mathieu Chartier | 3f7f03c | 2016-09-26 11:39:52 -0700 | [diff] [blame] | 1732 | // Note that it is not in the packed struct; it may not be accessed during cross compilation.
| 1733 | uintptr_t poison_object_cookie_ = 0; |
| 1734 | |
Mathieu Chartier | 952e1e3 | 2016-06-13 14:04:02 -0700 | [diff] [blame] | 1735 | // Pending extra checkpoints if checkpoint_function_ is already used. |
| 1736 | std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_); |
| 1737 | |
Andreas Gampe | f26bf2d | 2017-01-13 16:47:14 -0800 | [diff] [blame] | 1738 | // Custom TLS field that can be used by plugins. |
| 1739 | // TODO: Generalize once we have more plugins. |
Alex Light | 092a404 | 2017-07-12 08:46:44 -0700 | [diff] [blame] | 1740 | void* custom_tls_; |
Andreas Gampe | f26bf2d | 2017-01-13 16:47:14 -0800 | [diff] [blame] | 1741 | |
Calin Juravle | ccd5695 | 2016-12-15 17:57:38 +0000 | [diff] [blame] | 1742 | // True if the thread is allowed to call back into Java (e.g. during class resolution).
| 1743 | // By default this is true. |
| 1744 | bool can_call_into_java_; |
| 1745 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 1746 | friend class Dbg; // For SetStateUnsafe. |
Mathieu Chartier | 15d3402 | 2014-02-26 17:16:38 -0800 | [diff] [blame] | 1747 | friend class gc::collector::SemiSpace; // For getting stack traces. |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1748 | friend class Runtime; // For CreatePeer. |
Ian Rogers | 5cf9819 | 2014-05-29 21:31:50 -0700 | [diff] [blame] | 1749 | friend class QuickExceptionHandler; // For dumping the stack. |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1750 | friend class ScopedThreadStateChange; |
Mathieu Chartier | 119c6bd | 2014-05-09 14:11:47 -0700 | [diff] [blame] | 1751 | friend class StubTest; // For accessing entrypoints. |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 1752 | friend class ThreadList; // For ~Thread and Destroy. |
| 1753 | |
Andreas Gampe | 4352b45 | 2014-06-04 18:59:01 -0700 | [diff] [blame] | 1754 | friend class EntrypointsOrderTest; // To test the order of tls entries. |
| 1755 | |
Carl Shapiro | 0e5d75d | 2011-07-06 18:28:37 -0700 | [diff] [blame] | 1756 | DISALLOW_COPY_AND_ASSIGN(Thread); |
| 1757 | }; |
Ian Rogers | bdb0391 | 2011-09-14 00:55:44 -0700 | [diff] [blame] | 1758 | |
Mathieu Chartier | 4e2cb09 | 2015-07-22 16:17:51 -0700 | [diff] [blame] | 1759 | class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension { |
Mathieu Chartier | 2d5f39e | 2014-09-19 17:52:37 -0700 | [diff] [blame] | 1760 | public: |
Mingyao Yang | f26828b | 2017-07-27 12:49:01 -0700 | [diff] [blame] | 1761 | ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause, |
| 1762 | bool enabled = true) |
| 1763 | ACQUIRE(Roles::uninterruptible_) |
| 1764 | : enabled_(enabled) { |
| 1765 | if (!enabled_) { |
| 1766 | return; |
| 1767 | } |
Mathieu Chartier | 268764d | 2016-09-13 12:09:38 -0700 | [diff] [blame] | 1768 | if (kIsDebugBuild) { |
| 1769 | self_ = Thread::Current(); |
| 1770 | old_cause_ = self_->StartAssertNoThreadSuspension(cause); |
| 1771 | } else { |
| 1772 | Roles::uninterruptible_.Acquire(); // No-op. |
| 1773 | } |
Mathieu Chartier | 2d5f39e | 2014-09-19 17:52:37 -0700 | [diff] [blame] | 1774 | } |
Mathieu Chartier | 268764d | 2016-09-13 12:09:38 -0700 | [diff] [blame] | 1775 | ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) { |
Mingyao Yang | f26828b | 2017-07-27 12:49:01 -0700 | [diff] [blame] | 1776 | if (!enabled_) { |
| 1777 | return; |
| 1778 | } |
Mathieu Chartier | 268764d | 2016-09-13 12:09:38 -0700 | [diff] [blame] | 1779 | if (kIsDebugBuild) { |
| 1780 | self_->EndAssertNoThreadSuspension(old_cause_); |
| 1781 | } else { |
| 1782 | Roles::uninterruptible_.Release(); // No-op. |
| 1783 | } |
Mathieu Chartier | 2d5f39e | 2014-09-19 17:52:37 -0700 | [diff] [blame] | 1784 | } |
| 1785 | |
| 1786 | private: |
Mathieu Chartier | 268764d | 2016-09-13 12:09:38 -0700 | [diff] [blame] | 1787 | Thread* self_; |
Mingyao Yang | f26828b | 2017-07-27 12:49:01 -0700 | [diff] [blame] | 1788 | const bool enabled_; |
Mathieu Chartier | 268764d | 2016-09-13 12:09:38 -0700 | [diff] [blame] | 1789 | const char* old_cause_; |
Mathieu Chartier | 2d5f39e | 2014-09-19 17:52:37 -0700 | [diff] [blame] | 1790 | }; |
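
// A minimal usage sketch (hypothetical call site): wrap a region that must not
// reach a suspend point, e.g. while holding raw mirror:: pointers that a moving
// GC could otherwise invalidate.
//
//   {
//     ScopedAssertNoThreadSuspension sants("Visiting roots");
//     // ... code that must not suspend; any suspend point here asserts.
//   }  // Destructor ends the no-suspension region.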
| 1791 | |
Mingyao Yang | 1f2d3ba | 2015-05-18 12:12:50 -0700 | [diff] [blame] | 1792 | class ScopedStackedShadowFramePusher { |
| 1793 | public: |
| 1794 | ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type) |
| 1795 | : self_(self), type_(type) { |
| 1796 | self_->PushStackedShadowFrame(sf, type); |
| 1797 | } |
| 1798 | ~ScopedStackedShadowFramePusher() { |
| 1799 | self_->PopStackedShadowFrame(type_); |
| 1800 | } |
| 1801 | |
| 1802 | private: |
| 1803 | Thread* const self_; |
| 1804 | const StackedShadowFrameType type_; |
| 1805 | |
| 1806 | DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher); |
| 1807 | }; |
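
// Usage sketch (hypothetical frame and type): the shadow frame stays pushed for
// the duration of the scope and is popped automatically on exit.
//
//   ScopedStackedShadowFramePusher pusher(
//       self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);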
| 1808 | |
Mathieu Chartier | dfe02f6 | 2016-02-01 20:15:11 -0800 | [diff] [blame] | 1809 | // Only works for debug builds. |
| 1810 | class ScopedDebugDisallowReadBarriers { |
| 1811 | public: |
| 1812 | explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) { |
| 1813 | self_->ModifyDebugDisallowReadBarrier(1); |
| 1814 | } |
| 1815 | ~ScopedDebugDisallowReadBarriers() { |
| 1816 | self_->ModifyDebugDisallowReadBarrier(-1); |
| 1817 | } |
| 1818 | |
| 1819 | private: |
| 1820 | Thread* const self_; |
| 1821 | }; |
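
// Debug-build usage sketch: fail fast if a read barrier runs in a region that
// is expected to be free of them.
//
//   {
//     ScopedDebugDisallowReadBarriers no_rb(Thread::Current());
//     // ... any read barrier executed here trips the debug counter check.
//   }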
| 1822 | |
Hiroshi Yamauchi | ee23582 | 2016-08-19 17:03:27 -0700 | [diff] [blame] | 1823 | class ScopedTransitioningToRunnable : public ValueObject { |
| 1824 | public: |
| 1825 | explicit ScopedTransitioningToRunnable(Thread* self) |
| 1826 | : self_(self) { |
| 1827 | DCHECK_EQ(self, Thread::Current()); |
| 1828 | if (kUseReadBarrier) { |
| 1829 | self_->SetIsTransitioningToRunnable(true); |
| 1830 | } |
| 1831 | } |
| 1832 | |
| 1833 | ~ScopedTransitioningToRunnable() { |
| 1834 | if (kUseReadBarrier) { |
| 1835 | self_->SetIsTransitioningToRunnable(false); |
| 1836 | } |
| 1837 | } |
| 1838 | |
| 1839 | private: |
| 1840 | Thread* const self_; |
| 1841 | }; |
| 1842 | |
Andreas Gampe | 04bbb5b | 2017-01-19 17:49:03 +0000 | [diff] [blame] | 1843 | class ThreadLifecycleCallback { |
| 1844 | public: |
| 1845 | virtual ~ThreadLifecycleCallback() {} |
| 1846 | |
| 1847 | virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0; |
| 1848 | virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0; |
| 1849 | }; |
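
// A minimal sketch of an implementation (hypothetical class; callbacks are
// registered with the runtime's callback mechanism):
//
//   class LoggingThreadCallback : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) override {
//       LOG(INFO) << "Thread starting: " << *self;
//     }
//     void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) override {
//       LOG(INFO) << "Thread exiting: " << *self;
//     }
//   };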
| 1850 | |
Elliott Hughes | 330304d | 2011-08-12 14:28:05 -0700 | [diff] [blame] | 1851 | std::ostream& operator<<(std::ostream& os, const Thread& thread); |
Sebastien Hertz | f795869 | 2015-06-09 14:09:14 +0200 | [diff] [blame] | 1852 | std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& type);
Carl Shapiro | 0e5d75d | 2011-07-06 18:28:37 -0700 | [diff] [blame] | 1853 | |
Carl Shapiro | 0e5d75d | 2011-07-06 18:28:37 -0700 | [diff] [blame] | 1854 | } // namespace art |
| 1855 | |
Brian Carlstrom | fc0e321 | 2013-07-17 14:40:12 -0700 | [diff] [blame] | 1856 | #endif // ART_RUNTIME_THREAD_H_ |