/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <atomic>
#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <memory>
#include <string>

#include "base/atomic.h"
#include "base/bit_field.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/safe_map.h"
#include "base/value_object.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "handle.h"
#include "handle_scope.h"
#include "interpreter/interpreter_cache.h"
#include "javaheapprof/javaheapsampler.h"
#include "jvalue.h"
#include "managed_stack.h"
#include "offsets.h"
#include "read_barrier_config.h"
#include "reflective_handle_scope.h"
#include "runtime_globals.h"
#include "runtime_stats.h"
#include "thread_state.h"

class BacktraceMap;

namespace art {

namespace gc {
namespace accounting {
template<class T> class AtomicStack;
}  // namespace accounting
namespace collector {
class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace instrumentation {
struct InstrumentationStackFrame;
}  // namespace instrumentation

namespace mirror {
class Array;
class Class;
class ClassLoader;
class Object;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
using IntArray = PrimitiveArray<int32_t>;
class StackTraceElement;
class String;
class Throwable;
}  // namespace mirror

namespace verifier {
class MethodVerifier;
class VerifierDeps;
}  // namespace verifier

class ArtMethod;
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
class DeoptimizationContextRecord;
class DexFile;
class FrameIdToShadowFrame;
class IsMarkedVisitor;
class JavaVMExt;
class JNIEnvExt;
class Monitor;
class RootVisitor;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class StackedShadowFrameRecord;
enum class SuspendReason : char;
class Thread;
class ThreadList;
enum VisitRootFlags : uint8_t;

// A piece of data that can be held in the CustomTls. The destructor will be called during thread
// shutdown. The thread the destructor is called on is not necessarily the same thread it was
// stored on.
class TLSData {
 public:
  virtual ~TLSData() {}
};
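
// Illustrative sketch (not part of the original header): a TLSData subclass that owns a native
// buffer. The accessors and key string are assumptions for illustration; note the destructor may
// run on a different thread than the one that stored the data.
//
//   class ScratchBuffer : public TLSData {
//    public:
//     ~ScratchBuffer() override { free(buffer_); }  // May run on another thread at shutdown.
//     void* buffer_ = calloc(1, 4096);
//   };
//   Thread::Current()->SetCustomTLS("scratch-buffer", new ScratchBuffer());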

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum class ThreadFlag : uint32_t {
  // If set, implies that suspend_count_ > 0 and the Thread should enter the safepoint handler.
  kSuspendRequest = 1u << 0,

  // Request that the thread do some checkpoint work and then continue.
  kCheckpointRequest = 1u << 1,

  // Request that the thread do an empty checkpoint and then continue.
  kEmptyCheckpointRequest = 1u << 2,

  // Register that at least 1 suspend barrier needs to be passed.
  kActiveSuspendBarrier = 1u << 3,

  // Marks that a "flip function" needs to be executed on this thread.
  kPendingFlipFunction = 1u << 4,

  // Marks that the "flip function" is being executed by another thread.
  //
  // This is used to guard against multiple threads trying to run the
  // "flip function" for the same thread while the thread is suspended.
  //
  // This is not needed when the thread is running the flip function
  // on its own after transitioning to Runnable.
  kRunningFlipFunction = 1u << 5,

  // Marks that a thread is waiting for the "flip function" to complete.
  //
  // This is used to check if we need to broadcast the completion of the
  // "flip function" to other threads. See also `kRunningFlipFunction`.
  kWaitingForFlipFunction = 1u << 6,

  // Request that compiled JNI stubs do not transition to Native or Runnable with
  // inlined code, but take a slow path for monitoring method entry and exit events.
  kMonitorJniEntryExit = 1u << 7,

  // Indicates the last flag. Used for checking that the flags do not overlap thread state.
  kLastFlag = kMonitorJniEntryExit
};
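
// Illustrative sketch: these flags live in the same 32-bit word as the thread state (the packed
// `StateAndFlags` value read by GetStateAndFlags() below), so a flag test reads the word once and
// checks the bit, as IsSuspended() does:
//
//   StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
//   if (state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest)) {
//     // A suspend request is pending; take the safepoint slow path.
//   }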

enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame,
};

// The type of method that triggers deoptimization. It contains info on whether
// the deoptimized method should advance dex_pc.
enum class DeoptimizationMethodType {
  kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
  kDefault     // dex pc may or may not advance depending on other conditions.
};

// For the CC collector, normal weak reference access can be disabled on a per-thread basis while
// processing references. After finishing, the reference processor asynchronously sets the
// per-thread flags back to kEnabled with release memory ordering semantics. Each mutator thread
// should check its flag with acquire semantics before assuming that it is enabled. However,
// that is often too expensive, so the reading thread sets it to kVisiblyEnabled after seeing it
// kEnabled. The Reference.get() intrinsic can thus read it in relaxed mode, and reread (by
// resorting to the slow path) with acquire semantics if it sees a value of kEnabled rather than
// kVisiblyEnabled.
enum class WeakRefAccessState : int32_t {
  kVisiblyEnabled = 0,  // Enabled, and previously read with acquire load by this thread.
  kEnabled,
  kDisabled
};
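
// A minimal sketch of the fast-path protocol described above (illustrative only; the real logic
// lives in the Reference.get() intrinsic and its slow path). The `state` variable and the
// blocking helper are assumptions:
//
//   std::atomic<WeakRefAccessState> state;
//   WeakRefAccessState s = state.load(std::memory_order_relaxed);   // Fast path: relaxed read.
//   if (s != WeakRefAccessState::kVisiblyEnabled) {
//     s = state.load(std::memory_order_acquire);                    // Slow path: re-read.
//     if (s == WeakRefAccessState::kEnabled) {
//       // Enabled, and now observed with acquire; record that it is visibly enabled.
//       state.store(WeakRefAccessState::kVisiblyEnabled, std::memory_order_relaxed);
//     } else {
//       WaitForWeakRefAccessEnabled();  // kDisabled: hypothetical blocking call.
//     }
//   }
//   // Safe to read the referent here.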

// This should match RosAlloc::kNumThreadLocalSizeBrackets.
static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;

static constexpr size_t kSharedMethodHotnessThreshold = 0xffff;

// Thread's stack layout for implicit stack overflow checks:
//
//   +---------------------+  <- highest address of stack memory
//   |                     |
//   .                     .  <- SP
//   |                     |
//   |                     |
//   +---------------------+  <- stack_end
//   |                     |
//   |  Gap                |
//   |                     |
//   +---------------------+  <- stack_begin
//   |                     |
//   | Protected region    |
//   |                     |
//   +---------------------+  <- lowest address of stack memory
//
// The stack always grows down in memory. At the lowest address is a region of memory
// that is protected with mprotect(PROT_NONE). Any attempt to read/write to this region will
// result in a segmentation fault signal. At any point, the thread's SP will be somewhere
// between the stack_end and the highest address in stack memory. An implicit stack
// overflow check is a read of memory at a certain offset below the current SP (4K typically).
// If the thread's SP is below the stack_end address this will be a read into the protected
// region. If the SP is above the stack_end address, the thread is guaranteed to have
// at least 4K of space. Because stack overflow checks are only performed in generated code,
// if the thread makes a call out to a native function (through JNI), that native function
// might only have 4K of memory (if the SP is adjacent to stack_end).

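// What generated code effectively does at a method entry is a single probing load; a sketch in
// C++ (the SP helper is hypothetical, and real code uses a fixed architecture-specific offset):
//
//   uint8_t* sp = GetCurrentStackPointer();    // Hypothetical helper returning the current SP.
//   volatile uint8_t probe = *(sp - 4 * KB);   // Faults (SIGSEGV) if it lands in the protected
//   (void) probe;                              // region; the fault handler turns this into a
//                                              // StackOverflowError.
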
class Thread {
 public:
  static const size_t kStackOverflowImplicitCheckSize;
  static constexpr bool kVerifyStack = kIsDebugBuild;

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);
  // Attaches the calling native thread to the runtime, returning the new native peer.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  // Get the currently executing thread, frequently referred to as 'self'. This call has
  // reasonably high cost and so we favor passing self around when possible.
  // TODO: mark as PURE so the compiler may coalesce and remove?
  static Thread* Current();

  // On a runnable thread, check for pending thread suspension request and handle if pending.
  void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);

  // Process pending thread suspension request and handle if pending.
  void CheckSuspend(bool implicit = false) REQUIRES_SHARED(Locks::mutator_lock_);

  // Process a pending empty checkpoint if pending.
  void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
  void CheckEmptyCheckpointFromMutex();

  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                   ObjPtr<mirror::Object> thread_peer)
      REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
      REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  template<PointerSize size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os,
            bool dump_native_stack = true,
            BacktraceMap* backtrace_map = nullptr,
            bool force_dump_stack = false) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os,
                     bool check_suspended = true,
                     bool dump_locks = true) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in
  // which case we use 'tid' to identify the thread, and we'll include as much information as
  // we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ThreadState GetState() const {
    return GetStateAndFlags(std::memory_order_relaxed).GetState();
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
                                               Locks::user_code_suspension_lock_) {
    return tls32_.user_code_suspend_count;
  }

  bool IsSuspended() const {
    StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    return state_and_flags.GetState() != ThreadState::kRunnable &&
           state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest);
  }

  void DecrDefineClassCount() {
    tls32_.define_class_counter--;
  }

  void IncrDefineClassCount() {
    tls32_.define_class_counter++;
  }
  uint32_t GetDefineClassCount() const {
    return tls32_.define_class_counter;
  }

  // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
  // release thread_suspend_count_lock_ internally.
  ALWAYS_INLINE
  bool ModifySuspendCount(Thread* self,
                          int delta,
                          AtomicInteger* suspend_barrier,
                          SuspendReason reason)
      WARN_UNUSED
      REQUIRES(Locks::thread_suspend_count_lock_);

  // Requests a checkpoint closure to run on another thread. The closure will be run when the
  // thread notices the request, either in an explicit runtime CheckSuspend() call, or in a call
  // originating from a compiler generated suspend point check. This returns true if the closure
  // was added and will (eventually) be executed. It returns false otherwise.
  //
  // Since multiple closures can be queued and some closures can delay other threads from running,
  // no closure should attempt to suspend another thread while running.
  // TODO We should add some debug option that verifies this.
  //
  // This guarantees that the RequestCheckpoint invocation happens-before the function invocation:
  // RequestCheckpointFunction holds thread_suspend_count_lock_, and RunCheckpointFunction
  // acquires it.
  bool RequestCheckpoint(Closure* function)
      REQUIRES(Locks::thread_suspend_count_lock_);
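
  // Illustrative usage sketch: run a closure on `target` at its next suspend point. The
  // DumpClosure type and the `self`/`target` variables are hypothetical; Closure, ShortDump()
  // and the locking requirement come from this header.
  //
  //   class DumpClosure : public Closure {
  //    public:
  //     void Run(Thread* thread) override {
  //       std::ostringstream os;
  //       thread->ShortDump(os);
  //       LOG(INFO) << os.str();
  //     }
  //   };
  //   DumpClosure closure;
  //   {
  //     MutexLock mu(self, *Locks::thread_suspend_count_lock_);
  //     if (!target->RequestCheckpoint(&closure)) {
  //       // Target was not runnable; the closure was not queued and will not run.
  //     }
  //   }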

  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This
  // is because Thread::Current() needs to go to sleep to allow the targeted thread to execute the
  // checkpoint for us if it is Runnable. The suspend_state is the state that the thread will go
  // into while it is awaiting the checkpoint to be run.
  // NB Passing ThreadState::kRunnable may cause the current thread to wait in a condition
  // variable while holding the mutator_lock_. Callers should ensure that this will not cause any
  // problems for the closure or the rest of the system.
  // NB Since multiple closures can be queued and some closures can delay other threads from
  // running, no closure should attempt to suspend another thread while running.
  bool RequestSynchronousCheckpoint(Closure* function,
                                    ThreadState suspend_state = ThreadState::kWaiting)
      REQUIRES_SHARED(Locks::mutator_lock_)
      RELEASE(Locks::thread_list_lock_)
      REQUIRES(!Locks::thread_suspend_count_lock_);

  bool RequestEmptyCheckpoint()
      REQUIRES(Locks::thread_suspend_count_lock_);

  // Set the flip function. This is done with all threads suspended, except for the calling
  // thread.
  void SetFlipFunction(Closure* function);

  // Ensure that the thread flip function has started running. If no other thread is executing
  // it, the calling thread shall run the flip function and then notify other threads
  // that have tried to do that concurrently. After this function returns, the
  // `ThreadFlag::kPendingFlipFunction` is cleared but another thread may still
  // run the flip function as indicated by the `ThreadFlag::kRunningFlipFunction`.
  void EnsureFlipFunctionStarted(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);

  // Wait for the flip function to complete if still running on another thread.
  void WaitForFlipFunction(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);

  gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
    CHECK(kUseReadBarrier);
    return tlsPtr_.thread_local_mark_stack;
  }
  void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
    CHECK(kUseReadBarrier);
    tlsPtr_.thread_local_mark_stack = stack;
  }

  // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
  // the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck(bool implicit = false)
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
  ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
      REQUIRES(!Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_);

  // Transition from runnable into a state where mutator privileges are denied. Releases share of
  // mutator lock.
  ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);

  // Once called, thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
    Roles::uninterruptible_.Acquire();  // No-op.
    if (kIsDebugBuild) {
      CHECK(cause != nullptr);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      CHECK_IMPLIES(old_cause == nullptr, tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
    Roles::uninterruptible_.Release();  // No-op.
  }
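
  // Illustrative pairing sketch for the two methods above (the cause string is arbitrary):
  //
  //   Thread* self = Thread::Current();
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Using raw mirror pointers");
  //   // ... code that must not reach a suspend point ...
  //   self->EndAssertNoThreadSuspension(old_cause);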

  // End region where no thread suspension is expected. Returns the current open region in case
  // we want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if
  // no_thread_suspension is larger than one.
  const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
    const char* ret = nullptr;
    if (kIsDebugBuild) {
      CHECK_EQ(tls32_.no_thread_suspension, 1u);
      tls32_.no_thread_suspension--;
      ret = tlsPtr_.last_no_thread_suspension_cause;
      tlsPtr_.last_no_thread_suspension_cause = nullptr;
    }
    Roles::uninterruptible_.Release();  // No-op.
    return ret;
  }

  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  // Return true if thread suspension is allowable.
  bool IsThreadSuspensionAllowable() const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  size_t NumberOfHeldMutexes() const;

  bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the priority of this thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  int GetNativePriority() const;
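
  // Illustrative sketch: attaching a native worker and reading back its managed-compatible
  // priority. Attach() is declared above; the thread name and argument values are arbitrary:
  //
  //   Thread* self = Thread::Attach("NativeWorker",
  //                                 /*as_daemon=*/ false,
  //                                 /*thread_group=*/ nullptr,
  //                                 /*create_peer=*/ true);
  //   int priority = self->GetNativePriority();  // 1..10, derived from the OS "nice" value.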

  // Guaranteed to be non-zero.
  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
  ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }
  // GetPeer is not safe if called on another thread in the middle of the CC thread flip: the
  // thread's stack may not have been flipped yet, so the peer may be a from-space (stale)
  // reference. This function will explicitly mark/forward it.
  mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  bool IsAsyncExceptionPending() const {
    return tlsPtr_.async_exception != nullptr;
  }

  mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.exception;
  }

  void AssertPendingException() const;
  void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);

  // Set an exception that is asynchronously thrown from a different thread. This will be checked
  // periodically and might overwrite the current 'Exception'. This can only be called from a
  // checkpoint.
  //
  // The caller should also make sure that the thread has been deoptimized so that the exception
  // can be detected on back-edges.
  void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
  }

  // Move the current async-exception to the main exception. This should be called when the
  // current thread is ready to deal with any async exceptions. Returns true if there is an async
  // exception that needs to be dealt with, false otherwise.
  bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);

  // Find the catch block and perform a long jump to the appropriate exception handler.
  NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    if (tlsPtr_.long_jump_context != nullptr) {
      ReleaseLongJumpContextInternal();
    }
    tlsPtr_.long_jump_context = context;
  }
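
  // Illustrative get/release pattern for the cached long-jump context above (the surrounding
  // stack-walking code is elided):
  //
  //   Context* context = self->GetLongJumpContext();
  //   // ... fill `context` while walking the stack, then long-jump or abandon it ...
  //   self->ReleaseLongJumpContext(context);  // Returns it to the thread-local cache.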

  // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
  // abort the runtime iff abort_on_error is true.
  ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
                              bool check_suspended = true,
                              bool abort_on_error = true) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns whether the given exception was thrown by the current Java method being executed
  // (Note that this includes native Java methods).
  bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetTopOfStack(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
  }

  void SetTopOfStackTagged(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
  }

  // If 'msg' is null, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  // If 'msg' is null, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Roles::uninterruptible_);
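
  // Illustrative usage of the Throw* helpers above; the exception class is named by its JNI
  // descriptor, and the message and variables are arbitrary:
  //
  //   self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
  //                            "fromIndex > toIndex (%d > %d)", from_index, to_index);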

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // Notify this thread's thread-group that this thread has started.
  // Note: the given thread-group is used as a fast path and verified in debug build. If the
  // value is null, the thread's thread-group is loaded from the peer.
  void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Convert a jobject into an ObjPtr<mirror::Object>.
  ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
  // Checks if the weak global ref has been cleared by the GC without decoding it.
  bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
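
  // Illustrative decode sketch: the caller must hold the mutator lock shared, typically via a
  // ScopedObjectAccess (assumed here) so the thread is Runnable while touching the raw object:
  //
  //   ScopedObjectAccess soa(self);
  //   ObjPtr<mirror::Object> o = self->DecodeJObject(jobj);
  //   if (o == nullptr) { /* Null reference, or a weak global cleared by the GC. */ }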

  mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted();
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted();
  void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
  void SetInterrupted(bool i) {
    tls32_.interrupted.store(i, std::memory_order_seq_cst);
  }
  void Notify() REQUIRES(!wait_mutex_);

  ALWAYS_INLINE void PoisonObjectPointers() {
    ++poison_object_cookie_;
  }

  ALWAYS_INLINE static void PoisonObjectPointersIfDebug();

  ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
    return poison_object_cookie_;
  }

  // Parking for 0ns of relative time means an untimed park; a negative time (though it should be
  // handled in Java code) returns immediately.
  void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
  void Unpark();
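
  // Illustrative park/unpark sketch (the `self`/`target` threads and their ordering are
  // hypothetical):
  //
  //   self->Park(/*is_absolute=*/ false, /*time=*/ 0);  // Untimed park; blocks until unparked.
  //   // ... on another thread ...
  //   target->Unpark();                                 // Wakes `target` if it is parked.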
| 661 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 662 | private: |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 663 | void NotifyLocked(Thread* self) REQUIRES(wait_mutex_); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 664 | |
| 665 | public: |
| 666 | Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) { |
| 667 | return wait_mutex_; |
| 668 | } |
| 669 | |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 670 | ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 671 | return wait_cond_; |
| 672 | } |
| 673 | |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 674 | Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 675 | return wait_monitor_; |
| 676 | } |
| 677 | |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 678 | void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 679 | wait_monitor_ = mon; |
| 680 | } |
| 681 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 682 | // Waiter link-list support. |
| 683 | Thread* GetWaitNext() const { |
| 684 | return tlsPtr_.wait_next; |
| 685 | } |
| 686 | |
| 687 | void SetWaitNext(Thread* next) { |
| 688 | tlsPtr_.wait_next = next; |
| 689 | } |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 690 | |
Ian Rogers | 68d8b42 | 2014-07-17 11:09:10 -0700 | [diff] [blame] | 691 | jobject GetClassLoaderOverride() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 692 | return tlsPtr_.class_loader_override; |
buzbee | c143c55 | 2011-08-20 17:38:58 -0700 | [diff] [blame] | 693 | } |
| 694 | |
Ian Rogers | 68d8b42 | 2014-07-17 11:09:10 -0700 | [diff] [blame] | 695 | void SetClassLoaderOverride(jobject class_loader_override); |
buzbee | c143c55 | 2011-08-20 17:38:58 -0700 | [diff] [blame] | 696 | |
Ian Rogers | aaa2080 | 2011-09-11 21:47:37 -0700 | [diff] [blame] | 697 | // Create the internal representation of a stack trace, which is more time-
Sebastien Hertz | ee1d79a | 2014-02-21 15:46:30 +0100 | [diff] [blame] | 698 | // and space-efficient to compute than the StackTraceElement[].
Mathieu Chartier | 2b7c4d1 | 2014-05-19 10:52:16 -0700 | [diff] [blame] | 699 | jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 700 | REQUIRES_SHARED(Locks::mutator_lock_); |
Ian Rogers | aaa2080 | 2011-09-11 21:47:37 -0700 | [diff] [blame] | 701 | |
Elliott Hughes | 01158d7 | 2011-09-19 19:47:10 -0700 | [diff] [blame] | 702 | // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 703 | // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many |
| 704 | // frames as will fit are written into the given array. If stack_depth is non-null, it's updated |
Elliott Hughes | 01158d7 | 2011-09-19 19:47:10 -0700 | [diff] [blame] | 705 | // with the number of valid frames in the returned array. |
Mathieu Chartier | 2b7c4d1 | 2014-05-19 10:52:16 -0700 | [diff] [blame] | 706 | static jobjectArray InternalStackTraceToStackTraceElementArray( |
| 707 | const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, |
| 708 | jobjectArray output_array = nullptr, int* stack_depth = nullptr) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 709 | REQUIRES_SHARED(Locks::mutator_lock_); |
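| | // Illustrative two-step use (a sketch; assumes `soa` is a
| | // ScopedObjectAccess already on the call stack):
| | //   jobject internal = soa.Self()->CreateInternalStackTrace(soa);
| | //   jobjectArray elements =
| | //       Thread::InternalStackTraceToStackTraceElementArray(soa, internal);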
Shih-wei Liao | 55df06b | 2011-08-26 14:39:27 -0700 | [diff] [blame] | 710 | |
Andreas Gampe | fb6b0b1 | 2017-12-11 20:47:56 -0800 | [diff] [blame] | 711 | jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const |
| 712 | REQUIRES_SHARED(Locks::mutator_lock_); |
| 713 | |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 714 | bool HasDebuggerShadowFrames() const { |
| 715 | return tlsPtr_.frame_id_to_shadow_frame != nullptr; |
| 716 | } |
| 717 | |
Andreas Gampe | 513061a | 2017-06-01 09:17:34 -0700 | [diff] [blame] | 718 | void VisitRoots(RootVisitor* visitor, VisitRootFlags flags) |
Andreas Gampe | 585da95 | 2016-12-02 14:52:29 -0800 | [diff] [blame] | 719 | REQUIRES_SHARED(Locks::mutator_lock_); |
Elliott Hughes | 410c0c8 | 2011-09-01 17:58:25 -0700 | [diff] [blame] | 720 | |
Alex Light | 55eccdf | 2019-10-07 13:51:13 +0000 | [diff] [blame] | 721 | void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) |
| 722 | REQUIRES(Locks::mutator_lock_); |
| 723 | |
Andreas Gampe | b486a98 | 2017-06-01 13:45:54 -0700 | [diff] [blame] | 724 | void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) { |
| 725 | if (kVerifyStack) { |
| 726 | VerifyStackImpl(); |
| 727 | } |
| 728 | } |
jeffhao | 2504552 | 2012-03-13 19:34:37 -0700 | [diff] [blame] | 729 | |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 730 | // |
| 731 | // Offsets of various members of native Thread class, used by compiled code. |
| 732 | // |
| 733 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 734 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 735 | static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 736 | return ThreadOffset<pointer_size>( |
| 737 | OFFSETOF_MEMBER(Thread, tls32_) + |
| 738 | OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id)); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 739 | } |
| 740 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 741 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 742 | static constexpr ThreadOffset<pointer_size> InterruptedOffset() { |
Nicolas Geoffray | 365719c | 2017-03-08 13:11:50 +0000 | [diff] [blame] | 743 | return ThreadOffset<pointer_size>( |
| 744 | OFFSETOF_MEMBER(Thread, tls32_) + |
| 745 | OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted)); |
| 746 | } |
| 747 | |
| 748 | template<PointerSize pointer_size> |
Vladimir Marko | 01b6552 | 2020-10-28 15:43:54 +0000 | [diff] [blame] | 749 | static constexpr ThreadOffset<pointer_size> WeakRefAccessEnabledOffset() { |
| 750 | return ThreadOffset<pointer_size>( |
| 751 | OFFSETOF_MEMBER(Thread, tls32_) + |
| 752 | OFFSETOF_MEMBER(tls_32bit_sized_values, weak_ref_access_enabled)); |
| 753 | } |
| 754 | |
| 755 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 756 | static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 757 | return ThreadOffset<pointer_size>( |
| 758 | OFFSETOF_MEMBER(Thread, tls32_) + |
| 759 | OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags)); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 760 | } |
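| | // Sketch of how a compiler backend might consume such an offset (an
| | // assumption about codegen, not actual backend code):
| | //   uint32_t off = Thread::ThreadFlagsOffset<PointerSize::k64>().Uint32Value();
| | //   // emit e.g. "ldr w0, [xSELF, #off]" to poll the state-and-flags word.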
| 761 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 762 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 763 | static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() { |
Roland Levillain | 7c1559a | 2015-12-15 10:55:36 +0000 | [diff] [blame] | 764 | return ThreadOffset<pointer_size>( |
| 765 | OFFSETOF_MEMBER(Thread, tls32_) + |
| 766 | OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking)); |
| 767 | } |
| 768 | |
Igor Murashkin | ae7ff92 | 2016-10-06 14:59:19 -0700 | [diff] [blame] | 769 | static constexpr size_t IsGcMarkingSize() { |
| 770 | return sizeof(tls32_.is_gc_marking); |
| 771 | } |
| 772 | |
Nicolas Geoffray | f9ae8e3 | 2022-02-15 22:54:11 +0000 | [diff] [blame] | 773 | template<PointerSize pointer_size> |
| 774 | static constexpr ThreadOffset<pointer_size> SharedMethodHotnessOffset() { |
| 775 | return ThreadOffset<pointer_size>( |
| 776 | OFFSETOF_MEMBER(Thread, tls32_) + |
| 777 | OFFSETOF_MEMBER(tls_32bit_sized_values, shared_method_hotness)); |
| 778 | } |
| 779 | |
Nicolas Geoffray | b331feb | 2016-02-05 16:51:53 +0000 | [diff] [blame] | 780 | // Deoptimize the Java stack. |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 781 | void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_); |
Nicolas Geoffray | b331feb | 2016-02-05 16:51:53 +0000 | [diff] [blame] | 782 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 783 | private: |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 784 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 785 | static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 786 | size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_); |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 787 | size_t scale = (pointer_size > kRuntimePointerSize) ? |
| 788 | static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1; |
| 789 | size_t shrink = (kRuntimePointerSize > pointer_size) ? |
| 790 | static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 791 | return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink)); |
Ian Rogers | 07ec8e1 | 2012-12-01 01:26:51 -0800 | [diff] [blame] | 792 | } |
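| | // Worked example (illustrative): cross-compiling 32-bit code from a 64-bit
| | // runtime gives scale == 1 and shrink == 2, so a tlsPtr_ slot at host
| | // offset 16 maps to target offset base + 16 / 2 = base + 8.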
| 793 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 794 | public: |
Nicolas Geoffray | a00b54b | 2019-12-03 14:36:42 +0000 | [diff] [blame] | 795 | template<PointerSize pointer_size> |
| 796 | static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset( |
| 797 | size_t quick_entrypoint_offset) { |
| 798 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 799 | OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset); |
| 800 | } |
| 801 | |
| 802 | static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset, |
Nicolas Geoffray | e3f775b | 2019-12-04 14:41:52 +0000 | [diff] [blame] | 803 | PointerSize pointer_size) { |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 804 | if (pointer_size == PointerSize::k32) { |
| 805 | return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset). |
| 806 | Uint32Value(); |
Jeff Hao | 848f70a | 2014-01-15 13:49:50 -0800 | [diff] [blame] | 807 | } else { |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 808 | return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset). |
| 809 | Uint32Value(); |
Jeff Hao | 848f70a | 2014-01-15 13:49:50 -0800 | [diff] [blame] | 810 | } |
| 811 | } |
| 812 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 813 | template<PointerSize pointer_size> |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 814 | static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) { |
| 815 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 816 | OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 817 | } |
| 818 | |
Roland Levillain | 97c4646 | 2017-05-11 14:04:03 +0100 | [diff] [blame] | 819 | // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`. |
| 820 | template <PointerSize pointer_size> |
Nicolas Geoffray | a00b54b | 2019-12-03 14:36:42 +0000 | [diff] [blame] | 821 | static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) { |
Roland Levillain | 97c4646 | 2017-05-11 14:04:03 +0100 | [diff] [blame] | 822 | // The entry point list defines 30 ReadBarrierMarkRegX entry points. |
| 823 | DCHECK_LT(reg, 30u); |
| 824 | // The ReadBarrierMarkRegX entry points are ordered by increasing |
| 825 | // register number in Thread::tlsPtr_.quick_entrypoints.
| 826 | return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value() |
| 827 | + static_cast<size_t>(pointer_size) * reg; |
| 828 | } |
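| | // Worked example (illustrative): for pointer_size == PointerSize::k64 and
| | // reg == 5, this returns the offset of pReadBarrierMarkReg00 plus 8 * 5,
| | // i.e. the slot of pReadBarrierMarkReg05 in the entry point table.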
| 829 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 830 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 831 | static constexpr ThreadOffset<pointer_size> SelfOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 832 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self)); |
| 833 | } |
| 834 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 835 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 836 | static constexpr ThreadOffset<pointer_size> ExceptionOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 837 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception)); |
| 838 | } |
| 839 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 840 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 841 | static constexpr ThreadOffset<pointer_size> PeerOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 842 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer)); |
| 843 | } |
| 844 | |
| 845 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 846 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 847 | static constexpr ThreadOffset<pointer_size> CardTableOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 848 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table)); |
| 849 | } |
| 850 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 851 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 852 | static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 853 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 854 | OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger)); |
Dave Allison | b373e09 | 2014-02-20 16:06:36 -0800 | [diff] [blame] | 855 | } |
| 856 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 857 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 858 | static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() { |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 859 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 860 | thread_local_pos)); |
Hiroshi Yamauchi | e01a520 | 2015-03-19 12:35:04 -0700 | [diff] [blame] | 861 | } |
| 862 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 863 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 864 | static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() { |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 865 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 866 | thread_local_end)); |
Hiroshi Yamauchi | e01a520 | 2015-03-19 12:35:04 -0700 | [diff] [blame] | 867 | } |
| 868 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 869 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 870 | static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() { |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 871 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 872 | thread_local_objects)); |
Hiroshi Yamauchi | e01a520 | 2015-03-19 12:35:04 -0700 | [diff] [blame] | 873 | } |
| 874 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 875 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 876 | static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() { |
Hiroshi Yamauchi | dc412b6 | 2015-10-15 12:26:57 -0700 | [diff] [blame] | 877 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 878 | rosalloc_runs)); |
| 879 | } |
| 880 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 881 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 882 | static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() { |
Hiroshi Yamauchi | dc412b6 | 2015-10-15 12:26:57 -0700 | [diff] [blame] | 883 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 884 | thread_local_alloc_stack_top)); |
| 885 | } |
| 886 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 887 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 888 | static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() { |
Hiroshi Yamauchi | dc412b6 | 2015-10-15 12:26:57 -0700 | [diff] [blame] | 889 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 890 | thread_local_alloc_stack_end)); |
| 891 | } |
| 892 | |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 893 | // Size of the stack, excluding any space reserved for stack-overflow handling.
jeffhao | d752132 | 2012-11-21 15:38:24 -0800 | [diff] [blame] | 894 | size_t GetStackSize() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 895 | return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin); |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 896 | } |
| 897 | |
Andreas Gampe | 639b2b1 | 2019-01-08 10:32:50 -0800 | [diff] [blame] | 898 | ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const; |
Nicolas Geoffray | 535a3fb | 2014-07-22 15:17:38 +0100 | [diff] [blame] | 899 | |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 900 | uint8_t* GetStackEnd() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 901 | return tlsPtr_.stack_end; |
jeffhao | d752132 | 2012-11-21 15:38:24 -0800 | [diff] [blame] | 902 | } |
| 903 | |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 904 | // Set the stack end to the value to be used while handling a stack overflow.
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 905 | void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_); |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 906 | |
| 907 | // Reset the stack end to the value to be used during regular execution.
Andreas Gampe | 639b2b1 | 2019-01-08 10:32:50 -0800 | [diff] [blame] | 908 | ALWAYS_INLINE void ResetDefaultStackEnd(); |
Ian Rogers | 932746a | 2011-09-22 18:57:50 -0700 | [diff] [blame] | 909 | |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 910 | bool IsHandlingStackOverflow() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 911 | return tlsPtr_.stack_end == tlsPtr_.stack_begin; |
Ian Rogers | 120f1c7 | 2012-09-28 17:17:10 -0700 | [diff] [blame] | 912 | } |
| 913 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 914 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 915 | static constexpr ThreadOffset<pointer_size> StackEndOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 916 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 917 | OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end)); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 918 | } |
| 919 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 920 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 921 | static constexpr ThreadOffset<pointer_size> JniEnvOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 922 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 923 | OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env)); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 924 | } |
| 925 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 926 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 927 | static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 928 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 929 | OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) + |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 930 | ManagedStack::TaggedTopQuickFrameOffset()); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 931 | } |
| 932 | |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 933 | const ManagedStack* GetManagedStack() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 934 | return &tlsPtr_.managed_stack; |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 935 | } |
| 936 | |
| 937 | // Linked list recording fragments of managed stack. |
| 938 | void PushManagedStackFragment(ManagedStack* fragment) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 939 | tlsPtr_.managed_stack.PushManagedStackFragment(fragment); |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 940 | } |
| 941 | void PopManagedStackFragment(const ManagedStack& fragment) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 942 | tlsPtr_.managed_stack.PopManagedStackFragment(fragment); |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 943 | } |
| 944 | |
Andreas Gampe | 513061a | 2017-06-01 09:17:34 -0700 | [diff] [blame] | 945 | ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame); |
| 946 | ALWAYS_INLINE ShadowFrame* PopShadowFrame(); |
Logan Chien | f7ad17e | 2012-03-15 03:10:03 +0800 | [diff] [blame] | 947 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 948 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 949 | static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 950 | return ThreadOffsetFromTlsPtr<pointer_size>( |
| 951 | OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) + |
| 952 | ManagedStack::TopShadowFrameOffset()); |
TDYa127 | d668a06 | 2012-04-13 12:36:57 -0700 | [diff] [blame] | 953 | } |
| 954 | |
Vladimir Marko | cedec9d | 2021-02-08 16:16:13 +0000 | [diff] [blame] | 955 | // Is the given obj in one of this thread's JNI transition frames? |
| 956 | bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_); |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 957 | |
Eric Holk | f1e1dd1 | 2020-08-21 15:38:12 -0700 | [diff] [blame] | 958 | void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 959 | REQUIRES_SHARED(Locks::mutator_lock_); |
Ian Rogers | 0399dde | 2012-06-06 17:09:28 -0700 | [diff] [blame] | 960 | |
Vladimir Marko | 1d326f9 | 2021-06-01 09:26:55 +0100 | [diff] [blame] | 961 | BaseHandleScope* GetTopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 962 | return tlsPtr_.top_handle_scope; |
Ian Rogers | 1f53934 | 2012-10-03 21:09:42 -0700 | [diff] [blame] | 963 | } |
| 964 | |
Vladimir Marko | 1d326f9 | 2021-06-01 09:26:55 +0100 | [diff] [blame] | 965 | void PushHandleScope(BaseHandleScope* handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) { |
Ian Rogers | 59c0706 | 2014-10-10 13:03:39 -0700 | [diff] [blame] | 966 | DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope); |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 967 | tlsPtr_.top_handle_scope = handle_scope; |
| 968 | } |
| 969 | |
Vladimir Marko | 1d326f9 | 2021-06-01 09:26:55 +0100 | [diff] [blame] | 970 | BaseHandleScope* PopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | e8a3c57 | 2016-10-11 16:52:17 -0700 | [diff] [blame] | 971 | BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope; |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 972 | DCHECK(handle_scope != nullptr); |
| 973 | tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink(); |
| 974 | return handle_scope; |
Ian Rogers | 1f53934 | 2012-10-03 21:09:42 -0700 | [diff] [blame] | 975 | } |
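| | // Typical (illustrative) pairing, as performed by StackHandleScope's
| | // constructor and destructor (a sketch, not the actual implementation):
| | //   StackHandleScope<1> hs(self);               // pushes itself
| | //   Handle<mirror::Object> h = hs.NewHandle(obj);
| | //   ...                                         // ~StackHandleScope pops it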
Brian Carlstrom | 40381fb | 2011-10-19 14:13:40 -0700 | [diff] [blame] | 976 | |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 977 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 978 | static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() { |
Mathieu Chartier | eb8167a | 2014-05-07 15:43:14 -0700 | [diff] [blame] | 979 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 980 | top_handle_scope)); |
Elliott Hughes | be759c6 | 2011-09-08 19:38:21 -0700 | [diff] [blame] | 981 | } |
| 982 | |
Vladimir Marko | ce2a344 | 2021-11-24 15:10:26 +0000 | [diff] [blame] | 983 | template<PointerSize pointer_size> |
Vladimir Marko | e74e0ce | 2021-12-08 14:16:21 +0000 | [diff] [blame] | 984 | static constexpr ThreadOffset<pointer_size> MutatorLockOffset() { |
| 985 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 986 | mutator_lock)); |
| 987 | } |
| 988 | |
| 989 | template<PointerSize pointer_size> |
Vladimir Marko | ce2a344 | 2021-11-24 15:10:26 +0000 | [diff] [blame] | 990 | static constexpr ThreadOffset<pointer_size> HeldMutexOffset(LockLevel level) { |
| 991 | DCHECK_LT(enum_cast<size_t>(level), arraysize(tlsPtr_.held_mutexes)); |
| 992 | return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, |
| 993 | held_mutexes[level])); |
| 994 | } |
| 995 | |
Alex Light | 55eccdf | 2019-10-07 13:51:13 +0000 | [diff] [blame] | 996 | BaseReflectiveHandleScope* GetTopReflectiveHandleScope() { |
| 997 | return tlsPtr_.top_reflective_handle_scope; |
| 998 | } |
| 999 | |
| 1000 | void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) { |
| 1001 | DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope); |
| 1002 | DCHECK_EQ(scope->GetThread(), this); |
| 1003 | tlsPtr_.top_reflective_handle_scope = scope; |
| 1004 | } |
| 1005 | |
| 1006 | BaseReflectiveHandleScope* PopReflectiveHandleScope() { |
| 1007 | BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope; |
| 1008 | DCHECK(handle_scope != nullptr); |
| 1009 | tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink(); |
| 1010 | return handle_scope; |
| 1011 | } |
| 1012 | |
Hiroshi Yamauchi | 0037082 | 2015-08-18 14:47:25 -0700 | [diff] [blame] | 1013 | bool GetIsGcMarking() const { |
| 1014 | CHECK(kUseReadBarrier); |
| 1015 | return tls32_.is_gc_marking; |
| 1016 | } |
| 1017 | |
Mathieu Chartier | fe814e8 | 2016-11-09 14:32:49 -0800 | [diff] [blame] | 1018 | void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking); |
Hiroshi Yamauchi | 0037082 | 2015-08-18 14:47:25 -0700 | [diff] [blame] | 1019 | |
Hans Boehm | 1b3ec0f | 2022-01-26 16:53:07 +0000 | [diff] [blame] | 1020 | bool GetWeakRefAccessEnabled() const; // Only safe for current thread. |
Hiroshi Yamauchi | 0b71357 | 2015-06-16 18:29:23 -0700 | [diff] [blame] | 1021 | |
| 1022 | void SetWeakRefAccessEnabled(bool enabled) { |
| 1023 | CHECK(kUseReadBarrier); |
Hans Boehm | 1b3ec0f | 2022-01-26 16:53:07 +0000 | [diff] [blame] | 1024 | WeakRefAccessState new_state = enabled ? |
| 1025 | WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled; |
| 1026 | tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release); |
Hiroshi Yamauchi | 0b71357 | 2015-06-16 18:29:23 -0700 | [diff] [blame] | 1027 | } |
| 1028 | |
Hiroshi Yamauchi | 20a0be0 | 2016-02-19 15:44:06 -0800 | [diff] [blame] | 1029 | uint32_t GetDisableThreadFlipCount() const { |
| 1030 | CHECK(kUseReadBarrier); |
| 1031 | return tls32_.disable_thread_flip_count; |
| 1032 | } |
| 1033 | |
| 1034 | void IncrementDisableThreadFlipCount() { |
| 1035 | CHECK(kUseReadBarrier); |
| 1036 | ++tls32_.disable_thread_flip_count; |
| 1037 | } |
| 1038 | |
| 1039 | void DecrementDisableThreadFlipCount() { |
| 1040 | CHECK(kUseReadBarrier); |
| 1041 | DCHECK_GT(tls32_.disable_thread_flip_count, 0U); |
| 1042 | --tls32_.disable_thread_flip_count; |
| 1043 | } |
| 1044 | |
Alex Light | 185a461 | 2018-10-04 15:54:25 -0700 | [diff] [blame] | 1045 | // Returns true if the thread is a runtime thread (e.g. one created by a ThreadPool).
Alex Light | e9f6103 | 2018-09-24 16:04:51 -0700 | [diff] [blame] | 1046 | bool IsRuntimeThread() const { |
| 1047 | return is_runtime_thread_; |
Calin Juravle | ccd5695 | 2016-12-15 17:57:38 +0000 | [diff] [blame] | 1048 | } |
| 1049 | |
Alex Light | e9f6103 | 2018-09-24 16:04:51 -0700 | [diff] [blame] | 1050 | void SetIsRuntimeThread(bool is_runtime_thread) { |
| 1051 | is_runtime_thread_ = is_runtime_thread; |
Calin Juravle | ccd5695 | 2016-12-15 17:57:38 +0000 | [diff] [blame] | 1052 | } |
| 1053 | |
Orion Hodson | 01ecfa1 | 2019-07-18 12:57:47 +0100 | [diff] [blame] | 1054 | uint32_t CorePlatformApiCookie() { |
| 1055 | return core_platform_api_cookie_; |
| 1056 | } |
| 1057 | |
| 1058 | void SetCorePlatformApiCookie(uint32_t cookie) { |
| 1059 | core_platform_api_cookie_ = cookie; |
| 1060 | } |
| 1061 | |
Alex Light | e9f6103 | 2018-09-24 16:04:51 -0700 | [diff] [blame] | 1062 | // Returns true if the thread is allowed to load java classes. |
| 1063 | bool CanLoadClasses() const; |
| 1064 | |
Sebastien Hertz | fd3077e | 2014-04-23 10:32:43 +0200 | [diff] [blame] | 1065 | // Returns the fake exception used to activate deoptimization. |
| 1066 | static mirror::Throwable* GetDeoptimizationException() { |
Mathieu Chartier | 0795f23 | 2016-09-27 18:43:30 -0700 | [diff] [blame] | 1067 | // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be |
| 1068 | // represented by ObjPtr. |
| 1069 | return reinterpret_cast<mirror::Throwable*>(0x100); |
Sebastien Hertz | fd3077e | 2014-04-23 10:32:43 +0200 | [diff] [blame] | 1070 | } |
| 1071 | |
Mingyao Yang | 1f2d3ba | 2015-05-18 12:12:50 -0700 | [diff] [blame] | 1072 | // Currently deoptimization invokes the verifier, which can trigger class
| 1073 | // loading and execute Java code, so nested deoptimizations may occur. We
| 1074 | // therefore need to save the shadow frames and return values of ongoing
| 1075 | // deoptimizations on a stack.
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1076 | // 'from_code' denotes whether the deoptimization was explicitly made from |
| 1077 | // compiled code. |
Mingyao Yang | 2ee1790 | 2017-08-30 11:37:08 -0700 | [diff] [blame] | 1078 | // 'method_type' contains info on whether deoptimization should advance |
| 1079 | // dex_pc. |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1080 | void PushDeoptimizationContext(const JValue& return_value, |
| 1081 | bool is_reference, |
Mingyao Yang | 2ee1790 | 2017-08-30 11:37:08 -0700 | [diff] [blame] | 1082 | ObjPtr<mirror::Throwable> exception, |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1083 | bool from_code, |
Mingyao Yang | 2ee1790 | 2017-08-30 11:37:08 -0700 | [diff] [blame] | 1084 | DeoptimizationMethodType method_type) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1085 | REQUIRES_SHARED(Locks::mutator_lock_); |
Mathieu Chartier | f5769e1 | 2017-01-10 15:54:41 -0800 | [diff] [blame] | 1086 | void PopDeoptimizationContext(JValue* result, |
| 1087 | ObjPtr<mirror::Throwable>* exception, |
Mingyao Yang | 2ee1790 | 2017-08-30 11:37:08 -0700 | [diff] [blame] | 1088 | bool* from_code, |
| 1089 | DeoptimizationMethodType* method_type) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1090 | REQUIRES_SHARED(Locks::mutator_lock_); |
Sebastien Hertz | 0747466 | 2015-08-25 15:12:33 +0000 | [diff] [blame] | 1091 | void AssertHasDeoptimizationContext() |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1092 | REQUIRES_SHARED(Locks::mutator_lock_); |
Mingyao Yang | 1f2d3ba | 2015-05-18 12:12:50 -0700 | [diff] [blame] | 1093 | void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type); |
Andreas Gampe | 639bdd1 | 2015-06-03 11:22:45 -0700 | [diff] [blame] | 1094 | ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true); |
Andreas Gampe | 2a0d4ec | 2014-06-02 22:05:22 -0700 | [diff] [blame] | 1095 | |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1096 | // For debugger, find the shadow frame that corresponds to a frame id. |
| 1097 | // Or return null if there is none. |
| 1098 | ShadowFrame* FindDebuggerShadowFrame(size_t frame_id) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1099 | REQUIRES_SHARED(Locks::mutator_lock_); |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1100 | // For debugger, find the bool array that keeps track of the updated vreg set |
| 1101 | // for a frame id. |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1102 | bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_); |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1103 | // For debugger, find the shadow frame that corresponds to a frame id. If |
| 1104 | // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame. |
| 1105 | ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id, |
| 1106 | uint32_t num_vregs, |
| 1107 | ArtMethod* method, |
| 1108 | uint32_t dex_pc) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1109 | REQUIRES_SHARED(Locks::mutator_lock_); |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1110 | |
| 1111 | // Delete the entry that maps from frame_id to shadow_frame. |
| 1112 | void RemoveDebuggerShadowFrameMapping(size_t frame_id) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1113 | REQUIRES_SHARED(Locks::mutator_lock_); |
Mingyao Yang | 99170c6 | 2015-07-06 11:10:37 -0700 | [diff] [blame] | 1114 | |
Nicolas Geoffray | e91e795 | 2020-01-23 10:15:56 +0000 | [diff] [blame] | 1115 | // While getting this map requires the mutator lock to be held shared,
| 1116 | // manipulating it should actually follow these rules:
| 1117 | // (1) The owner of this map (the thread) can change it with its mutator lock. |
| 1118 | // (2) Other threads can read this map when the owner is suspended and they |
| 1119 | // hold the mutator lock. |
| 1120 | // (3) Other threads can change this map when owning the mutator lock exclusively. |
| 1121 | // |
| 1122 | // The reason why (3) needs the mutator lock exclusively (and not just having |
| 1123 | // the owner suspended) is that we don't want other threads to concurrently read the map. |
| 1124 | // |
| 1125 | // TODO: Add a class abstraction to express these rules. |
| 1126 | std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() |
| 1127 | REQUIRES_SHARED(Locks::mutator_lock_) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1128 | return tlsPtr_.instrumentation_stack; |
jeffhao | e343b76 | 2011-12-05 16:36:44 -0800 | [diff] [blame] | 1129 | } |
| 1130 | |
Mathieu Chartier | e401d14 | 2015-04-22 13:56:20 -0700 | [diff] [blame] | 1131 | std::vector<ArtMethod*>* GetStackTraceSample() const { |
Andreas Gampe | 2c19f5b | 2016-11-28 08:10:18 -0800 | [diff] [blame] | 1132 | DCHECK(!IsAotCompiler()); |
Nicolas Geoffray | 340dafa | 2016-11-18 16:03:10 +0000 | [diff] [blame] | 1133 | return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample; |
Jeff Hao | 5ce4b17 | 2013-08-16 16:27:18 -0700 | [diff] [blame] | 1134 | } |
| 1135 | |
Mathieu Chartier | e401d14 | 2015-04-22 13:56:20 -0700 | [diff] [blame] | 1136 | void SetStackTraceSample(std::vector<ArtMethod*>* sample) { |
Andreas Gampe | 2c19f5b | 2016-11-28 08:10:18 -0800 | [diff] [blame] | 1137 | DCHECK(!IsAotCompiler()); |
Nicolas Geoffray | 340dafa | 2016-11-18 16:03:10 +0000 | [diff] [blame] | 1138 | tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample; |
| 1139 | } |
| 1140 | |
| 1141 | verifier::VerifierDeps* GetVerifierDeps() const { |
Andreas Gampe | 2c19f5b | 2016-11-28 08:10:18 -0800 | [diff] [blame] | 1142 | DCHECK(IsAotCompiler()); |
Nicolas Geoffray | 340dafa | 2016-11-18 16:03:10 +0000 | [diff] [blame] | 1143 | return tlsPtr_.deps_or_stack_trace_sample.verifier_deps; |
| 1144 | } |
| 1145 | |
| 1146 | // It is the responsibility of the caller to make sure the verifier_deps
| 1147 | // entry in the thread is cleared before the destruction of the actual
| 1148 | // VerifierDeps object, or of the thread.
| 1149 | void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) { |
Andreas Gampe | 2c19f5b | 2016-11-28 08:10:18 -0800 | [diff] [blame] | 1150 | DCHECK(IsAotCompiler()); |
Nicolas Geoffray | e424c93 | 2016-11-23 12:52:01 +0000 | [diff] [blame] | 1151 | DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr); |
Nicolas Geoffray | 340dafa | 2016-11-18 16:03:10 +0000 | [diff] [blame] | 1152 | tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps; |
Jeff Hao | 5ce4b17 | 2013-08-16 16:27:18 -0700 | [diff] [blame] | 1153 | } |
| 1154 | |
| 1155 | uint64_t GetTraceClockBase() const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1156 | return tls64_.trace_clock_base; |
Jeff Hao | 5ce4b17 | 2013-08-16 16:27:18 -0700 | [diff] [blame] | 1157 | } |
| 1158 | |
| 1159 | void SetTraceClockBase(uint64_t clock_base) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1160 | tls64_.trace_clock_base = clock_base; |
Jeff Hao | 5ce4b17 | 2013-08-16 16:27:18 -0700 | [diff] [blame] | 1161 | } |
| 1162 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 1163 | BaseMutex* GetHeldMutex(LockLevel level) const { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1164 | return tlsPtr_.held_mutexes[level]; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1165 | } |
| 1166 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 1167 | void SetHeldMutex(LockLevel level, BaseMutex* mutex) { |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1168 | tlsPtr_.held_mutexes[level] = mutex; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1169 | } |
Elliott Hughes | ffb465f | 2012-03-01 18:46:05 -0800 | [diff] [blame] | 1170 | |
Yu Li | eac4424 | 2015-06-29 10:50:03 +0800 | [diff] [blame] | 1171 | void ClearSuspendBarrier(AtomicInteger* target) |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 1172 | REQUIRES(Locks::thread_suspend_count_lock_); |
Yu Li | eac4424 | 2015-06-29 10:50:03 +0800 | [diff] [blame] | 1173 | |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1174 | bool ReadFlag(ThreadFlag flag) const { |
Vladimir Marko | 9c0f764 | 2021-12-06 16:17:52 +0000 | [diff] [blame] | 1175 | return GetStateAndFlags(std::memory_order_relaxed).IsFlagSet(flag); |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1176 | } |
| 1177 | |
Vladimir Marko | 9c0f764 | 2021-12-06 16:17:52 +0000 | [diff] [blame] | 1178 | void AtomicSetFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) { |
| 1179 | tls32_.state_and_flags.fetch_or(enum_cast<uint32_t>(flag), order); |
Ian Rogers | 8c1b5f7 | 2014-07-09 22:02:36 -0700 | [diff] [blame] | 1180 | } |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1181 | |
Vladimir Marko | 9c0f764 | 2021-12-06 16:17:52 +0000 | [diff] [blame] | 1182 | void AtomicClearFlag(ThreadFlag flag, std::memory_order order = std::memory_order_seq_cst) { |
| 1183 | tls32_.state_and_flags.fetch_and(~enum_cast<uint32_t>(flag), order); |
Ian Rogers | 8c1b5f7 | 2014-07-09 22:02:36 -0700 | [diff] [blame] | 1184 | } |
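| | // Illustrative flag round-trip (a sketch; the flag is real, the call
| | // site is hypothetical):
| | //   self->AtomicSetFlag(ThreadFlag::kCheckpointRequest);
| | //   DCHECK(self->ReadFlag(ThreadFlag::kCheckpointRequest));
| | //   self->AtomicClearFlag(ThreadFlag::kCheckpointRequest);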
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1185 | |
Lokesh Gidra | 7e678d3 | 2020-04-28 16:17:49 -0700 | [diff] [blame] | 1186 | void ResetQuickAllocEntryPointsForThread(); |
Hiroshi Yamauchi | 3b4c189 | 2013-09-12 21:33:12 -0700 | [diff] [blame] | 1187 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1188 | // Returns the remaining space in the TLAB. |
Mathieu Chartier | 6bc7774 | 2017-04-18 17:46:23 -0700 | [diff] [blame] | 1189 | size_t TlabSize() const { |
| 1190 | return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos; |
| 1191 | } |
| 1192 | |
Wessam Hassanein | b5a10be | 2020-11-11 16:42:52 -0800 | [diff] [blame] | 1193 | // Returns the offset of the current TLAB position from the TLAB start.
| 1194 | size_t GetTlabPosOffset() const { |
| 1195 | return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start; |
| 1196 | } |
| 1197 | |
Mathieu Chartier | 6bc7774 | 2017-04-18 17:46:23 -0700 | [diff] [blame] | 1198 | // Returns the remaining space in the TLAB if we were to expand it to maximum capacity. |
| 1199 | size_t TlabRemainingCapacity() const { |
| 1200 | return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos; |
| 1201 | } |
| 1202 | |
| 1203 | // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so. |
| 1204 | void ExpandTlab(size_t bytes) { |
| 1205 | tlsPtr_.thread_local_end += bytes; |
| 1206 | DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit); |
| 1207 | } |
| 1208 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1209 | // Doesn't check that there is room. |
| 1210 | mirror::Object* AllocTlab(size_t bytes); |
Mathieu Chartier | 6bc7774 | 2017-04-18 17:46:23 -0700 | [diff] [blame] | 1211 | void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1212 | bool HasTlab() const; |
Mathieu Chartier | c4bf667 | 2020-01-13 13:07:16 -0800 | [diff] [blame] | 1213 | void ResetTlab(); |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 1214 | uint8_t* GetTlabStart() { |
| 1215 | return tlsPtr_.thread_local_start; |
| 1216 | } |
| 1217 | uint8_t* GetTlabPos() { |
| 1218 | return tlsPtr_.thread_local_pos; |
| 1219 | } |
Lokesh Gidra | 4f9d62b | 2020-01-06 15:06:04 -0800 | [diff] [blame] | 1220 | uint8_t* GetTlabEnd() { |
| 1221 | return tlsPtr_.thread_local_end; |
| 1222 | } |
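| | // Sketch of the bump-pointer fast path these accessors support (an
| | // illustration, not the actual allocator code):
| | //   if (self->TlabSize() >= bytes) {
| | //     mirror::Object* obj = self->AllocTlab(bytes);  // advances thread_local_pos
| | //     ...
| | //   }
| |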
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1223 | // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value |
| 1224 | // equal to a valid pointer. |
| 1225 | // TODO: does this need to be atomic? I don't think so.
| 1226 | void RemoveSuspendTrigger() { |
| 1227 | tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger); |
| 1228 | } |
| 1229 | |
| 1230 | // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer. |
| 1231 | // The next time a suspend check is done, it will load from the value at this address |
| 1232 | // and trigger a SIGSEGV. |
Hans Boehm | 891cb88 | 2020-07-31 12:06:58 -0700 | [diff] [blame] | 1233 | // Only needed if Runtime::implicit_suspend_checks_ is true and fully implemented. It currently |
| 1234 | // is always false. Client code currently just looks at the thread flags directly to determine |
| 1235 | // whether we should suspend, so this call is currently unnecessary. |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1236 | void TriggerSuspend() { |
| 1237 | tlsPtr_.suspend_trigger = nullptr; |
| 1238 | } |
| 1239 | |
| 1240 | |
| 1241 | // Push an object onto the allocation stack. |
Mathieu Chartier | cb535da | 2015-01-23 13:50:03 -0800 | [diff] [blame] | 1242 | bool PushOnThreadLocalAllocationStack(mirror::Object* obj) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1243 | REQUIRES_SHARED(Locks::mutator_lock_); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1244 | |
| 1245 | // Set the thread local allocation pointers to the given pointers. |
Mathieu Chartier | cb535da | 2015-01-23 13:50:03 -0800 | [diff] [blame] | 1246 | void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start, |
| 1247 | StackReference<mirror::Object>* end); |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1248 | |
| 1249 | // Resets the thread local allocation pointers. |
| 1250 | void RevokeThreadLocalAllocationStack(); |
| 1251 | |
| 1252 | size_t GetThreadLocalBytesAllocated() const { |
Mathieu Chartier | 14cc9be | 2014-07-11 10:26:37 -0700 | [diff] [blame] | 1253 | return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start; |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1254 | } |
| 1255 | |
| 1256 | size_t GetThreadLocalObjectsAllocated() const { |
| 1257 | return tlsPtr_.thread_local_objects; |
| 1258 | } |
| 1259 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1260 | void* GetRosAllocRun(size_t index) const { |
| 1261 | return tlsPtr_.rosalloc_runs[index]; |
| 1262 | } |
| 1263 | |
| 1264 | void SetRosAllocRun(size_t index, void* run) { |
| 1265 | tlsPtr_.rosalloc_runs[index] = run; |
| 1266 | } |
| 1267 | |
Andreas Gampe | 2c2d2a0 | 2016-03-17 21:27:19 -0700 | [diff] [blame] | 1268 | bool ProtectStack(bool fatal_on_error = true); |
Dave Allison | 648d711 | 2014-07-25 16:15:27 -0700 | [diff] [blame] | 1269 | bool UnprotectStack(); |
| 1270 | |
Hiroshi Yamauchi | ee23582 | 2016-08-19 17:03:27 -0700 | [diff] [blame] | 1271 | bool IsTransitioningToRunnable() const { |
| 1272 | return tls32_.is_transitioning_to_runnable; |
| 1273 | } |
| 1274 | |
| 1275 | void SetIsTransitioningToRunnable(bool value) { |
| 1276 | tls32_.is_transitioning_to_runnable = value; |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 1277 | } |
| 1278 | |
Alex Light | 3dacdd6 | 2019-03-12 15:45:47 +0000 | [diff] [blame] | 1279 | uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) { |
| 1280 | return --tls32_.force_interpreter_count; |
| 1281 | } |
| 1282 | |
| 1283 | uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) { |
| 1284 | return ++tls32_.force_interpreter_count; |
| 1285 | } |
| 1286 | |
| 1287 | void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) { |
| 1288 | tls32_.force_interpreter_count = value; |
| 1289 | } |
| 1290 | |
| 1291 | uint32_t ForceInterpreterCount() const { |
| 1292 | return tls32_.force_interpreter_count; |
| 1293 | } |
| 1294 | |
| 1295 | bool IsForceInterpreter() const { |
| 1296 | return tls32_.force_interpreter_count != 0; |
| 1297 | } |
| 1298 | |
Vladimir Marko | bf12191 | 2019-06-04 13:49:05 +0100 | [diff] [blame] | 1299 | bool IncrementMakeVisiblyInitializedCounter() { |
| 1300 | tls32_.make_visibly_initialized_counter += 1u; |
| 1301 | return tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount; |
| 1302 | } |
| 1303 | |
| 1304 | void ClearMakeVisiblyInitializedCounter() { |
| 1305 | tls32_.make_visibly_initialized_counter = 0u; |
| 1306 | } |
| 1307 | |
Mathieu Chartier | d0ad2ee | 2015-03-31 14:59:59 -0700 | [diff] [blame] | 1308 | void PushVerifier(verifier::MethodVerifier* verifier); |
| 1309 | void PopVerifier(verifier::MethodVerifier* verifier); |
Mathieu Chartier | 12d625f | 2015-03-13 11:33:37 -0700 | [diff] [blame] | 1310 | |
Jeff Hao | 848f70a | 2014-01-15 13:49:50 -0800 | [diff] [blame] | 1311 | void InitStringEntryPoints(); |
| 1312 | |
Mathieu Chartier | dfe02f6 | 2016-02-01 20:15:11 -0800 | [diff] [blame] | 1313 | void ModifyDebugDisallowReadBarrier(int8_t delta) { |
Vladimir Marko | 468de59 | 2022-02-04 16:47:46 +0000 | [diff] [blame] | 1314 | if (kCheckDebugDisallowReadBarrierCount) { |
| 1315 | debug_disallow_read_barrier_ += delta; |
| 1316 | } |
Mathieu Chartier | dfe02f6 | 2016-02-01 20:15:11 -0800 | [diff] [blame] | 1317 | } |
| 1318 | |
| 1319 | uint8_t GetDebugDisallowReadBarrierCount() const { |
Vladimir Marko | 468de59 | 2022-02-04 16:47:46 +0000 | [diff] [blame] | 1320 | return kCheckDebugDisallowReadBarrierCount ? debug_disallow_read_barrier_ : 0u; |
Mathieu Chartier | dfe02f6 | 2016-02-01 20:15:11 -0800 | [diff] [blame] | 1321 | } |
| 1322 | |
Alex Light | 184f075 | 2018-07-13 11:18:22 -0700 | [diff] [blame] | 1323 | // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users |
| 1324 | // do not gain ownership of the TLSData and must synchronize with SetCustomTLS themselves to prevent
| 1325 | // it from being deleted. |
| 1326 | TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_); |
Andreas Gampe | f26bf2d | 2017-01-13 16:47:14 -0800 | [diff] [blame] | 1327 | |
Alex Light | 184f075 | 2018-07-13 11:18:22 -0700 | [diff] [blame] | 1328 | // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor |
| 1329 | // will be run when the thread exits or when SetCustomTLS is called again with the same key. |
| 1330 | void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_); |
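| | // Illustrative usage (a sketch; the key string and the TLSData subclass
| | // are hypothetical):
| | //   class MyToolData : public TLSData { /* owned by the thread */ };
| | //   self->SetCustomTLS("my-tool", new MyToolData());
| | //   TLSData* data = self->GetCustomTLS("my-tool");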
Andreas Gampe | f26bf2d | 2017-01-13 16:47:14 -0800 | [diff] [blame] | 1331 | |
Calin Juravle | 97cbc92 | 2016-04-15 16:16:35 +0100 | [diff] [blame] | 1332 | // Returns true if the current thread is the jit sensitive thread. |
| 1333 | bool IsJitSensitiveThread() const { |
| 1334 | return this == jit_sensitive_thread_; |
| 1335 | } |
| 1336 | |
Alex Light | e0b2ce4 | 2019-02-21 19:23:42 +0000 | [diff] [blame] | 1337 | bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_); |
| 1338 | |
Calin Juravle | 97cbc92 | 2016-04-15 16:16:35 +0100 | [diff] [blame] | 1339 | // Returns true if StrictMode events are traced for the current thread. |
Calin Juravle | b2771b4 | 2016-04-07 17:09:25 +0100 | [diff] [blame] | 1340 | static bool IsSensitiveThread() { |
| 1341 | if (is_sensitive_thread_hook_ != nullptr) { |
| 1342 | return (*is_sensitive_thread_hook_)(); |
| 1343 | } |
| 1344 | return false; |
| 1345 | } |
| 1346 | |
Mathieu Chartier | 3768ade | 2017-05-02 14:04:39 -0700 | [diff] [blame] | 1347 | // Set the read barrier marking entrypoints to be non-null.
| 1348 | void SetReadBarrierEntrypoints(); |
| 1349 | |
Andreas Gampe | bad529d | 2017-02-13 18:52:10 -0800 | [diff] [blame] | 1350 | static jobject CreateCompileTimePeer(JNIEnv* env, |
| 1351 | const char* name, |
| 1352 | bool as_daemon, |
| 1353 | jobject thread_group) |
| 1354 | REQUIRES_SHARED(Locks::mutator_lock_); |
| 1355 | |
David Srbecky | 912f36c | 2018-09-08 12:22:58 +0100 | [diff] [blame] | 1356 | ALWAYS_INLINE InterpreterCache* GetInterpreterCache() { |
| 1357 | return &interpreter_cache_; |
| 1358 | } |
| 1359 | |
| 1360 | // Clear all thread-local interpreter caches. |
| 1361 | // |
| 1362 | // Since the caches are keyed by memory pointer to dex instructions, this must be |
| 1363 | // called when any dex code is unloaded (before different code gets loaded at the |
| 1364 | // same memory location). |
| 1365 | // |
| 1366 | // If presence of cache entry implies some pre-conditions, this must also be |
| 1367 | // called if the pre-conditions might no longer hold true. |
| 1368 | static void ClearAllInterpreterCaches(); |
| 1369 | |
| 1370 | template<PointerSize pointer_size> |
David Srbecky | 56de89a | 2018-10-01 15:32:20 +0100 | [diff] [blame] | 1371 | static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() { |
David Srbecky | 912f36c | 2018-09-08 12:22:58 +0100 | [diff] [blame] | 1372 | return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_)); |
| 1373 | } |
| 1374 | |
Hans Boehm | 3d2f148 | 2022-01-17 01:32:55 +0000 | [diff] [blame] | 1375 | static constexpr int InterpreterCacheSizeLog2() { |
| 1376 | return WhichPowerOf2(InterpreterCache::kSize); |
| 1377 | } |
| 1378 | |
Vladimir Marko | 254a858 | 2021-11-29 14:08:37 +0000 | [diff] [blame] | 1379 | static constexpr uint32_t AllThreadFlags() { |
| 1380 | return enum_cast<uint32_t>(ThreadFlag::kLastFlag) | |
| 1381 | (enum_cast<uint32_t>(ThreadFlag::kLastFlag) - 1u); |
| 1382 | } |
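| | // Worked example (illustrative): if ThreadFlag::kLastFlag were 1u << 5,
| | // this yields (1u << 5) | ((1u << 5) - 1u) == 0b111111, a mask covering
| | // every defined flag bit.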
| 1383 | |
| 1384 | static constexpr uint32_t SuspendOrCheckpointRequestFlags() { |
| 1385 | return enum_cast<uint32_t>(ThreadFlag::kSuspendRequest) | |
| 1386 | enum_cast<uint32_t>(ThreadFlag::kCheckpointRequest) | |
| 1387 | enum_cast<uint32_t>(ThreadFlag::kEmptyCheckpointRequest); |
| 1388 | } |
| 1389 | |
Vladimir Marko | 9c0f764 | 2021-12-06 16:17:52 +0000 | [diff] [blame] | 1390 | static constexpr uint32_t FlipFunctionFlags() { |
| 1391 | return enum_cast<uint32_t>(ThreadFlag::kPendingFlipFunction) | |
| 1392 | enum_cast<uint32_t>(ThreadFlag::kRunningFlipFunction) | |
| 1393 | enum_cast<uint32_t>(ThreadFlag::kWaitingForFlipFunction); |
| 1394 | } |
| 1395 | |
Vladimir Marko | ce2a344 | 2021-11-24 15:10:26 +0000 | [diff] [blame] | 1396 | static constexpr uint32_t StoredThreadStateValue(ThreadState state) { |
| 1397 | return StateAndFlags::EncodeState(state); |
| 1398 | } |
| 1399 | |
Nicolas Geoffray | f9ae8e3 | 2022-02-15 22:54:11 +0000 | [diff] [blame] | 1400 | void ResetSharedMethodHotness() { |
| 1401 | tls32_.shared_method_hotness = kSharedMethodHotnessThreshold; |
| 1402 | } |
| 1403 | |
| 1404 | uint32_t GetSharedMethodHotness() const { |
| 1405 | return tls32_.shared_method_hotness; |
| 1406 | } |
| 1407 | |
| 1408 | uint32_t DecrementSharedMethodHotness() { |
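| | // The & 0xffff wraps the counter at 16 bits (an assumption: this matches
| | // the width of the hotness counters used by compiled code).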
| 1409 | tls32_.shared_method_hotness = (tls32_.shared_method_hotness - 1) & 0xffff; |
| 1410 | return tls32_.shared_method_hotness; |
| 1411 | } |
| 1412 | |
Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1413 | private: |
Ian Rogers | 52673ff | 2012-06-27 23:25:34 -0700 | [diff] [blame] | 1414 | explicit Thread(bool daemon); |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 1415 | ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_); |
Elliott Hughes | c0f0933 | 2012-03-26 13:27:06 -0700 | [diff] [blame] | 1416 | void Destroy(); |
Carl Shapiro | 0e5d75d | 2011-07-06 18:28:37 -0700 | [diff] [blame] | 1417 | |
Alex Light | 4847a07 | 2019-12-12 16:13:47 -0800 | [diff] [blame] | 1418 | // Deletes and clears the tlsPtr_.jpeer field. Done in such a way that instrumentation cannot
| 1419 | // observe both it and opeer set at the same time.
| 1420 | void DeleteJPeer(JNIEnv* env); |
| 1421 | |
Andreas Gampe | 732b0ac | 2017-01-18 15:23:39 -0800 | [diff] [blame] | 1422 | // Attaches the calling native thread to the runtime, returning the new native peer. |
| 1423 | // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls. |
| 1424 | template <typename PeerAction> |
| 1425 | static Thread* Attach(const char* thread_name, |
| 1426 | bool as_daemon, |
| 1427 | PeerAction p); |
| 1428 | |
Ian Rogers | 365c102 | 2012-06-22 15:05:28 -0700 | [diff] [blame] | 1429 | void CreatePeer(const char* name, bool as_daemon, jobject thread_group); |
Elliott Hughes | 5fe594f | 2011-09-08 12:33:17 -0700 | [diff] [blame] | 1430 | |
Sebastien Hertz | d2fe10a | 2014-01-15 10:20:56 +0100 | [diff] [blame] | 1431 | template<bool kTransactionActive> |
Andreas Gampe | bad529d | 2017-02-13 18:52:10 -0800 | [diff] [blame] | 1432 | static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa, |
| 1433 | ObjPtr<mirror::Object> peer, |
| 1434 | jboolean thread_is_daemon, |
| 1435 | jobject thread_group, |
| 1436 | jobject thread_name, |
| 1437 | jint thread_priority) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 1438 | REQUIRES_SHARED(Locks::mutator_lock_); |
Sebastien Hertz | d2fe10a | 2014-01-15 10:20:56 +0100 | [diff] [blame] | 1439 | |
Vladimir Marko | ddf4fd3 | 2021-11-22 16:31:57 +0000 | [diff] [blame] | 1440 | // Avoid use; callers should use SetState instead.
| 1441 | // Used only by `Thread` destructor and stack trace collection in semi-space GC (currently |
| 1442 | // disabled by `kStoreStackTraces = false`). |
Vladimir Marko | 9c0f764 | 2021-12-06 16:17:52 +0000 | [diff] [blame] | 1443 | // NO_THREAD_SAFETY_ANALYSIS: This function is "Unsafe" and can be called in |
| 1444 | // different states, so clang cannot perform the thread safety analysis. |
| 1445 | ThreadState SetStateUnsafe(ThreadState new_state) NO_THREAD_SAFETY_ANALYSIS { |
| 1446 | StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed); |
Vladimir Marko | ddf4fd3 | 2021-11-22 16:31:57 +0000 | [diff] [blame] | 1447 | ThreadState old_state = old_state_and_flags.GetState(); |
| 1448 | if (old_state == new_state) { |
| 1449 | // Nothing to do. |
| 1450 | } else if (old_state == ThreadState::kRunnable) { |
Mathieu Chartier | 8ac9c91 | 2015-10-01 15:58:41 -0700 | [diff] [blame] | 1451 | // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in |
| 1452 | // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA |
| 1453 | // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock. |
| 1454 | TransitionToSuspendedAndRunCheckpoints(new_state); |
| 1455 | // Since we transitioned to a suspended state, pass any pending suspend barriers.
| 1456 | PassActiveSuspendBarriers(); |
| 1457 | } else { |
Vladimir Marko | ddf4fd3 | 2021-11-22 16:31:57 +0000 | [diff] [blame] | 1458 | while (true) { |
| 1459 | StateAndFlags new_state_and_flags = old_state_and_flags; |
| 1460 | new_state_and_flags.SetState(new_state); |
| 1461 | if (LIKELY(tls32_.state_and_flags.CompareAndSetWeakAcquire( |
| 1462 | old_state_and_flags.GetValue(), |
| 1463 | new_state_and_flags.GetValue()))) { |
| 1464 | break; |
| 1465 | } |
| 1466 | // Reload state and flags. |
Vladimir Marko | 9c0f764 | 2021-12-06 16:17:52 +0000 | [diff] [blame] | 1467 | old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed); |
Vladimir Marko | ddf4fd3 | 2021-11-22 16:31:57 +0000 | [diff] [blame] | 1468 | DCHECK_EQ(old_state, old_state_and_flags.GetState()); |
| 1469 | } |
Yu Li | eac4424 | 2015-06-29 10:50:03 +0800 | [diff] [blame] | 1470 | } |
Ian Rogers | c747cff | 2012-08-31 18:20:08 -0700 | [diff] [blame] | 1471 | return old_state; |
| 1472 | } |
Ian Rogers | c747cff | 2012-08-31 18:20:08 -0700 | [diff] [blame] | 1473 | |
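  // Illustrative usage sketch (not from the original source): the destructor-style callers
  // save and restore the state around work that must not be observed as runnable:
  //
  //   ThreadState old_state = SetStateUnsafe(ThreadState::kNative);
  //   ...  // Work performed while not runnable.
  //   SetStateUnsafe(old_state);
  //
  // All other code should prefer SetState, which enforces the expected transitions.
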
  MutatorMutex* GetMutatorLock() RETURN_CAPABILITY(Locks::mutator_lock_) {
    DCHECK_EQ(tlsPtr_.mutator_lock, Locks::mutator_lock_);
    return tlsPtr_.mutator_lock;
  }

  void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os,
                 bool dump_native_stack = true,
                 BacktraceMap* backtrace_map = nullptr,
                 bool force_dump_stack = false) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Initialize a thread.
  //
  // The third parameter is optional. If given, and Init succeeds, the thread takes ownership of
  // the JNIEnvExt. If Init fails, it is the caller's responsibility to destroy it. If the
  // parameter is null, Init will try to create a JNIEnvExt on its own (and potentially fail at
  // that stage, indicated by a return value of false).
  bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
      REQUIRES(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  bool InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();
  void MadviseAwayAlternateSignalStack();

  ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void PassActiveSuspendBarriers()
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);

  // Registers the current thread as the jit sensitive thread. Should be called just once.
  static void SetJitSensitiveThread() {
    if (jit_sensitive_thread_ == nullptr) {
      jit_sensitive_thread_ = Thread::Current();
    } else {
      LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
                   << Thread::Current()->GetTid();
    }
  }

  static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
    is_sensitive_thread_hook_ = is_sensitive_thread_hook;
  }

  bool ModifySuspendCountInternal(Thread* self,
                                  int delta,
                                  AtomicInteger* suspend_barrier,
                                  SuspendReason reason)
      WARN_UNUSED
      REQUIRES(Locks::thread_suspend_count_lock_);

  // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
  // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
  // the kCheckpointRequest flag is cleared.
  void RunCheckpointFunction()
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
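  // Illustrative caller loop (a sketch, not from the original source; assumes the ReadFlag
  // helper declared elsewhere in this class):
  //
  //   while (ReadFlag(ThreadFlag::kCheckpointRequest)) {
  //     RunCheckpointFunction();
  //   }
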
  void RunEmptyCheckpoint();

  bool PassActiveSuspendBarriers(Thread* self)
      REQUIRES(!Locks::thread_suspend_count_lock_);

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection();

  template <bool kPrecise>
  void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  static void SweepInterpreterCaches(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static bool IsAotCompiler();

  void ReleaseLongJumpContextInternal();

  void SetCachedThreadName(const char* name);

  // Helper class for manipulating the 32 bits of atomically changed state and flags.
  class StateAndFlags {
   public:
    explicit StateAndFlags(uint32_t value) : value_(value) {}

    uint32_t GetValue() const {
      return value_;
    }

    void SetValue(uint32_t value) {
      value_ = value;
    }

    bool IsAnyOfFlagsSet(uint32_t flags) const {
      DCHECK_EQ(flags & ~AllThreadFlags(), 0u);
      return (value_ & flags) != 0u;
    }

    bool IsFlagSet(ThreadFlag flag) const {
      return (value_ & enum_cast<uint32_t>(flag)) != 0u;
    }

    void SetFlag(ThreadFlag flag) {
      value_ |= enum_cast<uint32_t>(flag);
    }

    StateAndFlags WithFlag(ThreadFlag flag) const {
      StateAndFlags result = *this;
      result.SetFlag(flag);
      return result;
    }

    StateAndFlags WithoutFlag(ThreadFlag flag) const {
      StateAndFlags result = *this;
      result.ClearFlag(flag);
      return result;
    }

    void ClearFlag(ThreadFlag flag) {
      value_ &= ~enum_cast<uint32_t>(flag);
    }

    ThreadState GetState() const {
      ThreadState state = ThreadStateField::Decode(value_);
      ValidateThreadState(state);
      return state;
    }

    void SetState(ThreadState state) {
      ValidateThreadState(state);
      value_ = ThreadStateField::Update(state, value_);
    }

    StateAndFlags WithState(ThreadState state) const {
      StateAndFlags result = *this;
      result.SetState(state);
      return result;
    }

    static constexpr uint32_t EncodeState(ThreadState state) {
      ValidateThreadState(state);
      return ThreadStateField::Encode(state);
    }

   private:
    static constexpr void ValidateThreadState(ThreadState state) {
      if (kIsDebugBuild && state != ThreadState::kRunnable) {
        CHECK_GE(state, ThreadState::kTerminated);
        CHECK_LE(state, ThreadState::kSuspended);
        CHECK_NE(state, ThreadState::kObsoleteRunnable);
      }
    }

    // The value holds thread flags and thread state.
    uint32_t value_;

    static constexpr size_t kThreadStateBitSize = BitSizeOf<std::underlying_type_t<ThreadState>>();
    static constexpr size_t kThreadStatePosition = BitSizeOf<uint32_t>() - kThreadStateBitSize;
    using ThreadStateField = BitField<ThreadState, kThreadStatePosition, kThreadStateBitSize>;
    static_assert(
        WhichPowerOf2(enum_cast<uint32_t>(ThreadFlag::kLastFlag)) < kThreadStatePosition);
  };
  static_assert(sizeof(StateAndFlags) == sizeof(uint32_t), "Unexpected StateAndFlags size");
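
  // Layout sketch (illustrative, derived from the BitField definitions above; assumes
  // ThreadState's underlying type is 8 bits wide):
  //
  //   bit 31                      bit 0
  //   [ state : 8 ][     flags : 24    ]
  //
  // so value_ == EncodeState(state) | flags, and the flag bits never overlap the state bits.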

  StateAndFlags GetStateAndFlags(std::memory_order order) const {
    return StateAndFlags(tls32_.state_and_flags.load(order));
  }

  // Format state and flags as a hex string. For diagnostic output.
  std::string StateAndFlagsAsHexString() const;

  // Run the flip function and, if requested, notify other threads that may have tried
  // to do that concurrently.
  void RunFlipFunction(Thread* self, bool notify) REQUIRES_SHARED(Locks::mutator_lock_);

  static void ThreadExitCallback(void* arg);

  // Maximum number of suspend barriers.
  static constexpr uint32_t kMaxSuspendBarriers = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Hook passed by the framework that returns true
  // when StrictMode events are traced for the current thread.
  static bool (*is_sensitive_thread_hook_)();
  // Stores the jit sensitive thread (which for now is the UI thread).
  static Thread* jit_sensitive_thread_;

  static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;

  /***********************************************************************************************/
  // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
  // pointer size differences. To encourage shorter encoding, more frequently used values appear
  // first if possible.
  /***********************************************************************************************/

  struct PACKED(4) tls_32bit_sized_values {
    // We have no control over the size of 'bool', but want our boolean fields
    // to be 4-byte quantities.
    using bool32_t = uint32_t;

    explicit tls_32bit_sized_values(bool is_daemon)
        : state_and_flags(0u),
          suspend_count(0),
          thin_lock_thread_id(0),
          tid(0),
          daemon(is_daemon),
          throwing_OutOfMemoryError(false),
          no_thread_suspension(0),
          thread_exit_check_count(0),
          is_transitioning_to_runnable(false),
          is_gc_marking(false),
          weak_ref_access_enabled(WeakRefAccessState::kVisiblyEnabled),
          disable_thread_flip_count(0),
          user_code_suspend_count(0),
          force_interpreter_count(0),
          make_visibly_initialized_counter(0),
          define_class_counter(0),
          num_name_readers(0),
          shared_method_hotness(kSharedMethodHotnessThreshold) {}

    // The state and flags field must be changed atomically so that flag values aren't lost.
    // See `StateAndFlags` for bit assignments of `ThreadFlag` and `ThreadState` values.
    // Keeping the state and flags together allows an atomic CAS to change from being
    // Suspended to Runnable without a suspend request occurring.
    Atomic<uint32_t> state_and_flags;
    static_assert(sizeof(state_and_flags) == sizeof(uint32_t),
                  "Size of state_and_flags and uint32 are different");
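
    // Illustrative sketch (not from the original source) of the suspended -> runnable CAS the
    // comment above describes; it succeeds only if no suspend request flag appeared in between:
    //
    //   uint32_t old_value = StateAndFlags::EncodeState(ThreadState::kSuspended);  // No flags.
    //   uint32_t new_value = StateAndFlags::EncodeState(ThreadState::kRunnable);
    //   bool ok = state_and_flags.CompareAndSetWeakAcquire(old_value, new_value);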

    // A non-zero value is used to tell the current thread to enter a safe point
    // at the next poll.
    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;

    // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish
    // the non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable
    // from the rest of them.
    bool32_t is_transitioning_to_runnable;

    // True if the GC is in the marking phase. This is used for the CC collector only. This is
    // thread local so that we can simplify the logic to check for the fast path of read barriers
    // of GC roots.
    bool32_t is_gc_marking;

    // Thread "interrupted" status; stays raised until queried or thrown.
    Atomic<bool32_t> interrupted;

    AtomicInteger park_state_;

    // Determines whether the thread is allowed to directly access a weak ref
    // (Reference::GetReferent() and system weaks) and to potentially mark an object alive/gray.
    // This is used for concurrent reference processing of the CC collector only. This is thread
    // local so that we can enable/disable weak ref access by using a checkpoint and avoid a race
    // around the time weak ref access gets disabled and concurrent reference processing begins
    // (if weak ref access is disabled during a pause, this is not an issue). Other collectors use
    // Runtime::DisallowNewSystemWeaks() and ReferenceProcessor::EnableSlowPath(). Can be
    // concurrently accessed by GetReferent() and set (by iterating over threads).
    // Can be changed from kEnabled to kVisiblyEnabled by readers. No other concurrent access is
    // possible when that happens.
    mutable std::atomic<WeakRefAccessState> weak_ref_access_enabled;

    // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
    // levels of (nested) JNI critical sections the thread is in and is used to detect a nested
    // JNI critical section enter.
    uint32_t disable_thread_flip_count;

    // How much of 'suspend_count' is by request of user code, used to distinguish threads
    // suspended by the runtime from those suspended by user code.
    // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but the thread safety
    // analysis cannot be told that AssertHeld should be good enough.
    int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Count of how many times this thread has been forced into the interpreter. If this is not 0
    // the thread must remain in interpreted code as much as possible.
    uint32_t force_interpreter_count;

    // Counter for calls to initialize a class that's initialized but not visibly initialized.
    // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
    // make initialized classes visibly initialized. This is needed because we usually make
    // classes visibly initialized in batches, but we do not want a class to remain initialized
    // yet not visibly initialized for a long time even if no further classes are being
    // initialized.
    uint32_t make_visibly_initialized_counter;

    // Counter for how many nested define-classes are ongoing in this thread. Used to allow
    // waiting for threads to be done with class-definition work.
    uint32_t define_class_counter;

    // A count of the number of readers of tlsPtr_.name that may still be looking at a string
    // they retrieved.
    mutable std::atomic<uint32_t> num_name_readers;
    static_assert(std::atomic<uint32_t>::is_always_lock_free);

    // Thread-local hotness counter for shared memory methods. Initialized with
    // `kSharedMethodHotnessThreshold`. The interpreter decrements it and goes
    // into the runtime when hitting zero. Note that all previous decrements
    // could have been executed by another method than the one seeing zero.
    // There is a second level counter in `Jit::shared_method_counters_` to make
    // sure we at least have a few samples before compiling a method.
    uint32_t shared_method_hotness;
  } tls32_;

  struct PACKED(8) tls_64bit_sized_values {
    tls_64bit_sized_values() : trace_clock_base(0) {
    }

    // The clock base used for tracing.
    uint64_t trace_clock_base;

    RuntimeStats stats;
  } tls64_;

  struct PACKED(sizeof(void*)) tls_ptr_sized_values {
    tls_ptr_sized_values() : card_table(nullptr),
                             exception(nullptr),
                             stack_end(nullptr),
                             managed_stack(),
                             suspend_trigger(nullptr),
                             jni_env(nullptr),
                             tmp_jni_env(nullptr),
                             self(nullptr),
                             opeer(nullptr),
                             jpeer(nullptr),
                             stack_begin(nullptr),
                             stack_size(0),
                             deps_or_stack_trace_sample(),
                             wait_next(nullptr),
                             monitor_enter_object(nullptr),
                             top_handle_scope(nullptr),
                             class_loader_override(nullptr),
                             long_jump_context(nullptr),
                             instrumentation_stack(nullptr),
                             stacked_shadow_frame_record(nullptr),
                             deoptimization_context_stack(nullptr),
                             frame_id_to_shadow_frame(nullptr),
                             name(nullptr),
                             pthread_self(0),
                             last_no_thread_suspension_cause(nullptr),
                             checkpoint_function(nullptr),
                             thread_local_start(nullptr),
                             thread_local_pos(nullptr),
                             thread_local_end(nullptr),
                             thread_local_limit(nullptr),
                             thread_local_objects(0),
                             thread_local_alloc_stack_top(nullptr),
                             thread_local_alloc_stack_end(nullptr),
                             mutator_lock(nullptr),
                             flip_function(nullptr),
                             method_verifier(nullptr),
                             thread_local_mark_stack(nullptr),
                             async_exception(nullptr),
                             top_reflective_handle_scope(nullptr) {
      std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
    }

    // The biased card table, see CardTable for details.
    uint8_t* card_table;
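
    // Illustrative write-barrier sketch (not from the original source; kCardShift and kCardDirty
    // are stand-ins for the real CardTable constants). Biasing the table lets a reference store
    // mark its card with a single write:
    //
    //   card_table[reinterpret_cast<uintptr_t>(obj) >> kCardShift] = kCardDirty;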

    // The pending exception or null.
    mirror::Throwable* exception;

    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
    // We leave extra space so there's room for the code that throws StackOverflowError.
    uint8_t* stack_end;

    // The top of the managed stack often manipulated directly by compiler generated code.
    ManagedStack managed_stack;

    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
    // normally set to the address of itself.
    uintptr_t* suspend_trigger;
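
    // Conceptual sketch (illustrative; the real poll is emitted by the compiler): code checks
    // for suspension by dereferencing the pointer stored here, so clearing the field turns the
    // next poll into a fault that the runtime's fault handler treats as a suspend request:
    //
    //   *(tlsPtr_.suspend_trigger);  // Harmless self-read normally; SEGV when set to nullptr.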

    // Every thread may have an associated JNI environment.
    JNIEnvExt* jni_env;

    // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
    // created thread.
    JNIEnvExt* tmp_jni_env;

    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
    // is easy but getting the address of Thread::Current is hard. This field can be read off of
    // Thread::Current to give the address.
    Thread* self;

    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during
    // thread start up, until the thread is registered and the local opeer is used.
    mirror::Object* opeer;
    jobject jpeer;

    // The "lowest addressable byte" of the stack.
    uint8_t* stack_begin;

    // Size of the stack.
    size_t stack_size;

    // Sampling profiler and AOT verification cannot happen on the same run, so we share
    // the same entry for the stack trace and the verifier deps.
    union DepsOrStackTraceSample {
      DepsOrStackTraceSample() {
        verifier_deps = nullptr;
        stack_trace_sample = nullptr;
      }
      // Pointer to previous stack trace captured by sampling profiler.
      std::vector<ArtMethod*>* stack_trace_sample;
      // When doing AOT verification, per-thread VerifierDeps.
      verifier::VerifierDeps* verifier_deps;
    } deps_or_stack_trace_sample;

    // The next thread in the wait set this thread is part of or null if not waiting.
    Thread* wait_next;

    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
    mirror::Object* monitor_enter_object;

    // Top of linked list of handle scopes or null for none.
    BaseHandleScope* top_handle_scope;

    // Needed to get the right ClassLoader in JNI_OnLoad, but also
    // useful for testing.
    jobject class_loader_override;

    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
    Context* long_jump_context;

    // Additional stack used by method instrumentation to store method and return pc values.
    // Stored as a pointer since std::map is not PACKED.
    // !DO NOT CHANGE! to std::unordered_map: the users of this map require an
    // ordered iteration on the keys (which are stack addresses).
    // Also see Thread::GetInstrumentationStack for the requirements on
    // manipulating and reading this map.
    std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* instrumentation_stack;

    // For gc purpose, a shadow frame record stack that keeps track of:
    // 1) shadow frames under construction.
    // 2) deoptimization shadow frames.
    StackedShadowFrameRecord* stacked_shadow_frame_record;

    // Deoptimization return value record stack.
    DeoptimizationContextRecord* deoptimization_context_stack;

    // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
    // Shadow frames may be created before deoptimization happens so that the debugger can
    // set local values there first.
    FrameIdToShadowFrame* frame_id_to_shadow_frame;

    // A cached copy of the java.lang.Thread's (modified UTF-8) name.
    // If this is not null or kThreadNameDuringStartup, then it owns the malloc memory holding
    // the string. Updated in an RCU-like manner.
    std::atomic<const char*> name;
    static_assert(std::atomic<const char*>::is_always_lock_free);
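
    // Illustrative reader-side sketch of the RCU-like scheme (not from the original source;
    // pairs with tls32_.num_name_readers above):
    //
    //   tls32_.num_name_readers.fetch_add(1u);  // Announce the read.
    //   const char* n = tlsPtr_.name.load();
    //   ...  // Use `n`; a writer defers freeing the old string while num_name_readers != 0.
    //   tls32_.num_name_readers.fetch_sub(1u, std::memory_order_release);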

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // If no_thread_suspension_ is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
    // requests another checkpoint, it goes to the checkpoint overflow list.
    Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Pending barriers that require passing or null if non-pending. Installation is guarded by
    // Locks::thread_suspend_count_lock_.
    // They work effectively as art::Barrier, but implemented directly using AtomicInteger and
    // futex to avoid the additional cost of a mutex and a condition variable, as used in
    // art::Barrier.
    AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
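
    // Conceptual sketch (illustrative only; `FutexWake` stands in for a direct futex(2) call):
    // each suspending thread decrements the barrier, and the final decrement wakes the thread
    // waiting for all of them to suspend:
    //
    //   if (barrier->fetch_sub(1) == 1) {  // fetch_sub returns the old value, so 1 -> now zero.
    //     FutexWake(barrier);
    //   }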

    // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
    uint8_t* thread_local_start;

    // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned
    // for potentially better performance.
    uint8_t* thread_local_pos;
    uint8_t* thread_local_end;

    // Thread local limit is how much we can expand the thread local buffer to; it is greater
    // than or equal to thread_local_end.
    uint8_t* thread_local_limit;

    size_t thread_local_objects;
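
    // Illustrative TLAB bump-pointer fast path (a sketch, not the real allocator, which lives
    // in compiled code and Heap):
    //
    //   uint8_t* pos = thread_local_pos;
    //   if (pos + alloc_size <= thread_local_end) {
    //     thread_local_pos = pos + alloc_size;  // Bump; no synchronization needed since the
    //     ++thread_local_objects;               // TLAB is private to this thread.
    //     return pos;
    //   }
    //   // Slow path: grow towards thread_local_limit or request a new TLAB from the heap.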

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    JniEntryPoints jni_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];

    // Thread-local allocation stack data/routines.
    StackReference<mirror::Object>* thread_local_alloc_stack_top;
    StackReference<mirror::Object>* thread_local_alloc_stack_end;

    // Pointer to the mutator lock.
    // This is the same as `Locks::mutator_lock_` but cached for faster state transitions.
    MutatorMutex* mutator_lock;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];

    // The function used for thread flip.
    Closure* flip_function;

    // Current method verifier, used for root marking.
    verifier::MethodVerifier* method_verifier;

    // Thread-local mark stack for the concurrent copying collector.
    gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;

    // The pending async-exception or null.
    mirror::Throwable* async_exception;

    // Top of the linked-list for reflective-handle scopes or null if none.
    BaseReflectiveHandleScope* top_reflective_handle_scope;
  } tlsPtr_;

  // Small thread-local cache to be used from the interpreter.
  // It is keyed by dex instruction pointer.
  // The value is opcode-dependent (e.g. a field offset).
  InterpreterCache interpreter_cache_;
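
  // Illustrative lookup sketch (hypothetical signature; see InterpreterCache for the real API):
  //
  //   size_t value;
  //   if (interpreter_cache_.Get(dex_instruction_ptr, &value)) {
  //     ...  // Fast path, e.g. reuse a cached field offset for an iget.
  //   }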

  // All fields below this line should not be accessed by native code. This means these fields
  // can be modified, rearranged, added or removed without having to modify asm_support.h.

  // Guards the 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or null if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Debug disable read barrier count; only checked in debug builds, and only in the runtime.
  uint8_t debug_disallow_read_barrier_ = 0;

  // Note that it is not in the packed struct and may not be accessed during cross compilation.
  uintptr_t poison_object_cookie_ = 0;

  // Pending extra checkpoints if checkpoint_function_ is already used.
  std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly
  // by compiled code or entrypoints.
  SafeMap<std::string, std::unique_ptr<TLSData>, std::less<>> custom_tls_
      GUARDED_BY(Locks::custom_tls_lock_);

#ifndef __BIONIC__
  __attribute__((tls_model("initial-exec")))
  static thread_local Thread* self_tls_;
#endif

  // True if the thread is some form of runtime thread (e.g. GC or JIT).
  bool is_runtime_thread_;

  // Set during execution of JNI methods that get field and method ids as part of determining if
  // the caller is allowed to access all fields and methods in the Core Platform API.
  uint32_t core_platform_api_cookie_ = 0;

  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedThreadStateChange;
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread and Destroy.

  friend class EntrypointsOrderTest;  // To test the order of tls entries.
  friend class JniCompilerTest;  // For intercepting JNI entrypoint calls.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
 public:
  ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
                                               bool enabled = true)
      ACQUIRE(Roles::uninterruptible_)
      : enabled_(enabled) {
    if (!enabled_) {
      return;
    }
    if (kIsDebugBuild) {
      self_ = Thread::Current();
      old_cause_ = self_->StartAssertNoThreadSuspension(cause);
    } else {
      Roles::uninterruptible_.Acquire();  // No-op.
    }
  }
  ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
    if (!enabled_) {
      return;
    }
    if (kIsDebugBuild) {
      self_->EndAssertNoThreadSuspension(old_cause_);
    } else {
      Roles::uninterruptible_.Release();  // No-op.
    }
  }

 private:
  Thread* self_;
  const bool enabled_;
  const char* old_cause_;
};
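
// Illustrative usage (a sketch; the cause string is hypothetical):
//
//   {
//     ScopedAssertNoThreadSuspension sants("Visiting roots");
//     ...  // Code that must not suspend; debug builds abort if it tries to.
//   }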

class ScopedAllowThreadSuspension {
 public:
  ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      self_ = Thread::Current();
      old_cause_ = self_->EndAssertNoThreadSuspension();
    } else {
      Roles::uninterruptible_.Release();  // No-op.
    }
  }
  ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
    } else {
      Roles::uninterruptible_.Acquire();  // No-op.
    }
  }

 private:
  Thread* self_;
  const char* old_cause_;
};

class ScopedStackedShadowFramePusher {
 public:
  ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
      : self_(self), type_(type) {
    self_->PushStackedShadowFrame(sf, type);
  }
  ~ScopedStackedShadowFramePusher() {
    self_->PopStackedShadowFrame(type_);
  }

 private:
  Thread* const self_;
  const StackedShadowFrameType type_;

  DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
};

// Only works for debug builds.
class ScopedDebugDisallowReadBarriers {
 public:
  explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
    self_->ModifyDebugDisallowReadBarrier(1);
  }
  ~ScopedDebugDisallowReadBarriers() {
    self_->ModifyDebugDisallowReadBarrier(-1);
  }

 private:
  Thread* const self_;
};

class ScopedTransitioningToRunnable : public ValueObject {
 public:
  explicit ScopedTransitioningToRunnable(Thread* self)
      : self_(self) {
    DCHECK_EQ(self, Thread::Current());
    if (kUseReadBarrier) {
      self_->SetIsTransitioningToRunnable(true);
    }
  }

  ~ScopedTransitioningToRunnable() {
    if (kUseReadBarrier) {
      self_->SetIsTransitioningToRunnable(false);
    }
  }

 private:
  Thread* const self_;
};

class ThreadLifecycleCallback {
 public:
  virtual ~ThreadLifecycleCallback() {}

  virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
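
// Illustrative implementation sketch (hypothetical subclass, not part of the runtime):
//
//   class LoggingThreadCallback : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "thread started: " << *self;
//     }
//     void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "thread dying: " << *self;
//     }
//   };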

// Store an exception from the thread and suppress it for the duration of this object.
class ScopedExceptionStorage {
 public:
  explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
  ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Thread* self_;
  StackHandleScope<1> hs_;
  MutableHandle<mirror::Throwable> excp_;
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, StackedShadowFrameType type);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_