/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_
#define ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_

#include "base/casts.h"
#include "java_vm_ext.h"
#include "jni_env_ext-inl.h"
#include "mirror/art_field.h"
#include "read_barrier.h"
#include "thread-inl.h"
#include "verify_object.h"

namespace art {

// Scoped change into and out of a particular state. Handles Runnable transitions that require
// more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and
// ScopedObjectAccess are used to handle the change into Runnable to get direct access to objects;
// the unchecked variant doesn't aid annotalysis.
class ScopedThreadStateChange {
 public:
  ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
    if (UNLIKELY(self_ == NULL)) {
      // Value chosen arbitrarily and won't be used in the destructor since self_ == NULL.
      old_thread_state_ = kTerminated;
      Runtime* runtime = Runtime::Current();
      CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
    } else {
      DCHECK_EQ(self, Thread::Current());
      // Read state without locks, ok as state is effectively thread local and we're not interested
      // in the suspend count (this will be handled in the runnable transitions).
      old_thread_state_ = self->GetState();
      if (old_thread_state_ != new_thread_state) {
        if (new_thread_state == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else if (old_thread_state_ == kRunnable) {
          self_->TransitionFromRunnableToSuspended(new_thread_state);
        } else {
          // A transition between two effectively suspended states; ok to set the state directly.
          self_->SetState(new_thread_state);
        }
      }
    }
  }

  ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
    if (UNLIKELY(self_ == NULL)) {
      if (!expected_has_no_thread_) {
        Runtime* runtime = Runtime::Current();
        bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown(nullptr);
        CHECK(shutting_down);
      }
    } else {
      if (old_thread_state_ != thread_state_) {
        if (old_thread_state_ == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else if (thread_state_ == kRunnable) {
          self_->TransitionFromRunnableToSuspended(old_thread_state_);
        } else {
          // A transition between two effectively suspended states; ok to set the state directly.
          self_->SetState(old_thread_state_);
        }
      }
    }
  }

  Thread* Self() const {
    return self_;
  }

 protected:
  // Constructor used by ScopedObjectAccessUnchecked for an unattached thread that has access to
  // the JavaVM*.
  ScopedThreadStateChange()
      : self_(NULL), thread_state_(kTerminated), old_thread_state_(kTerminated),
        expected_has_no_thread_(true) {}

  Thread* const self_;
  const ThreadState thread_state_;

 private:
  ThreadState old_thread_state_;
  const bool expected_has_no_thread_;

  friend class ScopedObjectAccessUnchecked;
  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};
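
// Illustrative sketch (not part of the original header): scoping a blocking operation so the
// thread is not Runnable while it waits. The state name is one plausible choice from ThreadState.
//
//   Thread* self = Thread::Current();
//   {
//     ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
//     // Blocking work here: GC and thread suspension can proceed without stopping this thread.
//   }  // Destructor restores the previous state (e.g. back to kRunnable).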

// Assumes we are already runnable.
class ScopedObjectAccessAlreadyRunnable {
 public:
  Thread* Self() const {
    return self_;
  }

  JNIEnvExt* Env() const {
    return env_;
  }

  JavaVMExt* Vm() const {
    return vm_;
  }

  bool ForceCopy() const {
    return vm_->ForceCopy();
  }

  /*
   * Add a local reference for an object to the indirect reference table associated with the
   * current stack frame. When the native function returns, the reference will be discarded.
   *
   * We need to allow the same reference to be added multiple times, and cope with NULL.
   *
   * This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and
   * it's best if we don't grab a mutex.
   */
  template<typename T>
  T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    if (obj == NULL) {
      return NULL;
    }
    DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
    return Env()->AddLocalReference<T>(obj);
  }

  template<typename T>
  T Decode(jobject obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    return down_cast<T>(Self()->DecodeJObject(obj));
  }
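
  // Illustrative sketch (not from the original header): converting between JNI handles and raw
  // mirror pointers inside a scope that is already Runnable. `soa` and `java_obj` are
  // hypothetical names.
  //
  //   mirror::Object* raw = soa.Decode<mirror::Object*>(java_obj);  // jobject -> raw pointer.
  //   jobject handle = soa.AddLocalReference<jobject>(raw);         // raw pointer -> jobject.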

  mirror::ArtField* DecodeField(jfieldID fid) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    CHECK(!kMovingFields);
    mirror::ArtField* field = reinterpret_cast<mirror::ArtField*>(fid);
    return ReadBarrier::BarrierForRoot<mirror::ArtField, kWithReadBarrier>(&field);
  }

  jfieldID EncodeField(mirror::ArtField* field) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    CHECK(!kMovingFields);
    return reinterpret_cast<jfieldID>(field);
  }

  mirror::ArtMethod* DecodeMethod(jmethodID mid) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    CHECK(!kMovingMethods);
    mirror::ArtMethod* method = reinterpret_cast<mirror::ArtMethod*>(mid);
    return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(&method);
  }

  jmethodID EncodeMethod(mirror::ArtMethod* method) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
    CHECK(!kMovingMethods);
    return reinterpret_cast<jmethodID>(method);
  }
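
  // Illustrative sketch (not from the original header): because fields and methods are
  // non-moving (see the CHECKs above), a jfieldID/jmethodID is just the pointer value, adjusted
  // through a read barrier on the way back. `soa` and `art_field` are hypothetical names.
  //
  //   jfieldID fid = soa.EncodeField(art_field);
  //   mirror::ArtField* same_field = soa.DecodeField(fid);  // Round-trips to art_field.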

  bool IsRunnable() const {
    return self_->GetState() == kRunnable;
  }

 protected:
  explicit ScopedObjectAccessAlreadyRunnable(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : self_(ThreadForEnv(env)), env_(down_cast<JNIEnvExt*>(env)), vm_(env_->vm) {
  }

  explicit ScopedObjectAccessAlreadyRunnable(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : self_(self), env_(down_cast<JNIEnvExt*>(self->GetJniEnv())),
        vm_(env_ != nullptr ? env_->vm : nullptr) {
  }

  // Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't
  // change into Runnable or acquire a share on the mutator_lock_.
  explicit ScopedObjectAccessAlreadyRunnable(JavaVM* vm)
      : self_(nullptr), env_(nullptr), vm_(down_cast<JavaVMExt*>(vm)) {}

  // Here purely to force inlining.
  ~ScopedObjectAccessAlreadyRunnable() ALWAYS_INLINE {
  }

  // Self thread, can be null.
  Thread* const self_;
  // The full JNIEnv.
  JNIEnvExt* const env_;
  // The full JavaVM.
  JavaVMExt* const vm_;
};

// Entry/exit processing for transitions from Native to Runnable (i.e. within JNI functions).
//
// This class performs the necessary thread state switching to and from Runnable and lets us
// amortize the cost of working out the current thread. Additionally it lets us check (and repair)
// apps that are using a JNIEnv on the wrong thread. The class also decodes and encodes Objects
// into jobjects via its methods. Performing this here enforces the Runnable thread state for use
// of Object, thereby inhibiting the Object being modified by GC whilst native or VM code is also
// manipulating the Object.
//
// The destructor transitions back to the previous thread state, typically Native. In this state
// GC and thread suspension may occur.
//
// For annotalysis the subclass ScopedObjectAccess (below) makes it explicit that a shared hold of
// the mutator_lock_ will be acquired on construction.
class ScopedObjectAccessUnchecked : public ScopedObjectAccessAlreadyRunnable {
 public:
  explicit ScopedObjectAccessUnchecked(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : ScopedObjectAccessAlreadyRunnable(env), tsc_(Self(), kRunnable) {
    Self()->VerifyStack();
    Locks::mutator_lock_->AssertSharedHeld(Self());
  }

  explicit ScopedObjectAccessUnchecked(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : ScopedObjectAccessAlreadyRunnable(self), tsc_(self, kRunnable) {
    Self()->VerifyStack();
    Locks::mutator_lock_->AssertSharedHeld(Self());
  }

  // Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't
  // change into Runnable or acquire a share on the mutator_lock_.
  explicit ScopedObjectAccessUnchecked(JavaVM* vm) ALWAYS_INLINE
      : ScopedObjectAccessAlreadyRunnable(vm), tsc_() {}

 private:
  // The scoped thread state change makes sure that we are runnable and restores the thread state
  // in the destructor.
  const ScopedThreadStateChange tsc_;

  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccessUnchecked);
};

// Annotalysis-helping variant of the above.
class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
 public:
  explicit ScopedObjectAccess(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
      : ScopedObjectAccessUnchecked(env) {
  }

  explicit ScopedObjectAccess(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
      : ScopedObjectAccessUnchecked(self) {
  }

  ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {
    // The base class destructor, invoked after this one, will release the shared hold of the lock.
  }

 private:
  // TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that
  // routines operating with just a VM are sound; they are not, but when you have just a VM
  // you cannot call the unsound routines.
  explicit ScopedObjectAccess(JavaVM* vm)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(vm) {}

  friend class ScopedCheck;
  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccess);
};
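
// Illustrative sketch (not part of the original header) of the usual pattern in a JNI entry
// point; the native method itself is hypothetical.
//
//   static jobject MyClass_transform(JNIEnv* env, jobject java_this) {
//     ScopedObjectAccess soa(env);  // Native -> Runnable; acquires a share of mutator_lock_.
//     mirror::Object* receiver = soa.Decode<mirror::Object*>(java_this);
//     // Safe to work with raw objects while `soa` is in scope.
//     return soa.AddLocalReference<jobject>(receiver);
//   }  // ~ScopedObjectAccess: back to Native; GC and suspension may occur again.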

}  // namespace art

#endif  // ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_