/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_env_ext.h"

#include <algorithm>
#include <vector>

#include "check_jni.h"
#include "indirect_reference_table.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
#include "lock_word.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#include "thread-inl.h"

namespace art {

static constexpr size_t kMonitorsInitial = 32;  // Arbitrary.
static constexpr size_t kMonitorsMax = 4096;  // Arbitrary sanity check.

static constexpr size_t kLocalsInitial = 64;  // Arbitrary.

// Checking "locals" requires the mutator lock, but at creation time we're really only interested
// in validity, which isn't changing. To avoid grabbing the mutator lock, the check is factored
// out into this helper and tagged with NO_THREAD_SAFETY_ANALYSIS.
static bool CheckLocalsValid(JNIEnvExt* in) NO_THREAD_SAFETY_ANALYSIS {
  if (in == nullptr) {
    return false;
  }
  return in->locals.IsValid();
}

jint JNIEnvExt::GetEnvHandler(JavaVMExt* vm, /*out*/void** env, jint version) {
  UNUSED(vm);
  // GetEnv always returns a JNIEnv* for the most current supported JNI version,
  // and, unlike other calls that take a JNI version, doesn't care if you supply
  // JNI_VERSION_1_1, which we don't otherwise support.
  if (JavaVMExt::IsBadJniVersion(version) && version != JNI_VERSION_1_1) {
    return JNI_EVERSION;
  }
  Thread* thread = Thread::Current();
  CHECK(thread != nullptr);
  *env = thread->GetJniEnv();
  return JNI_OK;
}
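
// Illustrative only, not part of the original file: native callers usually reach this handler
// through the standard JavaVM::GetEnv entry point (the exact wiring lives elsewhere in the
// runtime). A minimal sketch of that call:
//
//   JNIEnv* env = nullptr;
//   jint res = vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6);
//   if (res != JNI_OK) {
//     // Thread not attached, or the requested version is unsupported (JNI_EVERSION).
//   }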

JNIEnvExt* JNIEnvExt::Create(Thread* self_in, JavaVMExt* vm_in) {
  std::unique_ptr<JNIEnvExt> ret(new JNIEnvExt(self_in, vm_in));
  if (CheckLocalsValid(ret.get())) {
    return ret.release();
  }
  return nullptr;
}

JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in)
    : self(self_in),
      vm(vm_in),
      local_ref_cookie(IRT_FIRST_SEGMENT),
      locals(kLocalsInitial, kLocalsMax, kLocal, false),
      check_jni(false),
      runtime_deleted(false),
      critical(0),
      monitors("monitors", kMonitorsInitial, kMonitorsMax) {
  functions = unchecked_functions = GetJniNativeInterface();
  if (vm->IsCheckJniEnabled()) {
    SetCheckJniEnabled(true);
  }
}

void JNIEnvExt::SetFunctionsToRuntimeShutdownFunctions() {
  functions = GetRuntimeShutdownNativeInterface();
  runtime_deleted = true;
}

JNIEnvExt::~JNIEnvExt() {
}

jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) {
  if (obj == nullptr) {
    return nullptr;
  }
  return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
}

void JNIEnvExt::DeleteLocalRef(jobject obj) {
  if (obj != nullptr) {
    locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
  }
}

void JNIEnvExt::SetCheckJniEnabled(bool enabled) {
  check_jni = enabled;
  functions = enabled ? GetCheckJniNativeInterface() : GetJniNativeInterface();
}

void JNIEnvExt::DumpReferenceTables(std::ostream& os) {
  locals.Dump(os);
  monitors.Dump(os);
}

void JNIEnvExt::PushFrame(int capacity ATTRIBUTE_UNUSED) {
  // TODO: take 'capacity' into account.
  stacked_local_ref_cookies.push_back(local_ref_cookie);
  local_ref_cookie = locals.GetSegmentState();
}

void JNIEnvExt::PopFrame() {
  locals.SetSegmentState(local_ref_cookie);
  local_ref_cookie = stacked_local_ref_cookies.back();
  stacked_local_ref_cookies.pop_back();
}
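
// Illustrative only, not part of the original file: PushFrame/PopFrame save and restore the
// local reference segment state, which is what the JNI local-frame API relies on. A sketch of
// the corresponding native-code usage (assuming the standard PushLocalFrame/PopLocalFrame):
//
//   env->PushLocalFrame(16);               // remembers the current segment state
//   jstring s = env->NewStringUTF("tmp");  // locals created here belong to the new segment
//   env->PopLocalFrame(nullptr);           // restores the segment state; 's' is now stale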

// Note: the offset code is brittle, as we can't use OFFSETOF_MEMBER or offsetof easily. Thus,
// there are tests in jni_internal_test that check the computed values against the real offsets.

// This is encoding the knowledge of the structure and layout of JNIEnv fields.
static size_t JNIEnvSize(size_t pointer_size) {
  // A single pointer.
  return pointer_size;
}

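// Rough sketch, not authoritative (see jni_env_ext.h and the checks in jni_internal_test), of
// the leading JNIEnvExt layout that the hand-computed offsets below assume:
//
//   functions          - the JNIEnv function table pointer (JNIEnvSize above)
//   Thread* self       - one pointer
//   JavaVMExt* vm      - one pointer
//   local_ref_cookie   - 4 bytes (the "+ 4" below)
//   padding            - pointer_size - 4 bytes, to realign to pointer size
//   locals             - IndirectReferenceTable; its segment state sits at a fixed inner offset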
Offset JNIEnvExt::SegmentStateOffset(size_t pointer_size) {
  size_t locals_offset = JNIEnvSize(pointer_size) +
                         2 * pointer_size +   // Thread* self + JavaVMExt* vm.
                         4 +                  // local_ref_cookie.
                         (pointer_size - 4);  // Padding.
  size_t irt_segment_state_offset =
      IndirectReferenceTable::SegmentStateOffset(pointer_size).Int32Value();
  return Offset(locals_offset + irt_segment_state_offset);
}

Offset JNIEnvExt::LocalRefCookieOffset(size_t pointer_size) {
  return Offset(JNIEnvSize(pointer_size) +
                2 * pointer_size);  // Thread* self + JavaVMExt* vm.
}

Offset JNIEnvExt::SelfOffset(size_t pointer_size) {
  return Offset(JNIEnvSize(pointer_size));
}

// Use some defining part of the caller's frame as the identifying mark for the JNI segment.
static uintptr_t GetJavaCallFrame(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
  NthCallerVisitor zeroth_caller(self, 0, false);
  zeroth_caller.WalkStack();
  if (zeroth_caller.caller == nullptr) {
    // No Java code, must be from pure native code.
    return 0;
  } else if (zeroth_caller.GetCurrentQuickFrame() == nullptr) {
    // Shadow frame = interpreter. Use the actual shadow frame's address.
    DCHECK(zeroth_caller.GetCurrentShadowFrame() != nullptr);
    return reinterpret_cast<uintptr_t>(zeroth_caller.GetCurrentShadowFrame());
  } else {
    // Quick frame = compiled code. Use the bottom of the frame.
    return reinterpret_cast<uintptr_t>(zeroth_caller.GetCurrentQuickFrame());
  }
}

void JNIEnvExt::RecordMonitorEnter(jobject obj) {
  locked_objects_.push_back(std::make_pair(GetJavaCallFrame(self), obj));
}

static std::string ComputeMonitorDescription(Thread* self,
                                             jobject obj) SHARED_REQUIRES(Locks::mutator_lock_) {
  mirror::Object* o = self->DecodeJObject(obj);
  if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
      Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // Getting the identity hashcode here would result in lock inflation and suspension of the
    // current thread, which isn't safe if this is the only runnable thread.
    return StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)",
                        reinterpret_cast<intptr_t>(o),
                        PrettyTypeOf(o).c_str());
  } else {
    // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
    // we get the pretty type before we call IdentityHashCode.
    const std::string pretty_type(PrettyTypeOf(o));
    return StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
  }
}

static void RemoveMonitors(Thread* self,
                           uintptr_t frame,
                           ReferenceTable* monitors,
                           std::vector<std::pair<uintptr_t, jobject>>* locked_objects)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  auto kept_end = std::remove_if(
      locked_objects->begin(),
      locked_objects->end(),
      [self, frame, monitors](const std::pair<uintptr_t, jobject>& pair)
          SHARED_REQUIRES(Locks::mutator_lock_) {
        if (frame == pair.first) {
          mirror::Object* o = self->DecodeJObject(pair.second);
          monitors->Remove(o);
          return true;
        }
        return false;
      });
  locked_objects->erase(kept_end, locked_objects->end());
}

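// Illustrative only, not part of the original file: the mismatch the checks below are meant to
// catch, as seen from native code using the JNI monitor API (a sketch; these hooks are invoked
// from the CheckJNI MonitorEnter/MonitorExit paths):
//
//   env->MonitorEnter(obj);   // recorded against the current Java call frame ("session")
//   // ... return to Java, then re-enter native code through a different call ...
//   env->MonitorExit(obj);    // same object, different frame -> "wasn't locked here" abort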
void JNIEnvExt::CheckMonitorRelease(jobject obj) {
  uintptr_t current_frame = GetJavaCallFrame(self);
  std::pair<uintptr_t, jobject> exact_pair = std::make_pair(current_frame, obj);
  auto it = std::find(locked_objects_.begin(), locked_objects_.end(), exact_pair);
  bool will_abort = false;
  if (it != locked_objects_.end()) {
    locked_objects_.erase(it);
  } else {
    // Check whether this monitor was locked in another JNI "session."
    mirror::Object* mirror_obj = self->DecodeJObject(obj);
    for (std::pair<uintptr_t, jobject>& pair : locked_objects_) {
      if (self->DecodeJObject(pair.second) == mirror_obj) {
        std::string monitor_descr = ComputeMonitorDescription(self, pair.second);
        vm->JniAbortF("<JNI MonitorExit>",
                      "Unlocking monitor that wasn't locked here: %s",
                      monitor_descr.c_str());
        will_abort = true;
        break;
      }
    }
  }

  // When we abort, also make sure that any locks from the current "session" are removed from
  // the monitors table; otherwise the GC may visit local objects during the abort, and those
  // objects won't be valid anymore.
  if (will_abort) {
    RemoveMonitors(self, current_frame, &monitors, &locked_objects_);
  }
}

void JNIEnvExt::CheckNoHeldMonitors() {
  uintptr_t current_frame = GetJavaCallFrame(self);
  // The locked_objects_ are grouped by their stack frame component, as this enforces structured
  // locking, and the groups form a stack. So the current frame entries are at the end. Check
  // whether the vector is empty, and when there are elements, whether the last element belongs
  // to this call - that signals there are monitors locked in this call that were never unlocked.
  if (!locked_objects_.empty()) {
    std::pair<uintptr_t, jobject>& pair = locked_objects_[locked_objects_.size() - 1];
    if (pair.first == current_frame) {
      std::string monitor_descr = ComputeMonitorDescription(self, pair.second);
      vm->JniAbortF("<JNI End>",
                    "Still holding a locked object on JNI end: %s",
                    monitor_descr.c_str());
      // When we abort, also make sure that any locks from the current "session" are removed from
      // the monitors table, otherwise we may visit local objects in GC during abort.
      RemoveMonitors(self, current_frame, &monitors, &locked_objects_);
    } else if (kIsDebugBuild) {
      // Make sure there are really no other entries and our checking worked as expected.
      for (std::pair<uintptr_t, jobject>& check_pair : locked_objects_) {
        CHECK_NE(check_pair.first, current_frame);
      }
    }
  }
}

}  // namespace art