/* Copyright (C) 2017 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include <functional>
#include <iosfwd>
#include <mutex>

#include "deopt_manager.h"

#include "art_jvmti.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/mutex-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/modifiers.h"
#include "events-inl.h"
#include "gc/collector_type.h"
#include "gc/heap.h"
#include "gc/scoped_gc_critical_section.h"
#include "instrumentation.h"
#include "jit/jit.h"
#include "jni/jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "read_barrier_config.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "ti_phase.h"

namespace openjdkjvmti {
// TODO We should make this much more selective in the future so we only return true when we
// actually care about the method at this time (i.e. active frames had locals changed). For now we
// just assume that if anything has changed any frame's locals we care about all methods. If
// nothing has, we only care about methods with active breakpoints on them. In the future we
// should probably rewrite all of this to instead do this at the ShadowFrame or thread granularity.
bool JvmtiMethodInspectionCallback::IsMethodBeingInspected(art::ArtMethod* method) {
  // In non-java-debuggable runtimes the breakpoint check would miss if we have breakpoints on
  // methods that are inlined. Since these features are best effort in non-java-debuggable
  // runtimes it is OK to be less precise. For debuggable runtimes, inlining is disabled.
  return manager_->HaveLocalsChanged() || manager_->MethodHasBreakpoints(method);
}

DeoptManager::DeoptManager()
  : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock",
                                static_cast<art::LockLevel>(
                                    art::LockLevel::kClassLinkerClassesLock + 1)),
    deoptimization_condition_("JVMTI_DeoptimizationCondition", deoptimization_status_lock_),
    performing_deoptimization_(false),
    global_deopt_count_(0),
    deopter_count_(0),
    breakpoint_status_lock_("JVMTI_BreakpointStatusLock",
                            static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1)),
    inspection_callback_(this),
    set_local_variable_called_(false) { }
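
// Register the JvmtiMethodInspectionCallback (see IsMethodBeingInspected above) with the runtime
// so it will consult us about methods under inspection. All threads are suspended while the
// runtime's callback list is modified.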
void DeoptManager::Setup() {
  art::ScopedThreadStateChange stsc(art::Thread::Current(),
                                    art::ThreadState::kWaitingForDebuggerToAttach);
  art::ScopedSuspendAll ssa("Add method Inspection Callback");
  art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
  callbacks->AddMethodInspectionCallback(&inspection_callback_);
}

void DeoptManager::Shutdown() {
  art::ScopedThreadStateChange stsc(art::Thread::Current(),
                                    art::ThreadState::kWaitingForDebuggerToAttach);
  art::ScopedSuspendAll ssa("remove method Inspection Callback");
  art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
  callbacks->RemoveMethodInspectionCallback(&inspection_callback_);
}
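
// Dump the current deoptimization state (deopter and global-deopt counts, registered breakpoints
// and every thread's force-interpreter count) to the given stream.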
void DeoptManager::DumpDeoptInfo(art::Thread* self, std::ostream& stream) {
  art::ScopedObjectAccess soa(self);
  art::MutexLock mutll(self, *art::Locks::thread_list_lock_);
  art::MutexLock mudsl(self, deoptimization_status_lock_);
  art::MutexLock mubsl(self, breakpoint_status_lock_);
  stream << "Deoptimizer count: " << deopter_count_ << "\n";
  stream << "Global deopt count: " << global_deopt_count_ << "\n";
  stream << "Can perform OSR: " << !set_local_variable_called_.load() << "\n";
  for (const auto& [bp, loc] : this->breakpoint_status_) {
    stream << "Breakpoint: " << bp->PrettyMethod() << " @ 0x" << std::hex << loc << "\n";
  }
  struct DumpThreadDeoptCount : public art::Closure {
   public:
    DumpThreadDeoptCount(std::ostream& stream, std::mutex& mu)
        : cnt_(0), stream_(stream), mu_(mu) {}
    void Run(art::Thread* self) override {
      {
        std::lock_guard<std::mutex> lg(mu_);
        std::string name;
        self->GetThreadName(name);
        stream_ << "Thread " << name << " (id: " << std::dec << self->GetThreadId()
                << ") force interpreter count " << self->ForceInterpreterCount() << "\n";
      }
      // Increment this after unlocking the mutex so we won't race its destructor.
      cnt_++;
    }

    void WaitForCount(size_t threads) {
      while (cnt_.load() != threads) {
        sched_yield();
      }
    }

   private:
    std::atomic<size_t> cnt_;
    std::ostream& stream_;
    std::mutex& mu_;
  };

  std::mutex mu;
  DumpThreadDeoptCount dtdc(stream, mu);
  auto func = [](art::Thread* thread, void* ctx) {
    reinterpret_cast<DumpThreadDeoptCount*>(ctx)->Run(thread);
  };
  art::Runtime::Current()->GetThreadList()->ForEach(func, &dtdc);
}
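
// Work out how much jvmti support the runtime can give us. On a non-java-debuggable runtime we
// either make it debuggable (only possible while still in the ON_LOAD phase) or warn that only
// best-effort kArtTiVersion environments are available; in both cases the boot image code is
// deoptimized.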
void DeoptManager::FinishSetup() {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, deoptimization_status_lock_);

  art::Runtime* runtime = art::Runtime::Current();
  // See if we need to do anything.
  if (!runtime->IsJavaDebuggable()) {
    // See if we can enable all JVMTI functions. If this is false, only kArtTiVersion agents can be
    // retrieved and they will all be best-effort.
    if (PhaseUtil::GetPhaseUnchecked() == JVMTI_PHASE_ONLOAD) {
      // We are still early enough to change the compiler options and get full JVMTI support.
      LOG(INFO) << "Openjdkjvmti plugin loaded on a non-debuggable runtime. Changing runtime to "
                << "debuggable state. Please pass '--debuggable' to dex2oat and "
                << "'-Xcompiler-option --debuggable' to dalvikvm in the future.";
      DCHECK(runtime->GetJit() == nullptr) << "Jit should not be running yet!";
      runtime->AddCompilerOption("--debuggable");
      runtime->SetJavaDebuggable(true);
    } else {
      LOG(WARNING) << "Openjdkjvmti plugin was loaded on a non-debuggable Runtime. Plugin was "
                   << "loaded too late to change runtime state to DEBUGGABLE. Only kArtTiVersion "
                   << "(0x" << std::hex << kArtTiVersion << ") environments are available. Some "
                   << "functionality might not work properly.";
      if (runtime->GetJit() == nullptr &&
          runtime->GetJITOptions()->UseJitCompilation() &&
          !runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
        // If we don't have a jit we should try to start the jit for performance reasons. We only
        // need to do this for late attach on non-debuggable processes because for debuggable
        // processes we already rely on jit and we cannot force this jit to start if we are still
        // in OnLoad since the runtime hasn't started up sufficiently. This is only expected to
        // happen on userdebug/eng builds.
        LOG(INFO) << "Attempting to start jit for openjdkjvmti plugin.";
        // Note: use rwx allowed = true, because if this is the system server, we will not be
        // allowed to allocate any JIT code cache anyway.
        runtime->CreateJitCodeCache(/*rwx_memory_allowed=*/true);
        runtime->CreateJit();
        if (runtime->GetJit() == nullptr) {
          LOG(WARNING) << "Could not start jit for openjdkjvmti plugin. This process might be "
                       << "quite slow as it is running entirely in the interpreter. Try running "
                       << "'setenforce 0' and restarting this process.";
        }
      }
    }
    runtime->DeoptimizeBootImage();
  }
}
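
// Returns true if at least one breakpoint is currently set on the given method.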
bool DeoptManager::MethodHasBreakpoints(art::ArtMethod* method) {
  art::MutexLock lk(art::Thread::Current(), breakpoint_status_lock_);
  return MethodHasBreakpointsLocked(method);
}

bool DeoptManager::MethodHasBreakpointsLocked(art::ArtMethod* method) {
  auto elem = breakpoint_status_.find(method);
  return elem != breakpoint_status_.end() && elem->second != 0;
}
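
// Request or drop full (global) deoptimization. The deoptimization_status_lock_ acquired here is
// released further down the call chain, either by WaitForDeoptimizationToFinish or by the
// ScopedDeoptimizationContext that performs the actual (un)deoptimization.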
void DeoptManager::RemoveDeoptimizeAllMethods() {
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
  deoptimization_status_lock_.ExclusiveLock(self);
  RemoveDeoptimizeAllMethodsLocked(self);
}

void DeoptManager::AddDeoptimizeAllMethods() {
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
  deoptimization_status_lock_.ExclusiveLock(self);
  AddDeoptimizeAllMethodsLocked(self);
}
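
// Register a breakpoint on the (canonical) method. The first breakpoint on a method triggers
// deoptimization: a global deoptimization for default methods, a limited (single-method) one
// otherwise, unless the runtime is already forcing interpretation of everything.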
void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) {
  DCHECK(method->IsInvokable());
  DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
  DCHECK(!method->IsNative()) << method->PrettyMethod();

  art::Thread* self = art::Thread::Current();
  method = method->GetCanonicalMethod();
  bool is_default = method->IsDefault();

  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
  deoptimization_status_lock_.ExclusiveLock(self);
  {
    breakpoint_status_lock_.ExclusiveLock(self);

    DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";

    if (MethodHasBreakpointsLocked(method)) {
      // Don't need to do anything extra.
      breakpoint_status_[method]++;
      // Another thread might be deoptimizing the very method we just added new breakpoints for.
      // Wait for any deopts to finish before moving on.
      breakpoint_status_lock_.ExclusiveUnlock(self);
      WaitForDeoptimizationToFinish(self);
      return;
    }
    breakpoint_status_[method] = 1;
    breakpoint_status_lock_.ExclusiveUnlock(self);
  }
  auto instrumentation = art::Runtime::Current()->GetInstrumentation();
  if (instrumentation->IsForcedInterpretOnly()) {
    // We are already interpreting everything so no need to do anything.
    deoptimization_status_lock_.ExclusiveUnlock(self);
    return;
  } else if (is_default) {
    AddDeoptimizeAllMethodsLocked(self);
  } else {
    PerformLimitedDeoptimization(self, method);
  }
}
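
// Remove a single breakpoint from the method, undoing the matching deoptimization once the last
// breakpoint on it is gone.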
void DeoptManager::RemoveMethodBreakpoint(art::ArtMethod* method) {
  DCHECK(method->IsInvokable()) << method->PrettyMethod();
  DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
  DCHECK(!method->IsNative()) << method->PrettyMethod();

  art::Thread* self = art::Thread::Current();
  method = method->GetCanonicalMethod();
  bool is_default = method->IsDefault();

  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
  // Ideally we should do a ScopedSuspendAll right here to get the full mutator_lock_ that we might
  // need, but since that is very heavy we will instead just use a condition variable to make sure
  // we don't race with ourselves.
  deoptimization_status_lock_.ExclusiveLock(self);
  bool is_last_breakpoint;
  {
    art::MutexLock mu(self, breakpoint_status_lock_);

    DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
    DCHECK(MethodHasBreakpointsLocked(method)) << "Breakpoint on a method was removed without "
                                               << "breakpoints present!";
    breakpoint_status_[method] -= 1;
    is_last_breakpoint = (breakpoint_status_[method] == 0);
  }
  auto instrumentation = art::Runtime::Current()->GetInstrumentation();
  if (UNLIKELY(instrumentation->IsForcedInterpretOnly())) {
    // We don't need to do anything since we are interpreting everything anyway.
    deoptimization_status_lock_.ExclusiveUnlock(self);
    return;
  } else if (is_last_breakpoint) {
    if (UNLIKELY(is_default)) {
      RemoveDeoptimizeAllMethodsLocked(self);
    } else {
      PerformLimitedUndeoptimization(self, method);
    }
  } else {
    // Another thread might be deoptimizing the very methods we just removed breakpoints from. Wait
    // for any deopts to finish before moving on.
    WaitForDeoptimizationToFinish(self);
  }
}

void DeoptManager::WaitForDeoptimizationToFinishLocked(art::Thread* self) {
  while (performing_deoptimization_) {
    deoptimization_condition_.Wait(self);
  }
}

void DeoptManager::WaitForDeoptimizationToFinish(art::Thread* self) {
  WaitForDeoptimizationToFinishLocked(self);
  deoptimization_status_lock_.ExclusiveUnlock(self);
}

// Users should make sure that only gc-critical-section safe code is used while a
// ScopedDeoptimizationContext exists.
class ScopedDeoptimizationContext : public art::ValueObject {
 public:
  ScopedDeoptimizationContext(art::Thread* self, DeoptManager* deopt)
      RELEASE(deopt->deoptimization_status_lock_)
      ACQUIRE(art::Locks::mutator_lock_)
      ACQUIRE(art::Roles::uninterruptible_)
      : self_(self),
        deopt_(deopt),
        critical_section_(self_, "JVMTI Deoptimizing methods"),
        uninterruptible_cause_(nullptr) {
    deopt_->WaitForDeoptimizationToFinishLocked(self_);
    DCHECK(!deopt->performing_deoptimization_)
        << "Already performing deoptimization on another thread!";
    // Use performing_deoptimization_ to keep track of the lock.
    deopt_->performing_deoptimization_ = true;
    deopt_->deoptimization_status_lock_.Unlock(self_);
    uninterruptible_cause_ = critical_section_.Enter(art::gc::kGcCauseInstrumentation,
                                                     art::gc::kCollectorTypeCriticalSection);
    art::Runtime::Current()->GetThreadList()->SuspendAll("JVMTI Deoptimizing methods",
                                                         /*long_suspend=*/ false);
  }

  ~ScopedDeoptimizationContext()
      RELEASE(art::Locks::mutator_lock_)
      RELEASE(art::Roles::uninterruptible_) {
    // Can be suspended again.
    critical_section_.Exit(uninterruptible_cause_);
    // Release the mutator lock.
    art::Runtime::Current()->GetThreadList()->ResumeAll();
    // Let other threads know it's fine to proceed.
    art::MutexLock lk(self_, deopt_->deoptimization_status_lock_);
    deopt_->performing_deoptimization_ = false;
    deopt_->deoptimization_condition_.Broadcast(self_);
  }

 private:
  art::Thread* self_;
  DeoptManager* deopt_;
  art::gc::GCCriticalSection critical_section_;
  const char* uninterruptible_cause_;
};
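
// Global deoptimization is reference counted: only the first request deoptimizes everything and
// only removing the last one undoes it. All other callers simply wait for any in-progress
// deoptimization to finish.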
void DeoptManager::AddDeoptimizeAllMethodsLocked(art::Thread* self) {
  global_deopt_count_++;
  if (global_deopt_count_ == 1) {
    PerformGlobalDeoptimization(self);
  } else {
    WaitForDeoptimizationToFinish(self);
  }
}

void DeoptManager::RemoveDeoptimizeAllMethodsLocked(art::Thread* self) {
  DCHECK_GT(global_deopt_count_, 0u) << "Request to remove non-existent global deoptimization!";
  global_deopt_count_--;
  if (global_deopt_count_ == 0) {
    PerformGlobalUndeoptimization(self);
  } else {
    WaitForDeoptimizationToFinish(self);
  }
}

void DeoptManager::PerformLimitedDeoptimization(art::Thread* self, art::ArtMethod* method) {
  ScopedDeoptimizationContext sdc(self, this);
  art::Runtime::Current()->GetInstrumentation()->Deoptimize(method);
}

void DeoptManager::PerformLimitedUndeoptimization(art::Thread* self, art::ArtMethod* method) {
  ScopedDeoptimizationContext sdc(self, this);
  art::Runtime::Current()->GetInstrumentation()->Undeoptimize(method);
}

void DeoptManager::PerformGlobalDeoptimization(art::Thread* self) {
  ScopedDeoptimizationContext sdc(self, this);
  art::Runtime::Current()->GetInstrumentation()->DeoptimizeEverything(
      kDeoptManagerInstrumentationKey);
}

void DeoptManager::PerformGlobalUndeoptimization(art::Thread* self) {
  ScopedDeoptimizationContext sdc(self, this);
  art::Runtime::Current()->GetInstrumentation()->UndeoptimizeEverything(
      kDeoptManagerInstrumentationKey);
}
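
// Force a single thread into (or back out of) interpreted execution. The target's
// ForceInterpreterCount is reference counted; the first increment runs a synchronous checkpoint
// on the target to instrument its stack (see DeoptimizeThread below).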
jvmtiError DeoptManager::AddDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa,
                                                    jthread jtarget) {
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
  art::Thread* target = nullptr;
  jvmtiError err = OK;
  if (!ThreadUtil::GetNativeThread(jtarget, soa, &target, &err)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return err;
  }
  // We don't need additional locking here because we hold the thread_list_lock_.
  if (target->IncrementForceInterpreterCount() == 1) {
    struct DeoptClosure : public art::Closure {
     public:
      explicit DeoptClosure(DeoptManager* manager) : manager_(manager) {}
      void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
        manager_->DeoptimizeThread(self);
      }

     private:
      DeoptManager* manager_;
    };
    DeoptClosure c(this);
    target->RequestSynchronousCheckpoint(&c);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
  }
  return OK;
}

jvmtiError DeoptManager::RemoveDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa,
                                                       jthread jtarget) {
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
  art::Thread* target = nullptr;
  jvmtiError err = OK;
  if (!ThreadUtil::GetNativeThread(jtarget, soa, &target, &err)) {
    return err;
  }
  // We don't need additional locking here because we hold the thread_list_lock_.
  DCHECK_GT(target->ForceInterpreterCount(), 0u);
  target->DecrementForceInterpreterCount();
  return OK;
}

static constexpr const char* kInstrumentationKey = "JVMTI_DeoptRequester";
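
// Deoptimization requesters (agents that might ask for deopts) are also reference counted:
// instrumentation support for deoptimization is set up when the first requester appears and torn
// down again when the last one is removed.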
void DeoptManager::RemoveDeoptimizationRequester() {
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange sts(self, art::ThreadState::kSuspended);
  deoptimization_status_lock_.ExclusiveLock(self);
  DCHECK_GT(deopter_count_, 0u) << "Removing deoptimization requester without any being present";
  deopter_count_--;
  if (deopter_count_ == 0) {
    ScopedDeoptimizationContext sdc(self, this);
    art::Runtime::Current()->GetInstrumentation()->DisableDeoptimization(kInstrumentationKey);
    return;
  } else {
    deoptimization_status_lock_.ExclusiveUnlock(self);
  }
}

void DeoptManager::AddDeoptimizationRequester() {
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange stsc(self, art::ThreadState::kSuspended);
  deoptimization_status_lock_.ExclusiveLock(self);
  deopter_count_++;
  if (deopter_count_ == 1) {
    ScopedDeoptimizationContext sdc(self, this);
    art::instrumentation::Instrumentation* instrumentation =
        art::Runtime::Current()->GetInstrumentation();
    // Tell instrumentation we will be deopting single threads.
    instrumentation->EnableSingleThreadDeopt(kInstrumentationKey);
  } else {
    deoptimization_status_lock_.ExclusiveUnlock(self);
  }
}
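
// Ensure the target thread's stack is instrumented so its frames can be deoptimized on demand.
// This is what the checkpoint installed by AddDeoptimizeThreadMethods runs.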
void DeoptManager::DeoptimizeThread(art::Thread* target) {
  // We might or might not be running on the target thread (self) so get Thread::Current
  // directly.
  art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kSuspended);
  art::gc::ScopedGCCriticalSection sgccs(art::Thread::Current(),
                                         art::gc::GcCause::kGcCauseDebugger,
                                         art::gc::CollectorType::kCollectorTypeDebugger);
  art::ScopedSuspendAll ssa("Instrument thread stack");
  // Prepare the stack so methods can be deoptimized as and when required.
  // This by itself doesn't cause any methods to deoptimize but enables
  // deoptimization on demand.
  art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(
      target,
      /* deopt_all_frames= */ false);
}

extern DeoptManager* gDeoptManager;
DeoptManager* DeoptManager::Get() {
  return gDeoptManager;
}

}  // namespace openjdkjvmti