Merge "Code cleanup between debugger and jit."
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index e2ef7ac..2b43dfb 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -7557,34 +7557,6 @@
return descriptor;
}
-bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) {
- Runtime* const runtime = Runtime::Current();
- if (runtime->UseJit()) {
- // JIT can have direct code pointers from any method to any other method.
- return true;
- }
- // Non-image methods don't use direct code pointer.
- if (!m->GetDeclaringClass()->IsBootStrapClassLoaded()) {
- return false;
- }
- if (m->IsPrivate()) {
- // The method can only be called inside its own oat file. Therefore it won't be called using
- // its direct code if the oat file has been compiled in PIC mode.
- const DexFile& dex_file = m->GetDeclaringClass()->GetDexFile();
- const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
- if (oat_dex_file == nullptr) {
- // No oat file: the method has not been compiled.
- return false;
- }
- const OatFile* oat_file = oat_dex_file->GetOatFile();
- return oat_file != nullptr && !oat_file->IsPic();
- } else {
- // The method can be called outside its own oat file. Therefore it won't be called using its
- // direct code pointer only if all loaded oat files have been compiled in PIC mode.
- return runtime->GetOatFileManager().HaveNonPicOatFile();
- }
-}
-
jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files) {
// SOAAlreadyRunnable is protected, and we need something to add a global reference.
// We could move the jobject to the callers, but all call-sites do this...
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 36ed820..c368a3a 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -549,10 +549,6 @@
REQUIRES(!Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- // Returns true if the method can be called with its direct code pointer, false otherwise.
- bool MayBeCalledWithDirectCodePointer(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
-
// Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files.
// Note: the objects are not completely set up. Do not use this outside of tests and the compiler.
jobject CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c375bba..109e03d 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -44,7 +44,6 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
-#include "quick/inline_method_analyser.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
@@ -53,7 +52,6 @@
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "utf.h"
-#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"
namespace art {
@@ -3239,27 +3237,6 @@
CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
}
-static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- if (code_item == nullptr) {
- // TODO We should not be asked to watch location in a native or abstract method so the code item
- // should never be null. We could just check we never encounter this case.
- return false;
- }
- // Note: method verifier may cause thread suspension.
- self->AssertThreadSuspensionIsAllowable();
- StackHandleScope<2> hs(self);
- mirror::Class* declaring_class = m->GetDeclaringClass();
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
- verifier::MethodVerifier verifier(self, dex_cache->GetDexFile(), dex_cache, class_loader,
- &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
- m->GetAccessFlags(), false, true, false, true);
- // Note: we don't need to verify the method.
- return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
-}
-
static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (Breakpoint& breakpoint : gBreakpoints) {
@@ -3322,33 +3299,22 @@
}
if (first_breakpoint == nullptr) {
- // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
- // inlined or default, we deoptimize everything; otherwise we deoptimize only this method. We
+ // There is no breakpoint on this method yet: we need to deoptimize. If this method is default,
+ // we deoptimize everything; otherwise we deoptimize only this method. We
// deoptimize with defaults because we do not know everywhere they are used. It is possible some
- // of the copies could be inlined or otherwise missed.
+ // of the copies could be missed.
// TODO Deoptimizing on default methods might not be necessary in all cases.
- // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
- // Therefore we must not hold any lock when we call it.
- bool need_full_deoptimization = m->IsDefault() || IsMethodPossiblyInlined(self, m);
+ bool need_full_deoptimization = m->IsDefault();
if (need_full_deoptimization) {
- VLOG(jdwp) << "Need full deoptimization because of possible inlining or copying of method "
+ VLOG(jdwp) << "Need full deoptimization because of copying of method "
<< PrettyMethod(m);
return DeoptimizationRequest::kFullDeoptimization;
} else {
// We don't need to deoptimize if the method has not been compiled.
const bool is_compiled = m->HasAnyCompiledCode();
if (is_compiled) {
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- // If the method may be called through its direct code pointer (without loading
- // its updated entrypoint), we need full deoptimization to not miss the breakpoint.
- if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
- VLOG(jdwp) << "Need full deoptimization because of possible direct code call "
- << "into image for compiled method " << PrettyMethod(m);
- return DeoptimizationRequest::kFullDeoptimization;
- } else {
- VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
- return DeoptimizationRequest::kSelectiveDeoptimization;
- }
+ VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
+ return DeoptimizationRequest::kSelectiveDeoptimization;
} else {
// Method is not compiled: we don't need to deoptimize.
VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index c681ed7..344fcb9 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -366,7 +366,8 @@
if (osr) {
number_of_osr_compilations_++;
osr_code_map_.Put(method, code_ptr);
- } else {
+ } else if (!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
+ // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, method_header->GetEntryPoint());
}