Incorporate additional feedback on native JNI checks

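MemoryTypeRange now treats an empty range (start == limit) as valid,
DCHECKs validity in Start() and Limit(), and has
MemoryTypeTable::Builder::Add() reject invalid and zero-sized ranges.
CodeRangeCache::Instance() is renamed to GetSingleton() and gains
HasCache()/DropCache() so the cache can be torn down on runtime
shutdown via the new JniShutdownNativeCallerCheck();
JNIInitializeNativeCallerCheck() is renamed to
JniInitializeNativeCallerCheck().

Under the new semantics (illustrative sketch only; addresses and type
value are arbitrary):

  MemoryTypeRange<int> empty(0x1000u, 0x1000u, /*type=*/0);
  CHECK(empty.IsValid());      // start == limit is now a valid range.
  CHECK_EQ(empty.Size(), 0u);  // But it covers no addresses.
  MemoryTypeTable<int>::Builder builder;
  CHECK(!builder.Add(empty));  // Builder still rejects zero-sized ranges.
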
Bug: 124338141
Test: m art_libartbase_tests_memory_type_table_test && \
      out/host/linux-x86/nativetest/art_libartbase_tests/memory_type_table_test
Test: m && flashall && boot

(cherry picked from commit fbbda47e032d5cf0db33d2ce671bd37def1058fd)

Change-Id: I26f0a06cf36c2591f4dc30fc6dc3ed443bd8e4a4
Merged-In: I26f0a06cf36c2591f4dc30fc6dc3ed443bd8e4a4
diff --git a/libartbase/base/memory_type_table.h b/libartbase/base/memory_type_table.h
index 9fb4e66..89d4ad5 100644
--- a/libartbase/base/memory_type_table.h
+++ b/libartbase/base/memory_type_table.h
@@ -37,27 +37,33 @@
   MemoryTypeRange(const MemoryTypeRange& other) = default;
   MemoryTypeRange& operator=(const MemoryTypeRange& other) = default;
 
-  uintptr_t Start() const { return start_; }
+  uintptr_t Start() const {
+    DCHECK(IsValid());
+    return start_;
+  }
 
-  uintptr_t Limit() const { return limit_; }
+  uintptr_t Limit() const {
+    DCHECK(IsValid());
+    return limit_;
+  }
 
-  uintptr_t Size() const { return limit_ - start_; }
+  uintptr_t Size() const { return Limit() - Start(); }
 
   const T& Type() const { return type_; }
 
-  bool IsValid() const { return Start() < Limit(); }
+  bool IsValid() const { return start_ <= limit_; }
 
   bool Contains(uintptr_t address) const {
-    return address >= start_ && address < limit_;
+    return address >= Start() && address < Limit();
   }
 
   bool Overlaps(const MemoryTypeRange& other) const {
-    bool disjoint = limit_ <= other.start_ || start_ >= other.limit_;
+    bool disjoint = Limit() <= other.Start() || Start() >= other.Limit();
     return !disjoint;
   }
 
   bool Adjoins(const MemoryTypeRange& other) const {
-    return other.start_ == limit_ || other.limit_ == start_;
+    return other.Start() == Limit() || other.Limit() == Start();
   }
 
   bool CombinableWith(const MemoryTypeRange& other) const {
@@ -184,7 +190,7 @@
 
 template <typename T>
 bool MemoryTypeTable<T>::Builder::Add(const MemoryTypeRange<T>& range) {
-  if (!range.IsValid()) {
+  if (UNLIKELY(!range.IsValid() || range.Size() == 0u)) {
     return false;
   }
 
diff --git a/libartbase/base/memory_type_table_test.cc b/libartbase/base/memory_type_table_test.cc
index f46d4b95..1ffefef 100644
--- a/libartbase/base/memory_type_table_test.cc
+++ b/libartbase/base/memory_type_table_test.cc
@@ -50,7 +50,7 @@
 
   {
     // |<----- a ----->| |<----- c ----->|
-    MemoryTypeRange<int> c(a.Limit()+ a.Size(), a.Limit() + 2 * a.Size(), kMemoryType);
+    MemoryTypeRange<int> c(a.Limit() + a.Size(), a.Limit() + 2 * a.Size(), kMemoryType);
     EXPECT_FALSE(a.Overlaps(c));
     EXPECT_FALSE(c.Overlaps(a));
   }
@@ -101,7 +101,7 @@
 
   {
     // |<--- a --->| |<--- c --->|
-    MemoryTypeRange<int> c(a.Limit()+ a.Size(), a.Limit() + 2 * a.Size(), kMemoryType);
+    MemoryTypeRange<int> c(a.Limit() + a.Size(), a.Limit() + 2 * a.Size(), kMemoryType);
     EXPECT_FALSE(a.Adjoins(c));
     EXPECT_FALSE(c.Adjoins(a));
   }
@@ -109,7 +109,7 @@
   {
     // |<--- a --->|
     //       |<--- d --->|
-    MemoryTypeRange<int> d(a.Start()+ a.Size() / 2, a.Limit() + a.Size() / 2, kMemoryType);
+    MemoryTypeRange<int> d(a.Start() + a.Size() / 2, a.Limit() + a.Size() / 2, kMemoryType);
     EXPECT_FALSE(a.Adjoins(d));
     EXPECT_FALSE(d.Adjoins(a));
   }
@@ -141,7 +141,7 @@
                                    std::numeric_limits<uintptr_t>::max(),
                                    0).IsValid());
   EXPECT_TRUE(MemoryTypeRange<int>(1u, 2u, 0).IsValid());
-  EXPECT_FALSE(MemoryTypeRange<int>(0u, 0u, 0).IsValid());
+  EXPECT_TRUE(MemoryTypeRange<int>(0u, 0u, 0).IsValid());
   EXPECT_FALSE(MemoryTypeRange<int>(2u, 1u, 0).IsValid());
   EXPECT_FALSE(MemoryTypeRange<int>(std::numeric_limits<uintptr_t>::max(),
                                     std::numeric_limits<uintptr_t>::min(),
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index bf1e933..3522820 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -81,15 +81,15 @@
   va_list* args;
 };
 
-static constexpr int kMaxReturnAddressDepth = 4;
+static constexpr size_t kMaxReturnAddressDepth = 4;
 
-inline void* GetReturnAddress(int depth) {
+inline void* GetReturnAddress(size_t depth) {
   DCHECK_LT(depth, kMaxReturnAddressDepth);
   switch (depth) {
-    case 0: return __builtin_return_address(0);
-    case 1: return __builtin_return_address(1);
-    case 2: return __builtin_return_address(2);
-    case 3: return __builtin_return_address(3);
+    case 0u: return __builtin_return_address(0);
+    case 1u: return __builtin_return_address(1);
+    case 2u: return __builtin_return_address(2);
+    case 3u: return __builtin_return_address(3);
     default:
       return nullptr;
   }
@@ -120,9 +120,9 @@
 // resolutions within the Core Platform API for native callers.
 class CodeRangeCache final {
  public:
-  static CodeRangeCache& Instance() {
-    static CodeRangeCache instance;
-    return instance;
+  static CodeRangeCache& GetSingleton() {
+    static CodeRangeCache singleton;
+    return singleton;
   }
 
   SharedObjectKind GetSharedObjectKind(void* pc) {
@@ -134,9 +134,13 @@
     return SharedObjectKind::kOther;
   }
 
-  void BuildCache() {
-    art::MemoryTypeTable<SharedObjectKind>::Builder builder;
+  bool HasCache() const {
+    return memory_type_table_.Size() != 0u;
+  }
 
+  void BuildCache() {
+    DCHECK(!HasCache());
+    art::MemoryTypeTable<SharedObjectKind>::Builder builder;
     builder_ = &builder;
     libjavacore_loaded_ = false;
     libnativehelper_loaded_ = false;
@@ -153,6 +157,10 @@
     builder_ = nullptr;
   }
 
+  void DropCache() {
+    memory_type_table_ = {};
+  }
+
  private:
   CodeRangeCache() {}
 
@@ -170,7 +178,7 @@
     auto cache = reinterpret_cast<CodeRangeCache*>(data);
     art::MemoryTypeTable<SharedObjectKind>::Builder* builder = cache->builder_;
 
-    for (size_t i = 0; i < info->dlpi_phnum; ++i) {
+    for (size_t i = 0u; i < info->dlpi_phnum; ++i) {
       const ElfW(Phdr)& phdr = info->dlpi_phdr[i];
       if (phdr.p_type != PT_LOAD || ((phdr.p_flags & PF_X) != PF_X)) {
         continue;  // Skip anything other than code pages
@@ -178,14 +186,14 @@
       uintptr_t start = info->dlpi_addr + phdr.p_vaddr;
       const uintptr_t limit = art::RoundUp(start + phdr.p_memsz, art::kPageSize);
       SharedObjectKind kind = GetKind(info->dlpi_name, start, limit);
-      art::MemoryTypeRange<SharedObjectKind> range(start, limit, kind);
+      art::MemoryTypeRange<SharedObjectKind> range{start, limit, kind};
       if (!builder->Add(range)) {
-        LOG(WARNING) << "Overlapping range found in ELF headers: " << range;
+        LOG(WARNING) << "Overlapping/invalid range found in ELF headers: " << range;
       }
     }
 
     // Update sanity check state.
-    std::string_view dlpi_name(info->dlpi_name);
+    std::string_view dlpi_name{info->dlpi_name};
     if (!cache->libjavacore_loaded_) {
       cache->libjavacore_loaded_ = art::EndsWith(dlpi_name, kLibjavacore);
     }
@@ -234,12 +242,12 @@
   if (!art::kIsTargetBuild) {
     return false;
   }
-  for (int i = 0; i < kMaxReturnAddressDepth; ++i) {
+  for (size_t i = 0u; i < kMaxReturnAddressDepth; ++i) {
     void* return_address = GetReturnAddress(i);
     if (return_address == nullptr) {
       return false;
     }
-    SharedObjectKind kind = CodeRangeCache::Instance().GetSharedObjectKind(return_address);
+    SharedObjectKind kind = CodeRangeCache::GetSingleton().GetSharedObjectKind(return_address);
     if (kind != SharedObjectKind::kRuntime) {
       return kind == SharedObjectKind::kApexModule;
     }
@@ -3352,8 +3360,14 @@
   return reinterpret_cast<JNINativeInterface*>(&gJniSleepForeverStub);
 }
 
-void JNIInitializeNativeCallerCheck() {
-  CodeRangeCache::Instance().BuildCache();
+void JniInitializeNativeCallerCheck() {
+  // This method should be called only once and before there are multiple runtime threads.
+  DCHECK(!CodeRangeCache::GetSingleton().HasCache());
+  CodeRangeCache::GetSingleton().BuildCache();
+}
+
+void JniShutdownNativeCallerCheck() {
+  CodeRangeCache::GetSingleton().DropCache();
 }
 
 }  // namespace art
diff --git a/runtime/jni/jni_internal.h b/runtime/jni/jni_internal.h
index 192251a..4359074 100644
--- a/runtime/jni/jni_internal.h
+++ b/runtime/jni/jni_internal.h
@@ -34,7 +34,10 @@
 
 // Enables native stack checking for field and method resolutions via JNI. This should be called
 // during runtime initialization after libjavacore and libopenjdk have been dlopen()'ed.
-void JNIInitializeNativeCallerCheck();
+void JniInitializeNativeCallerCheck();
+
+// Removes native stack checking state.
+void JniShutdownNativeCallerCheck();
 
 namespace jni {
 
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index bc6b9b7..8133193 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -492,6 +492,8 @@
   // instance. We rely on a small initialization order issue in Runtime::Start() that requires
   // elements of WellKnownClasses to be null, see b/65500943.
   WellKnownClasses::Clear();
+
+  JniShutdownNativeCallerCheck();
 }
 
 struct AbortState {
@@ -1826,7 +1828,7 @@
 
   // Having loaded native libraries for Managed Core library, enable field and
   // method resolution checks via JNI from native code.
-  JNIInitializeNativeCallerCheck();
+  JniInitializeNativeCallerCheck();
 
   VLOG(startup) << "Runtime::InitNativeMethods exiting";
 }