Revert^2 "Remove support for Valgrind in ART."

- Disable test configuration art-gtest-valgrind64
  (art-gtest-valgrind32 was already disabled).
- Remove Makefile logic regarding testing with Valgrind.
- Remove occurrences of `TEST_DISABLED_FOR_MEMORY_TOOL_VALGRIND`.
- Replace occurrences of `TEST_DISABLED_FOR_MEMORY_TOOL_ASAN` with
  `TEST_DISABLED_FOR_MEMORY_TOOL`.
- Replace the potentially dynamically evaluated
  `RUNNING_ON_MEMORY_TOOL` expression with the compile-time constant
  `kRunningOnMemoryTool` (see the sketch after this list).
- Simplify and fold the logic of
  `art::ArenaAllocatorMemoryToolCheckImpl` and
  `art::ArenaAllocatorMemoryToolCheck` into
  `art::ArenaAllocatorMemoryTool`.
- Adjust comments regarding memory tools.
- Remove Valgrind suppression files.
- Remove `--callgrind` option from tools/art.
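
A minimal sketch (not part of this change) of how such a compile-time
constant can be derived from Clang's ASan feature check; the exact
definition in ART's memory_tool.h may differ:

  // Hedged sketch: compile-time detection of AddressSanitizer.
  // __has_feature is a Clang extension; fall back to false elsewhere.
  #if defined(__has_feature)
  # if __has_feature(address_sanitizer)
  static constexpr bool kRunningOnMemoryTool = true;
  # else
  static constexpr bool kRunningOnMemoryTool = false;
  # endif
  #else
  static constexpr bool kRunningOnMemoryTool = false;
  #endif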

This reverts commit 8b362a87d52a6668ffd2283ef6ffc274315f41c8.

Change-Id: I23c76845e6ccf766f19b22b58a0d5161f60842a9
Test: art/test.py
Test: art/test/testrunner/run_build_test_target.py art-asan
Bug: 77856586
Bug: 29282211
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 9ac7886..702f0e4 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -125,7 +125,7 @@
 }
 
 void MemMapArenaPool::FreeArenaChain(Arena* first) {
-  if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+  if (kRunningOnMemoryTool) {
     for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
       MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
     }
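
For illustration, a hedged sketch (not ART's actual memory_tool.h) of how
MEMORY_TOOL_MAKE_UNDEFINED could forward to ASan's public poisoning API once
Valgrind is out of the picture; note that ASan reports an error on any access
to poisoned memory, which is stricter than Valgrind's "undefined but
addressable":

  #include <sanitizer/asan_interface.h>

  // Hedged sketch: map the ART macros onto ASan's poisoning interface.
  #define MEMORY_TOOL_MAKE_UNDEFINED(p, s) ASAN_POISON_MEMORY_REGION((p), (s))
  #define MEMORY_TOOL_MAKE_DEFINED(p, s)   ASAN_UNPOISON_MEMORY_REGION((p), (s))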
diff --git a/runtime/exec_utils_test.cc b/runtime/exec_utils_test.cc
index 68edfa8..a9c1ea2 100644
--- a/runtime/exec_utils_test.cc
+++ b/runtime/exec_utils_test.cc
@@ -36,8 +36,10 @@
     command.push_back("/usr/bin/id");
   }
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_TRUE(Exec(command, &error_msg));
   }
   EXPECT_EQ(0U, error_msg.size()) << error_msg;
@@ -50,8 +52,10 @@
   std::vector<std::string> command;
   command.push_back("bogus");
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_FALSE(Exec(command, &error_msg));
     EXPECT_FALSE(error_msg.empty());
   }
@@ -72,8 +76,10 @@
   }
   command.push_back(kModifiedVariable);
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_FALSE(Exec(command, &error_msg));
     EXPECT_NE(0U, error_msg.size()) << error_msg;
   }
@@ -97,8 +103,10 @@
   }
   command.push_back(kDeletedVariable);
   std::string error_msg;
-  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
-    // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+  if (!(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
+    // Running on Valgrind fails due to some memory that leaks in thread alternate signal stacks.
+    // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+    // check whether the following code works with ASan.
     EXPECT_TRUE(Exec(command, &error_msg));
     EXPECT_EQ(0U, error_msg.size()) << error_msg;
   }
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 150fe95..30213d5 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -625,7 +625,7 @@
 
   // If true, check that the returned memory is actually zero.
   static constexpr bool kCheckZeroMemory = kIsDebugBuild;
-  // Valgrind protects memory, so do not check memory when running under valgrind. In a normal
+  // Do not check memory when running under a memory tool. In a normal
   // build with kCheckZeroMemory the whole test should be optimized away.
   // TODO: Unprotect before checks.
   ALWAYS_INLINE bool ShouldCheckZeroMemory();
@@ -768,7 +768,7 @@
   // greater than or equal to this value, release pages.
   const size_t page_release_size_threshold_;
 
-  // Whether this allocator is running under Valgrind.
+  // Whether this allocator is running on a memory tool.
   bool is_running_on_memory_tool_;
 
   // The base address of the memory region that's managed by this allocator.
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 948d233..6756868 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -272,7 +272,7 @@
     }
     case kAllocatorTypeRosAlloc: {
       if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
-        // If running on valgrind or asan, we should be using the instrumented path.
+        // If running on ASan, we should be using the instrumented path.
         size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
         if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                                max_bytes_tl_bulk_allocated,
@@ -303,7 +303,7 @@
     }
     case kAllocatorTypeDlMalloc: {
       if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
-        // If running on valgrind, we should be using the instrumented path.
+        // If running on ASan, we should be using the instrumented path.
         ret = dlmalloc_space_->Alloc(self,
                                      alloc_size,
                                      bytes_allocated,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 25ed652..8e3bbde 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2248,7 +2248,8 @@
       // Add a new bin with the remaining space.
       AddBin(size - alloc_size, pos + alloc_size);
     }
-    // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
+    // Copy the object over to its new location.
+    // Historical note: We did not use `alloc_size` to avoid a Valgrind error.
     memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
     if (kUseBakerReadBarrier) {
       obj->AssertReadBarrierState();
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 512cde4..a24ca32 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -45,8 +45,9 @@
   }
 
   ~MemoryToolLargeObjectMapSpace() OVERRIDE {
-    // Keep valgrind happy if there is any large objects such as dex cache arrays which aren't
-    // freed since they are held live by the class linker.
+    // Historical note: We were deleting large objects to keep Valgrind happy if there were
+    // any large objects such as Dex cache arrays which weren't freed since they were held
+    // live by the class linker.
     MutexLock mu(Thread::Current(), lock_);
     for (auto& m : large_objects_) {
       delete m.second.mem_map;
diff --git a/runtime/gc/space/memory_tool_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
index 8282f3d..c022171 100644
--- a/runtime/gc/space/memory_tool_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -30,11 +30,14 @@
 namespace memory_tool_details {
 
 template <size_t kMemoryToolRedZoneBytes, bool kUseObjSizeForUsable>
-inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes,
-                                         size_t bytes_allocated, size_t usable_size,
-                                         size_t bytes_tl_bulk_allocated,
-                                         size_t* bytes_allocated_out, size_t* usable_size_out,
-                                         size_t* bytes_tl_bulk_allocated_out) {
+inline mirror::Object* AdjustForMemoryTool(void* obj_with_rdz,
+                                           size_t num_bytes,
+                                           size_t bytes_allocated,
+                                           size_t usable_size,
+                                           size_t bytes_tl_bulk_allocated,
+                                           size_t* bytes_allocated_out,
+                                           size_t* usable_size_out,
+                                           size_t* bytes_tl_bulk_allocated_out) {
   if (bytes_allocated_out != nullptr) {
     *bytes_allocated_out = bytes_allocated;
   }
@@ -84,24 +87,31 @@
           bool kUseObjSizeForUsable>
 mirror::Object*
 MemoryToolMallocSpace<S,
-                    kMemoryToolRedZoneBytes,
-                    kAdjustForRedzoneInAllocSize,
-                    kUseObjSizeForUsable>::AllocWithGrowth(
-    Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+                      kMemoryToolRedZoneBytes,
+                      kAdjustForRedzoneInAllocSize,
+                      kUseObjSizeForUsable>::AllocWithGrowth(
+    Thread* self,
+    size_t num_bytes,
+    size_t* bytes_allocated_out,
+    size_t* usable_size_out,
     size_t* bytes_tl_bulk_allocated_out) {
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
-                                          &bytes_allocated, &usable_size,
+  void* obj_with_rdz = S::AllocWithGrowth(self,
+                                          num_bytes + 2 * kMemoryToolRedZoneBytes,
+                                          &bytes_allocated,
+                                          &usable_size,
                                           &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
-      obj_with_rdz, num_bytes,
-      bytes_allocated, usable_size,
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+      obj_with_rdz,
+      num_bytes,
+      bytes_allocated,
+      usable_size,
       bytes_tl_bulk_allocated,
       bytes_allocated_out,
       usable_size_out,
@@ -113,27 +123,35 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 mirror::Object* MemoryToolMallocSpace<S,
-                                    kMemoryToolRedZoneBytes,
-                                    kAdjustForRedzoneInAllocSize,
-                                    kUseObjSizeForUsable>::Alloc(
-    Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+                                      kMemoryToolRedZoneBytes,
+                                      kAdjustForRedzoneInAllocSize,
+                                      kUseObjSizeForUsable>::Alloc(
+    Thread* self,
+    size_t num_bytes,
+    size_t* bytes_allocated_out,
+    size_t* usable_size_out,
     size_t* bytes_tl_bulk_allocated_out) {
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
-                                &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
+  void* obj_with_rdz = S::Alloc(self,
+                                num_bytes + 2 * kMemoryToolRedZoneBytes,
+                                &bytes_allocated,
+                                &usable_size,
+                                &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes,
-                                             kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
-                                                                   bytes_allocated, usable_size,
-                                                                   bytes_tl_bulk_allocated,
-                                                                   bytes_allocated_out,
-                                                                   usable_size_out,
-                                                                   bytes_tl_bulk_allocated_out);
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+      obj_with_rdz,
+      num_bytes,
+      bytes_allocated,
+      usable_size,
+      bytes_tl_bulk_allocated,
+      bytes_allocated_out,
+      usable_size_out,
+      bytes_tl_bulk_allocated_out);
 }
 
 template <typename S,
@@ -141,24 +159,31 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 mirror::Object* MemoryToolMallocSpace<S,
-                                    kMemoryToolRedZoneBytes,
-                                    kAdjustForRedzoneInAllocSize,
-                                    kUseObjSizeForUsable>::AllocThreadUnsafe(
-    Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+                                      kMemoryToolRedZoneBytes,
+                                      kAdjustForRedzoneInAllocSize,
+                                      kUseObjSizeForUsable>::AllocThreadUnsafe(
+    Thread* self,
+    size_t num_bytes,
+    size_t* bytes_allocated_out,
+    size_t* usable_size_out,
     size_t* bytes_tl_bulk_allocated_out) {
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
-                                            &bytes_allocated, &usable_size,
+  void* obj_with_rdz = S::AllocThreadUnsafe(self,
+                                            num_bytes + 2 * kMemoryToolRedZoneBytes,
+                                            &bytes_allocated,
+                                            &usable_size,
                                             &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
-      obj_with_rdz, num_bytes,
-      bytes_allocated, usable_size,
+  return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+      obj_with_rdz,
+      num_bytes,
+      bytes_allocated,
+      usable_size,
       bytes_tl_bulk_allocated,
       bytes_allocated_out,
       usable_size_out,
@@ -170,12 +195,14 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::AllocationSize(
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::AllocationSize(
     mirror::Object* obj, size_t* usable_size) {
-  size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
+  size_t result = S::AllocationSize(
+      reinterpret_cast<mirror::Object*>(
+          reinterpret_cast<uint8_t*>(obj)
+              - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
       usable_size);
   if (usable_size != nullptr) {
     if (kUseObjSizeForUsable) {
@@ -192,10 +219,9 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::Free(
-    Thread* self, mirror::Object* ptr) {
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::Free(Thread* self, mirror::Object* ptr) {
   void* obj_after_rdz = reinterpret_cast<void*>(ptr);
   uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kMemoryToolRedZoneBytes;
 
@@ -220,10 +246,10 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::FreeList(
-    Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::FreeList(
+                                 Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
   size_t freed = 0;
   for (size_t i = 0; i < num_ptrs; i++) {
     freed += Free(self, ptrs[i]);
@@ -238,11 +264,12 @@
           bool kUseObjSizeForUsable>
 template <typename... Params>
 MemoryToolMallocSpace<S,
-                    kMemoryToolRedZoneBytes,
-                    kAdjustForRedzoneInAllocSize,
-                    kUseObjSizeForUsable>::MemoryToolMallocSpace(
-    MemMap* mem_map, size_t initial_size, Params... params) : S(mem_map, initial_size, params...) {
-  // Don't want to change the valgrind states of the mem map here as the allocator is already
+                      kMemoryToolRedZoneBytes,
+                      kAdjustForRedzoneInAllocSize,
+                      kUseObjSizeForUsable>::MemoryToolMallocSpace(
+                          MemMap* mem_map, size_t initial_size, Params... params)
+                          : S(mem_map, initial_size, params...) {
+  // Don't want to change the memory tool states of the mem map here as the allocator is already
   // initialized at this point and that may interfere with what the allocator does internally. Note
   // that the tail beyond the initial size is mprotected.
 }
@@ -252,9 +279,9 @@
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 size_t MemoryToolMallocSpace<S,
-                           kMemoryToolRedZoneBytes,
-                           kAdjustForRedzoneInAllocSize,
-                           kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
+                             kMemoryToolRedZoneBytes,
+                             kAdjustForRedzoneInAllocSize,
+                             kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
   return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kMemoryToolRedZoneBytes);
 }
 
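The renamed AdjustForMemoryTool keeps the same red-zone arithmetic as before.
A hedged sketch of the layout it manages (illustrative helper, not the ART
source):

  #include <cstddef>
  #include <cstdint>

  // Hedged sketch of the red-zone layout behind the
  // num_bytes + 2 * kMemoryToolRedZoneBytes requests above:
  //
  //   |<- red zone ->|<- num_bytes for the caller ->|<- red zone ->|
  //   ^ obj_with_rdz ^ pointer returned to the caller
  //
  inline void* SkipLeadingRedZone(void* obj_with_rdz, size_t red_zone_bytes) {
    return reinterpret_cast<uint8_t*>(obj_with_rdz) + red_zone_bytes;
  }
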
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index e786536..b0402e4 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -77,7 +77,7 @@
 
   // Everything is set so record in immutable structure and leave
   uint8_t* begin = mem_map->Begin();
-  // TODO: Fix RosAllocSpace to support Valgrind/ASan. There is currently some issues with
+  // TODO: Fix RosAllocSpace to support ASan. There are currently some issues with
   // AllocationSize caused by redzones. b/12944686
   if (running_on_memory_tool) {
     return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
@@ -382,12 +382,12 @@
   size_t size = obj->SizeOf<kVerifyNone>();
   bool add_redzones = false;
   if (kMaybeIsRunningOnMemoryTool) {
-    add_redzones = RUNNING_ON_MEMORY_TOOL ? kMemoryToolAddsRedzones : 0;
+    add_redzones = kRunningOnMemoryTool && kMemoryToolAddsRedzones;
     if (add_redzones) {
       size += 2 * kDefaultMemoryToolRedZoneBytes;
     }
   } else {
-    DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U);
+    DCHECK(!kRunningOnMemoryTool);
   }
   size_t size_by_size = rosalloc_->UsableSize(size);
   if (kIsDebugBuild) {
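As an aside on the add_redzones change above: the old conditional mixed an
integer into a bool assignment; the new form stays boolean throughout. Both
produce the same value, as this hedged sketch (stand-in constants, not the
ART flags) checks at compile time:

  // Hedged sketch: stand-ins for kRunningOnMemoryTool / kMemoryToolAddsRedzones.
  constexpr bool kTool = true;
  constexpr bool kAddsRedzones = true;
  static_assert((kTool ? kAddsRedzones : 0) == (kTool && kAddsRedzones),
                "the ?: and && forms agree for boolean operands");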
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 9d16b87..4c17233 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -159,8 +159,8 @@
 
   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                         size_t maximum_size, bool low_memory_mode) OVERRIDE {
-    return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode,
-                          RUNNING_ON_MEMORY_TOOL != 0);
+    return CreateRosAlloc(
+        base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
   }
   static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
                                              size_t maximum_size, bool low_memory_mode,
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 655713e..01e7496 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -867,11 +867,6 @@
 }
 
 TEST_F(UnstartedRuntimeTest, Pow) {
-  // Valgrind seems to get this wrong, actually. Disable for valgrind.
-  if (RUNNING_ON_MEMORY_TOOL != 0 && kMemoryToolIsValgrind) {
-    return;
-  }
-
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
 
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index b7b779c..f31a24e 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -333,7 +333,7 @@
     }
 
     // When running sanitized, let all tasks finish to not leak. Otherwise just clear the queue.
-    if (!RUNNING_ON_MEMORY_TOOL) {
+    if (!kRunningOnMemoryTool) {
       pool->StopWorkers(self);
       pool->RemoveAllTasks(self);
     }
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 14f3f45..b3a47c3 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -289,8 +289,10 @@
                      ArtMethod* current_method,
                      void* ucontext_ptr,
                      bool skip_frames) {
-  // b/18119146
-  if (RUNNING_ON_MEMORY_TOOL != 0) {
+  // Historical note: This was disabled when running under Valgrind (b/18119146).
+  // TODO: Valgrind is no longer supported, but Address Sanitizer is:
+  // check whether this early return is still needed with ASan.
+  if (kRunningOnMemoryTool) {
     return;
   }
 
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1e327fc..6d10a22 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -240,7 +240,7 @@
       exit_(nullptr),
       abort_(nullptr),
       stats_enabled_(false),
-      is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL),
+      is_running_on_memory_tool_(kRunningOnMemoryTool),
       instrumentation_(),
       main_thread_group_(nullptr),
       system_thread_group_(nullptr),
@@ -1362,8 +1362,8 @@
     case InstructionSet::kMips:
     case InstructionSet::kMips64:
       implicit_null_checks_ = true;
-      // Installing stack protection does not play well with valgrind.
-      implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind);
+      // Historical note: Installing stack protection was not playing well with Valgrind.
+      implicit_so_checks_ = true;
       break;
     default:
       // Keep the defaults.
@@ -1378,8 +1378,8 @@
       // These need to be in a specific order.  The null point check handler must be
       // after the suspend check and stack overflow check handlers.
       //
-      // Note: the instances attach themselves to the fault manager and are handled by it. The manager
-      //       will delete the instance on Shutdown().
+      // Note: the instances attach themselves to the fault manager and are handled by it. The
+      //       manager will delete the instance on Shutdown().
       if (implicit_suspend_checks_) {
         new SuspensionHandler(&fault_manager);
       }
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 72d9919..54769f9 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -339,8 +339,8 @@
 };
 
 TEST_F(RuntimeSigQuitCallbackRuntimeCallbacksTest, SigQuit) {
-  // SigQuit induces a dump. ASAN isn't happy with libunwind reading memory.
-  TEST_DISABLED_FOR_MEMORY_TOOL_ASAN();
+  // SigQuit induces a dump. ASan isn't happy with libunwind reading memory.
+  TEST_DISABLED_FOR_MEMORY_TOOL();
 
   // The runtime needs to be started for the signal handler.
   Thread* self = Thread::Current();
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a8133a1..210e1b0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1116,21 +1116,10 @@
   Runtime* runtime = Runtime::Current();
   bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
 
-  // Valgrind on arm doesn't give the right values here. Do not install the guard page, and
-  // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
-  // stack_begin to 0.
-  const bool valgrind_on_arm =
-      (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kArm64) &&
-      kMemoryToolIsValgrind &&
-      RUNNING_ON_MEMORY_TOOL != 0;
-  if (valgrind_on_arm) {
-    tlsPtr_.stack_begin = nullptr;
-  }
-
   ResetDefaultStackEnd();
 
   // Install the protected region if we are doing implicit overflow checks.
-  if (implicit_stack_check && !valgrind_on_arm) {
+  if (implicit_stack_check) {
     // The thread might have protected region at the bottom.  We need
     // to install our own region so we need to move the limits
     // of the stack to make room for it.