Add valgrind support to compiler arena allocator.

When running under valgrind, arena allocations go through a slow path
that pads each allocation with a red zone (kValgrindRedZoneBytes plus
rounding slack) marked NOACCESS, so memcheck reports out-of-bounds
accesses past an allocation. When an arena is returned to the pool its
memory is marked UNDEFINED, so memcheck reports stale reads from freed
arena storage.
Change-Id: Id9974301d3810bfac968ee562b01a11098e402c2
diff --git a/compiler/dex/arena_allocator.cc b/compiler/dex/arena_allocator.cc
index 5a91d27..2da8064 100644
--- a/compiler/dex/arena_allocator.cc
+++ b/compiler/dex/arena_allocator.cc
@@ -20,12 +20,14 @@
#include "base/logging.h"
#include "base/mutex.h"
#include "thread-inl.h"
+#include <memcheck/memcheck.h>
namespace art {
// Memmap is a bit slower than malloc according to my measurements.
static constexpr bool kUseMemMap = false;
static constexpr bool kUseMemSet = true && kUseMemMap;
+static constexpr size_t kValgrindRedZoneBytes = 8;
static const char* alloc_names[ArenaAllocator::kNumAllocKinds] = {
"Misc ",
@@ -108,6 +110,9 @@
void ArenaPool::FreeArena(Arena* arena) {
Thread* self = Thread::Current();
+ if (UNLIKELY(RUNNING_ON_VALGRIND)) {
+ VALGRIND_MAKE_MEM_UNDEFINED(arena->memory_, arena->bytes_allocated_);
+ }
{
MutexLock lock(self, lock_);
arena->next_ = free_arenas_;
@@ -129,7 +134,8 @@
end_(nullptr),
ptr_(nullptr),
arena_head_(nullptr),
- num_allocations_(0) {
+ num_allocations_(0),
+ running_on_valgrind_(RUNNING_ON_VALGRIND) {
memset(&alloc_stats_[0], 0, sizeof(alloc_stats_));
}
@@ -141,6 +147,29 @@
}
}
+void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
+ size_t rounded_bytes = (bytes + 3 + kValgrindRedZoneBytes) & ~3;
+ if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
+ // Obtain a new block.
+ ObtainNewArenaForAllocation(rounded_bytes);
+ if (UNLIKELY(ptr_ == nullptr)) {
+ return nullptr;
+ }
+ }
+ if (kCountAllocations) {
+ alloc_stats_[kind] += rounded_bytes;
+ ++num_allocations_;
+ }
+ uint8_t* ret = ptr_;
+ ptr_ += rounded_bytes;
+ // Check that the memory is already zeroed out.
+ for (uint8_t* ptr = ret; ptr < ptr_; ++ptr) {
+ CHECK_EQ(*ptr, 0U);
+ }
+ VALGRIND_MAKE_MEM_NOACCESS(ret + bytes, rounded_bytes - bytes);
+ return ret;
+}
+
ArenaAllocator::~ArenaAllocator() {
// Reclaim all the arenas by giving them back to the thread pool.
UpdateBytesAllocated();