GC cleanup.

Greater use of directories and namespaces.
Fix bugs that cause verify options to fail.
Address numerous other issues:

GC barrier waits occurring while holding locks:
GC barrier waits occur when we wait for threads to run the checkpoint function
on themselves. This happens with the heap bitmap and mutator locks held,
meaning that a thread that tries to take either lock exclusively will block on
a thread that is itself waiting on the barrier. If the blocked thread is one
of the threads the checkpoint is waiting on, the VM deadlocks.
This deadlock went unnoticed because the call that checks for wait safety was
removed in: https://googleplex-android-review.googlesource.com/#/c/249423/1.
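
A minimal sketch of the hazard (std::shared_mutex stands in for the real ART
lock types, and WaitForAllThreadsToRunCheckpoint is a hypothetical helper):

  #include <shared_mutex>

  std::shared_mutex mutator_lock;           // stands in for Locks::mutator_lock_
  void WaitForAllThreadsToRunCheckpoint();  // hypothetical barrier wait

  // GC thread: holds the lock shared, then waits on the checkpoint barrier.
  void GcThread() {
    std::shared_lock<std::shared_mutex> mu(mutator_lock);
    WaitForAllThreadsToRunCheckpoint();  // barrier wait while lock is held
  }

  // Mutator thread: blocks behind the shared hold above. If this thread is
  // one the barrier is waiting on, neither thread can progress: deadlock.
  void MutatorThread() {
    std::unique_lock<std::shared_mutex> mu(mutator_lock);
  }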

NewTimingLogger:
The existing timing logger records when a split ends but not when it begins.
This isn't good for systrace; in the context of GC it means that when mutators
race with the GC it is hard to tell which phase the GC is currently in. We
know which phase just finished and can derive the current one from that, but
that's not ideal.
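
A sketch of the idea (illustrative names, not the actual NewTimingLogger
interface): record the begin edge of each split as well as the end edge, so a
trace viewer can attribute any instant to a phase.

  #include <cstdint>

  uint64_t NanoTime();  // assumed monotonic clock helper

  class SplitTimingLogger {
   public:
    void StartSplit(const char* name) {
      name_ = name;
      start_ns_ = NanoTime();  // the begin edge is recorded, not just the end
    }
    void EndSplit() {
      Record(name_, start_ns_, NanoTime());
    }
   private:
    void Record(const char* name, uint64_t begin_ns, uint64_t end_ns);
    const char* name_;
    uint64_t start_ns_;
  };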

Support for only 1 discontinuous space:
Code now special-cases the continuous spaces and the single large object
space, rather than assuming we can have a collection of each kind.
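
A sketch of the resulting simplification (GetLargeObjectsSpace is an assumed
accessor name, shown for illustration):

  void WalkLargeObjects(gc::Heap* heap, DlMallocSpace::WalkCallback cb, void* arg) {
    // Exactly one large object space to consult; no loop over a collection
    // of discontinuous spaces is needed.
    gc::space::LargeObjectSpace* los = heap->GetLargeObjectsSpace();
    los->Walk(cb, arg);
  }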

Sorted atomic stacks:
Sorted atomic stacks are used to improve verification performance. Simplify
their use and add extra checks.
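
A sketch of why sorting helps (assumes the stack exposes Begin()/End()
pointers over its elements):

  #include <algorithm>

  // Sort the stack's backing storage once per verification pass, then each
  // membership check is an O(log n) binary search instead of a linear scan.
  std::sort(stack->Begin(), stack->End());
  bool present = std::binary_search(stack->Begin(), stack->End(), obj);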

Simplify mod-union table abstractions.

Reduce use of std::strings and their associated overhead in hot code.

Make time units of fields explicit.

Reduce the confusion caused by IsAllocSpace really meaning IsDlMallocSpace.

Make GetTotalMemory (exposed via System) equal to the footprint (as in Dalvik)
rather than the max memory footprint.

Change-Id: Ie87067140fa4499b15edab691fe6565d79599812
diff --git a/src/gc/space/large_object_space.h b/src/gc/space/large_object_space.h
new file mode 100644
index 0000000..197fad3
--- /dev/null
+++ b/src/gc/space/large_object_space.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
+#define ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
+
+#include "dlmalloc_space.h"
+#include "safe_map.h"
+#include "space.h"
+
+#include <set>
+#include <vector>
+
+namespace art {
+namespace gc {
+namespace space {
+
+// Abstraction implemented by all large object spaces.
+class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
+ public:
+  virtual SpaceType GetType() const {
+    return kSpaceTypeLargeObjectSpace;
+  }
+
+  virtual void SwapBitmaps();
+  virtual void CopyLiveToMarked();
+  virtual void Walk(DlMallocSpace::WalkCallback callback, void* arg) = 0;
+  virtual ~LargeObjectSpace() {}
+
+  uint64_t GetBytesAllocated() const {
+    return num_bytes_allocated_;
+  }
+
+  uint64_t GetObjectsAllocated() const {
+    return num_objects_allocated_;
+  }
+
+  uint64_t GetTotalBytesAllocated() const {
+    return total_bytes_allocated_;
+  }
+
+  uint64_t GetTotalObjectsAllocated() const {
+    return total_objects_allocated_;
+  }
+
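+  // Frees the num_ptrs objects in ptrs, returning the total bytes freed.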
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+
+ protected:
+  explicit LargeObjectSpace(const std::string& name);
+
+  // Approximate number of bytes and objects currently allocated in the space.
+  size_t num_bytes_allocated_;
+  size_t num_objects_allocated_;
+  // Lifetime totals; these are not decremented when objects are freed.
+  size_t total_bytes_allocated_;
+  size_t total_objects_allocated_;
+
+  friend class Space;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
+};
+
+// A discontinuous large object space implemented by individual mmap/munmap calls.
+class LargeObjectMapSpace : public LargeObjectSpace {
+ public:
+  // Creates a large object space. Allocations into the large object space use memory maps instead
+  // of malloc.
+  static LargeObjectMapSpace* Create(const std::string& name);
+
+  // Return the storage space required by obj.
+  size_t AllocationSize(const mirror::Object* obj);
+  mirror::Object* Alloc(Thread* self, size_t num_bytes);
+  size_t Free(Thread* self, mirror::Object* ptr);
+  void Walk(DlMallocSpace::WalkCallback callback, void* arg);
+  // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
+  bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
+
+ private:
+  LargeObjectMapSpace(const std::string& name);
+  virtual ~LargeObjectMapSpace() {}
+
+  // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
+  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  std::vector<mirror::Object*> large_objects_ GUARDED_BY(lock_);
+  typedef SafeMap<mirror::Object*, MemMap*> MemMaps;
+  MemMaps mem_maps_ GUARDED_BY(lock_);
+};
+
+// A continuous large object space with a free-list to handle holes.
+// TODO: this implementation is buggy.
+class FreeListSpace : public LargeObjectSpace {
+ public:
+  virtual ~FreeListSpace();
+  static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
+
+  size_t AllocationSize(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  mirror::Object* Alloc(Thread* self, size_t num_bytes);
+  size_t Free(Thread* self, mirror::Object* obj);
+  bool Contains(const mirror::Object* obj) const;
+  void Walk(DlMallocSpace::WalkCallback callback, void* arg);
+
+  // Address at which the space begins.
+  byte* Begin() const {
+    return begin_;
+  }
+
+  // Address at which the space ends, which may vary as the space is filled.
+  byte* End() const {
+    return end_;
+  }
+
+  // Current size of the space.
+  size_t Size() const {
+    return End() - Begin();
+  }
+
+  void Dump(std::ostream& os) const;
+
+ private:
+  static const size_t kAlignment = kPageSize;
+
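+  // Bookkeeping header for a contiguous run of the space. Headers are kept
+  // out-of-line in the chunks_ vector below rather than inside the run itself.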
+  class Chunk {
+   public:
+    static const size_t kFreeFlag = 0x80000000;
+
+    struct SortBySize {
+      bool operator()(const Chunk* a, const Chunk* b) const {
+        return a->GetSize() < b->GetSize();
+      }
+    };
+
+    bool IsFree() const {
+      return (m_size & kFreeFlag) != 0;
+    }
+
+    void SetSize(size_t size, bool is_free = false) {
+      m_size = size | (is_free ? kFreeFlag : 0);
+    }
+
+    size_t GetSize() const {
+      return m_size & (kFreeFlag - 1);
+    }
+
+    Chunk* GetPrevious() {
+      return m_previous;
+    }
+
+    void SetPrevious(Chunk* previous) {
+      m_previous = previous;
+      DCHECK(m_previous == NULL ||
+             m_previous + m_previous->GetSize() / kAlignment == this);
+    }
+   private:
+    // Size of this chunk in bytes; the top bit (kFreeFlag) marks free chunks.
+    size_t m_size;
+    // Previous chunk in address order, or NULL if this is the first chunk.
+    Chunk* m_previous;
+  };
+
+  FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
+  void AddFreeChunk(void* address, size_t size, Chunk* previous) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  Chunk* ChunkFromAddr(void* address) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void* AddrFromChunk(Chunk* chunk) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void RemoveFreeChunk(Chunk* chunk) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  Chunk* GetNextChunk(Chunk* chunk);
+
+  typedef std::multiset<Chunk*, Chunk::SortBySize> FreeChunks;
+  byte* const begin_;
+  byte* const end_;
+  UniquePtr<MemMap> mem_map_;
+  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  std::vector<Chunk> chunks_ GUARDED_BY(lock_);
+  FreeChunks free_chunks_ GUARDED_BY(lock_);
+};
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_