/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_POOL_H_
#define ART_RUNTIME_MONITOR_POOL_H_

#include "monitor.h"

#include "base/allocator.h"
#ifdef __LP64__
#include <stdint.h>
#include "base/atomic.h"
#include "runtime.h"
#else
#include "base/stl_util.h"  // STLDeleteElements
#endif

namespace art {

// Abstraction to keep monitors small enough to fit in a lock word (32 bits). On 32-bit systems
// the monitor id loses the alignment bits of the Monitor*.
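//
// A minimal usage sketch (illustrative only; in practice the runtime owns the single pool and
// drives these calls itself, and self/owner/obj/hash_code stand for values the caller already
// has):
//
//   MonitorPool* pool = MonitorPool::Create();  // Returns nullptr on 32-bit builds.
//   Monitor* mon = MonitorPool::CreateMonitor(self, owner, obj, hash_code);
//   MonitorId id = MonitorPool::MonitorIdFromMonitor(mon);
//   DCHECK_EQ(MonitorPool::MonitorFromMonitorId(id), mon);
//   MonitorPool::ReleaseMonitor(self, mon);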
class MonitorPool {
 public:
  static MonitorPool* Create() {
#ifndef __LP64__
    return nullptr;
#else
    return new MonitorPool();
#endif
  }

  static Monitor* CreateMonitor(Thread* self,
                                Thread* owner,
                                ObjPtr<mirror::Object> obj,
                                int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_) {
#ifndef __LP64__
    Monitor* mon = new Monitor(self, owner, obj, hash_code);
    DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment);
    return mon;
#else
    return GetMonitorPool()->CreateMonitorInPool(self, owner, obj, hash_code);
#endif
  }

  static void ReleaseMonitor(Thread* self, Monitor* monitor) {
#ifndef __LP64__
    UNUSED(self);
    delete monitor;
#else
    GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
#endif
  }

  static void ReleaseMonitors(Thread* self, MonitorList::Monitors* monitors) {
#ifndef __LP64__
    UNUSED(self);
    STLDeleteElements(monitors);
#else
    GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
#endif
  }

  static Monitor* MonitorFromMonitorId(MonitorId mon_id) {
#ifndef __LP64__
    return reinterpret_cast<Monitor*>(mon_id << LockWord::kMonitorIdAlignmentShift);
#else
    return GetMonitorPool()->LookupMonitor(mon_id);
#endif
  }

  static MonitorId MonitorIdFromMonitor(Monitor* mon) {
#ifndef __LP64__
    return reinterpret_cast<MonitorId>(mon) >> LockWord::kMonitorIdAlignmentShift;
#else
    return mon->GetMonitorId();
#endif
  }

  static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
    UNUSED(self);
    return MonitorIdFromMonitor(mon);
#else
    return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
#endif
  }

  static MonitorPool* GetMonitorPool() {
#ifndef __LP64__
    return nullptr;
#else
    return Runtime::Current()->GetMonitorPool();
#endif
  }

  ~MonitorPool() {
#ifdef __LP64__
    FreeInternal();
#endif
  }

 private:
#ifdef __LP64__
  // When we create a monitor pool, threads have not been initialized yet, so ignore thread-safety
  // analysis.
  MonitorPool() NO_THREAD_SAFETY_ANALYSIS;

  void AllocateChunk() REQUIRES(Locks::allocated_monitor_ids_lock_);

  // Release all chunks and metadata. This is done on shutdown, where threads have been destroyed,
  // so ignore thread-safety analysis.
  void FreeInternal() NO_THREAD_SAFETY_ANALYSIS;

  Monitor* CreateMonitorInPool(Thread* self,
                               Thread* owner,
                               ObjPtr<mirror::Object> obj,
                               int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
  void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);

  // Note: This is safe as we do not ever move chunks. All needed entries in the monitor_chunks_
  // data structure are read-only once we get here. Updates happen-before this call because
  // the lock word was stored with release semantics and we read it with acquire semantics to
  // retrieve the id.
  Monitor* LookupMonitor(MonitorId mon_id) {
    size_t offset = MonitorIdToOffset(mon_id);
    size_t index = offset / kChunkSize;
    size_t top_index = index / kMaxListSize;
    size_t list_index = index % kMaxListSize;
    size_t offset_in_chunk = offset % kChunkSize;
    uintptr_t base = monitor_chunks_[top_index][list_index];
    return reinterpret_cast<Monitor*>(base + offset_in_chunk);
  }
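  // Worked example of the decomposition above, assuming 4 KiB pages (kChunkSize == 4096) and a
  // release build (kInitialChunkStorage == 256, so kMaxListSize == 32768): an id whose byte
  // offset is 3 * kChunkSize + 24 gives index == 3, hence top_index == 0, list_index == 3 and
  // offset_in_chunk == 24, i.e. the monitor 24 bytes into the fourth chunk of the first list.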

  static bool IsInChunk(uintptr_t base_addr, Monitor* mon) {
    uintptr_t mon_ptr = reinterpret_cast<uintptr_t>(mon);
    return base_addr <= mon_ptr && (mon_ptr - base_addr < kChunkSize);
  }

  MonitorId ComputeMonitorIdInPool(Monitor* mon, Thread* self) {
    MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
    for (size_t i = 0; i <= current_chunk_list_index_; ++i) {
      for (size_t j = 0; j < ChunkListCapacity(i); ++j) {
        if (j >= num_chunks_ && i == current_chunk_list_index_) {
          break;
        }
        uintptr_t chunk_addr = monitor_chunks_[i][j];
        if (IsInChunk(chunk_addr, mon)) {
          return OffsetToMonitorId(
              reinterpret_cast<uintptr_t>(mon) - chunk_addr
              + i * (kMaxListSize * kChunkSize) + j * kChunkSize);
        }
      }
    }
    LOG(FATAL) << "Did not find chunk that contains monitor.";
    return 0;
  }

  static constexpr size_t MonitorIdToOffset(MonitorId id) {
    return id << 3;
  }

  static constexpr MonitorId OffsetToMonitorId(size_t offset) {
    return static_cast<MonitorId>(offset >> 3);
  }

  static constexpr size_t ChunkListCapacity(size_t index) {
    return kInitialChunkStorage << index;
  }
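  // For example, with kMonitorAlignment == 8 (hence the shift by 3), MonitorIdToOffset(5) == 40
  // and OffsetToMonitorId(40) == 5. ChunkListCapacity() doubles per list: in a release build
  // (kInitialChunkStorage == 256) the capacities run 256, 512, 1024, ... up to
  // kMaxListSize == 32768 for the eighth and last list.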

  // TODO: There are assumptions in the code that monitor addresses are 8B aligned (>>3).
  static constexpr size_t kMonitorAlignment = 8;
  // Size of a monitor, rounded up to a multiple of alignment.
  static constexpr size_t kAlignedMonitorSize = (sizeof(Monitor) + kMonitorAlignment - 1) &
                                                -kMonitorAlignment;
  // As close to a page as we can get seems a good start.
  static constexpr size_t kChunkCapacity = kPageSize / kAlignedMonitorSize;
  // Chunk size that is referenced in the id. We could shrink this to the storage actually used
  // in a chunk, i.e., kChunkCapacity * kAlignedMonitorSize, but that would require real
  // divisions instead of power-of-two shifts.
  static constexpr size_t kChunkSize = kPageSize;
  static_assert(IsPowerOfTwo(kChunkSize), "kChunkSize must be power of 2");
  // The number of chunks of storage that can be referenced by the initial chunk list.
  // The total number of usable monitor chunks is typically 255 times this number, so it
  // should be large enough that we don't run out. We run out of address bits if it's > 512.
  // Currently we set it a bit smaller, to save half a page per process. We make it tiny in
  // debug builds to catch growth errors. This is the only value we really expect to tune.
  static constexpr size_t kInitialChunkStorage = kIsDebugBuild ? 1U : 256U;
  static_assert(IsPowerOfTwo(kInitialChunkStorage), "kInitialChunkStorage must be power of 2");
  // The number of lists, each containing pointers to storage chunks.
  static constexpr size_t kMaxChunkLists = 8;  // Dictated by 3 bit index. Don't increase above 8.
  static_assert(IsPowerOfTwo(kMaxChunkLists), "kMaxChunkLists must be power of 2");
  static constexpr size_t kMaxListSize = kInitialChunkStorage << (kMaxChunkLists - 1);
  // We lose 3 bits in the monitor id due to the 3 bit monitor_chunks_ index, and gain them back
  // from the 3 bit alignment constraint on monitors:
  static_assert(kMaxListSize * kChunkSize < (1 << LockWord::kMonitorIdSize),
                "Monitor id bits don't fit");
  static_assert(IsPowerOfTwo(kMaxListSize), "kMaxListSize must be power of 2");
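  // Concrete check of the assert above (release values, 4 KiB pages): the total offset space is
  // kMaxChunkLists * kMaxListSize * kChunkSize == 8 * 32768 * 4096 == 2^30 bytes, but ids are
  // byte offsets shifted right by 3, so the largest id is kMaxListSize * kChunkSize == 2^27,
  // which fits in the 28 monitor id bits with room to spare.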

  // Array of pointers to lists (again arrays) of pointers to chunks containing monitors.
  // The zeroth entry points to a list (array) of kInitialChunkStorage pointers to chunks.
  // Each subsequent list is twice as large as the preceding one.
  // Monitor Ids are interpreted as follows (see the worked example below):
  //   Top 3 bits (of 28): index into monitor_chunks_.
  //   Next 16 bits: index into the chunk list, i.e. monitor_chunks_[i].
  //   Last 9 bits: offset within chunk, expressed as a multiple of kMonitorAlignment.
  // If we set kInitialChunkStorage to 512, this would allow us to use roughly 128K chunks of
  // monitors, which is 0.5GB of monitors. With this maximum setting, the largest chunk list
  // contains 64K entries, and we make full use of the available index space. With a
  // kInitialChunkStorage value of 256, this is proportionately reduced to 0.25GB of monitors.
  // Updates to monitor_chunks_ are guarded by allocated_monitor_ids_lock_.
  // No field in this entire data structure is ever updated once a monitor id whose lookup
  // requires it has been made visible to another thread. Thus readers never race with
  // updates, in spite of the fact that they acquire no locks.
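  // Worked example of the id layout above (using the kInitialChunkStorage == 512 figures and
  // 4 KiB pages): an id of (2 << 25) | (7 << 9) | 3 names monitor_chunks_[2], chunk 7 within
  // that list, and the monitor at byte offset 3 * kMonitorAlignment == 24 inside that chunk,
  // matching what LookupMonitor() computes from the id's byte offset.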
  uintptr_t* monitor_chunks_[kMaxChunkLists];  // uintptr_t is really a Monitor*.
  // Highest currently used index in monitor_chunks_. Used for newly allocated chunks.
  size_t current_chunk_list_index_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
  // Number of chunk pointers stored in monitor_chunks_[current_chunk_list_index_] so far.
  size_t num_chunks_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
  // After the initial allocation, this is always equal to
  // ChunkListCapacity(current_chunk_list_index_).
  size_t current_chunk_list_capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);

  typedef TrackingAllocator<uint8_t, kAllocatorTagMonitorPool> Allocator;
  Allocator allocator_;

  // Start of free list of monitors.
  // Note: these point to the right memory regions, but do *not* denote initialized objects.
  Monitor* first_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_POOL_H_