/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_LIBARTBASE_BASE_MEM_MAP_H_
#define ART_LIBARTBASE_BASE_MEM_MAP_H_

#include <stddef.h>
#include <sys/types.h>

#include <map>
#include <mutex>
#include <string>

#include "android-base/thread_annotations.h"
#include "macros.h"

namespace art {

#if defined(__LP64__) && !defined(__Fuchsia__) && (defined(__aarch64__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
#error "Unrecognized 64-bit architecture."
#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
// We can never perform MemMap::ReplaceWith on non-Linux hosts since the mremap syscall is not
// present.
#define HAVE_MREMAP_SYSCALL false
#endif

// Used to keep track of mmap segments.
//
// On 64-bit systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
// for free pages. For security, the start of this scan should be randomized. This requires a
// dynamic initializer.
// For this to work, it is paramount that there are no other static initializers that access
// MemMap. Otherwise, calls might see uninitialized values.
class MemMap {
 public:
  static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;

  // Creates an invalid mapping.
  MemMap() {}

  // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
  static MemMap Invalid() {
    return MemMap();
  }

  MemMap(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_);
  MemMap& operator=(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_) {
    Reset();
    swap(other);
    return *this;
  }

  // Releases the memory mapping.
  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);

  // Swap two MemMaps.
  void swap(MemMap& other);

  void Reset() {
    if (IsValid()) {
      DoReset();
    }
  }

  bool IsValid() const {
    return base_size_ != 0u;
  }

  // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
  // relinquishes ownership of the source mmap.
  //
  // For the call to be successful:
  //   * The range [dest->Begin(), dest->Begin() + source->Size()] must not overlap with
  //     [source->Begin(), source->End()].
  //   * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
  //     with them).
  //   * kCanReplaceMapping must be true.
  //   * Neither source nor dest may use manual redzones.
  //   * Both source and dest must have the same offset from the nearest page boundary.
  //   * mremap must succeed when called on the mappings.
  //
  // If this call succeeds it will return true and:
  //   * Invalidate *source.
  //   * The protection of this will remain the same.
  //   * The size of this will be the size of the source.
  //   * The data in this will be the data from source.
  //
  // If this call fails it will return false and make no changes to *source or this. The ownership
  // of the source mmap is returned to the caller.
  bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
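
  // A minimal usage sketch of ReplaceWith() (illustrative only, not part of the API contract;
  // the mapping names and the 4096-byte size are arbitrary example values, and the PROT_*
  // constants come from <sys/mman.h>):
  //
  //   std::string error;
  //   MemMap dest = MemMap::MapAnonymous("example-dest", /*byte_count=*/ 4096,
  //                                      PROT_READ | PROT_WRITE, /*low_4gb=*/ false, &error);
  //   MemMap source = MemMap::MapAnonymous("example-source", /*byte_count=*/ 4096,
  //                                        PROT_READ | PROT_WRITE, /*low_4gb=*/ false, &error);
  //   if (dest.IsValid() && source.IsValid() && dest.ReplaceWith(&source, &error)) {
  //     // Success: `source` is now invalid and `dest` holds its data.
  //   } else {
  //     // Failure: both mappings are unchanged; `error` describes the problem.
  //   }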

  // Set a debug-friendly name for a map. It will be prefixed with "dalvik-".
  static void SetDebugName(void* map_ptr, const char* name, size_t size);

  // Request an anonymous region of length 'byte_count' and a requested base address.
  // Use null as the requested base address if you don't care.
  //
  // `reuse` allows re-mapping an address range from an existing mapping which retains the
  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
  //
  // The word "anonymous" in this context means "not backed by a file". The supplied
  // 'name' will be used -- on systems that support it -- to give the mapping a name.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapAnonymous(const char* name,
                             uint8_t* addr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             /*inout*/MemMap* reservation,
                             /*out*/std::string* error_msg,
                             bool use_debug_name = true);
  static MemMap MapAnonymous(const char* name,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             /*out*/std::string* error_msg) {
    return MapAnonymous(name,
                        /*addr=*/ nullptr,
                        byte_count,
                        prot,
                        low_4gb,
                        /*reuse=*/ false,
                        /*reservation=*/ nullptr,
                        error_msg);
  }
  static MemMap MapAnonymous(const char* name,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             MemMap* reservation,
                             /*out*/std::string* error_msg) {
    return MapAnonymous(name,
                        /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr,
                        byte_count,
                        prot,
                        low_4gb,
                        /*reuse=*/ false,
                        reservation,
                        error_msg);
  }
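
  // A minimal usage sketch of the convenience overload above (illustrative only; the region
  // name and size are arbitrary, and the PROT_* constants come from <sys/mman.h>):
  //
  //   std::string error_msg;
  //   MemMap map = MemMap::MapAnonymous("example-region",
  //                                     /*byte_count=*/ 16 * 4096,
  //                                     PROT_READ | PROT_WRITE,
  //                                     /*low_4gb=*/ false,
  //                                     &error_msg);
  //   if (!map.IsValid()) {
  //     // Allocation failed; `error_msg` describes why.
  //   }
  //   // The mapping is released automatically when `map` goes out of scope.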

  // Create a placeholder for a region allocated by a direct call to mmap.
  // This is useful when we do not have control over the code calling mmap,
  // but still want to keep track of it in the list.
  // The region is not considered to be owned and will not be unmapped.
  static MemMap MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count);

  // Map part of a file, taking care of non-page aligned offsets. The
  // "start" offset is absolute, not relative.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapFile(size_t byte_count,
                        int prot,
                        int flags,
                        int fd,
                        off_t start,
                        bool low_4gb,
                        const char* filename,
                        std::string* error_msg) {
    return MapFileAtAddress(nullptr,
                            byte_count,
                            prot,
                            flags,
                            fd,
                            start,
                            /*low_4gb=*/ low_4gb,
                            filename,
                            /*reuse=*/ false,
                            /*reservation=*/ nullptr,
                            error_msg);
  }
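
  // An illustrative sketch of mapping a file read-only (not normative; `fd`, `file_size` and
  // `path` are hypothetical placeholders supplied by the caller, and MAP_PRIVATE/PROT_READ come
  // from <sys/mman.h>):
  //
  //   std::string error_msg;
  //   MemMap image = MemMap::MapFile(/*byte_count=*/ file_size,
  //                                  PROT_READ,
  //                                  MAP_PRIVATE,
  //                                  fd,
  //                                  /*start=*/ 0,
  //                                  /*low_4gb=*/ false,
  //                                  /*filename=*/ path.c_str(),
  //                                  &error_msg);
  //   if (!image.IsValid()) {
  //     // Mapping failed; `error_msg` explains the failure.
  //   }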

  // Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
  // not relative. This version allows requesting a specific address for the base of the mapping.
  //
  // `reuse` allows re-mapping an address range from an existing mapping which retains the
  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
  //
  // If error_msg is null then we do not print /proc/maps to the log if MapFileAtAddress fails.
  // This helps improve performance of the failure case since reading and printing /proc/maps
  // takes several milliseconds in the worst case.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapFileAtAddress(uint8_t* addr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 const char* filename,
                                 bool reuse,
                                 /*inout*/MemMap* reservation,
                                 /*out*/std::string* error_msg);

  const std::string& GetName() const {
    return name_;
  }

  bool Sync();

  bool Protect(int prot);

  void MadviseDontNeedAndZero();
  int MadviseDontFork();

  int GetProtect() const {
    return prot_;
  }

  uint8_t* Begin() const {
    return begin_;
  }

  size_t Size() const {
    return size_;
  }

  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
  void SetSize(size_t new_size);

  uint8_t* End() const {
    return Begin() + Size();
  }

  void* BaseBegin() const {
    return base_begin_;
  }

  size_t BaseSize() const {
    return base_size_;
  }

  void* BaseEnd() const {
    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
  }

  bool HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();
  }

  // Unmap the pages at the end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    std::string* error_msg,
                    bool use_debug_name = true);
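
  // An illustrative sketch of splitting a mapping with RemapAtEnd() (not normative; the split
  // point is assumed to be page-aligned and the tail name is an arbitrary example):
  //
  //   std::string error_msg;
  //   uint8_t* new_end = map.Begin() + map.Size() / 2;  // Assumed page-aligned.
  //   MemMap tail = map.RemapAtEnd(new_end, "example-tail", PROT_READ, &error_msg);
  //   // On success, `map` now ends at `new_end` and `tail` covers the remaining pages.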

  // Unmap the file-backed pages at the end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    int tail_flags,
                    int fd,
                    off_t offset,
                    std::string* error_msg,
                    bool use_debug_name = true);

  // Take ownership of pages at the beginning of the mapping. The mapping must be an
  // anonymous reservation mapping, owning entire pages. The `byte_count` must not
  // exceed the size of this reservation.
  //
  // Returns a mapping owning `byte_count` bytes rounded up to entire pages
  // with its size set to the passed `byte_count`.
  MemMap TakeReservedMemory(size_t byte_count);
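
  // An illustrative sketch of carving pages out of a reservation (not normative; the name and
  // sizes are arbitrary example values):
  //
  //   std::string error_msg;
  //   MemMap reservation = MemMap::MapAnonymous("example-reservation",
  //                                             /*byte_count=*/ 64 * 4096,
  //                                             PROT_NONE,
  //                                             /*low_4gb=*/ false,
  //                                             &error_msg);
  //   MemMap first = reservation.TakeReservedMemory(/*byte_count=*/ 2 * 4096);
  //   // `first` now owns the first two pages; the remaining memory stays in `reservation`.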

  static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void DumpMaps(std::ostream& os, bool terse = false)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Init and Shutdown are NOT thread safe.
  // Both may be called multiple times and MemMap objects may be created any
  // time after the first call to Init and before the first call to Shutdown.
  static void Init() REQUIRES(!MemMap::mem_maps_lock_);
  static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);

  // If the map is PROT_READ, try to read each page of the map to check that it is in fact
  // readable (not faulting). This is used to diagnose a bug (b/19894268) where mprotect
  // intermittently does not seem to work.
  void TryReadable();

  // Align the map by unmapping the unaligned parts at the lower and the higher ends.
  void AlignBy(size_t size);

  // For annotation reasons.
  static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
    return nullptr;
  }

  // Reset, in a forked process, a MemMap whose memory has been madvised MADV_DONTFORK
  // in the parent process.
  void ResetInForkedProcess();

 private:
  MemMap(const std::string& name,
         uint8_t* begin,
         size_t size,
         void* base_begin,
         size_t base_size,
         int prot,
         bool reuse,
         size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);

  void DoReset();
  void Invalidate();
  void SwapMembers(MemMap& other);

  static void DumpMapsLocked(std::ostream& os, bool terse)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool HasMemMap(MemMap& map)
      REQUIRES(MemMap::mem_maps_lock_);
  static MemMap* GetLargestMemMapAt(void* address)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Internal version of mmap that supports low 4gb emulation.
  static void* MapInternal(void* addr,
                           size_t length,
                           int prot,
                           int flags,
                           int fd,
                           off_t offset,
                           bool low_4gb)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void* MapInternalArtLow4GBAllocator(size_t length,
                                             int prot,
                                             int flags,
                                             int fd,
                                             off_t offset)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Release memory owned by a reservation mapping.
  void ReleaseReservedMemory(size_t byte_count);

  // Member function to access real_munmap.
  static bool CheckMapRequest(uint8_t* expected_ptr,
                              void* actual_ptr,
                              size_t byte_count,
                              std::string* error_msg);

  static bool CheckReservation(uint8_t* expected_ptr,
                               size_t byte_count,
                               const char* name,
                               const MemMap& reservation,
                               /*out*/std::string* error_msg);

  std::string name_;
  uint8_t* begin_ = nullptr;    // Start of data. May be changed by AlignBy.
  size_t size_ = 0u;            // Length of data.

  void* base_begin_ = nullptr;  // Page-aligned base address. May be changed by AlignBy.
  size_t base_size_ = 0u;       // Length of mapping. May be changed by RemapAtEnd (i.e. Zygote).
  int prot_ = 0;                // Protection of the map.

  // When reuse_ is true, this is just a view of an existing mapping; we do not take
  // ownership and are not responsible for unmapping.
  bool reuse_ = false;

  // When already_unmapped_ is true the destructor will not call munmap.
  bool already_unmapped_ = false;

  size_t redzone_size_ = 0u;

#if USE_ART_LOW_4G_ALLOCATOR
  static uintptr_t next_mem_pos_;  // Next memory location to check for low_4g extent.

  static void* TryMemMapLow4GB(void* ptr,
                               size_t page_aligned_byte_count,
                               int prot,
                               int flags,
                               int fd,
                               off_t offset);
#endif

  static void TargetMMapInit();
  static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
  static int TargetMUnmap(void* start, size_t len);

  static std::mutex* mem_maps_lock_;

  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
};

inline void swap(MemMap& lhs, MemMap& rhs) {
  lhs.swap(rhs);
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);

// Zero and release pages if possible; there are no alignment requirements.
void ZeroAndReleasePages(void* address, size_t length);

}  // namespace art

#endif  // ART_LIBARTBASE_BASE_MEM_MAP_H_