/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_LIBARTBASE_BASE_MEM_MAP_H_
#define ART_LIBARTBASE_BASE_MEM_MAP_H_

#include <stddef.h>
#include <sys/types.h>

#include <map>
#include <mutex>
#include <string>

#include "android-base/thread_annotations.h"
#include "macros.h"

namespace art {

#if defined(__LP64__) && !defined(__Fuchsia__) && (defined(__aarch64__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
#error "Unrecognized 64-bit architecture."
#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
// We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not
// present.
#define HAVE_MREMAP_SYSCALL false
#endif

// Used to keep track of mmap segments.
//
// On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
// for free pages. For security, the start of this scan should be randomized. This requires a
// dynamic initializer.
// For this to work, it is paramount that there are no other static initializers that access MemMap.
// Otherwise, calls might see uninitialized values.
class MemMap {
 public:
  static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;

  // Creates an invalid mapping.
  MemMap() {}

  // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
  static MemMap Invalid() {
    return MemMap();
  }

  MemMap(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_);
  MemMap& operator=(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_) {
    Reset();
    swap(other);
    return *this;
  }

  // Releases the memory mapping.
  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);

  // Swap two MemMaps.
  void swap(MemMap& other);

  void Reset() {
    if (IsValid()) {
      DoReset();
    }
  }

  bool IsValid() const {
    return base_size_ != 0u;
  }

  // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
  // relinquishes ownership of the source mmap.
  //
  // For the call to be successful:
  //   * The range [dest->Begin(), dest->Begin() + source->Size()] must not overlap with
  //     [source->Begin(), source->End()].
  //   * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
  //     with them).
  //   * kCanReplaceMapping must be true.
  //   * Neither source nor dest may use manual redzones.
  //   * Both source and dest must have the same offset from the nearest page boundary.
  //   * mremap must succeed when called on the mappings.
  //
  // If this call succeeds it will return true and:
  //   * Invalidate *source.
  //   * The protection of this will remain the same.
  //   * The size of this will be the size of the source.
  //   * The data in this will be the data from source.
  //
  // If this call fails it will return false and make no changes to *source or this. The ownership
  // of the source mmap is returned to the caller.
  bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
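
  // A minimal usage sketch of ReplaceWith (illustrative only, not part of the API; the
  // 4096-byte size is an assumed page size and the PROT_* constants come from <sys/mman.h>):
  //
  //   std::string error;
  //   MemMap dest = MemMap::MapAnonymous("example-dest", 4096, PROT_READ | PROT_WRITE,
  //                                      /*low_4gb=*/ false, &error);
  //   MemMap src = MemMap::MapAnonymous("example-src", 4096, PROT_READ | PROT_WRITE,
  //                                     /*low_4gb=*/ false, &error);
  //   if (dest.ReplaceWith(&src, &error)) {
  //     // `src` is now invalid; `dest` is backed by the pages that previously held src's data.
  //   } else {
  //     // Neither mapping was changed; `error` describes the failure.
  //   }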

  // Set a debug friendly name for a map. It will be prefixed with "dalvik-".
  static void SetDebugName(void* map_ptr, const char* name, size_t size);

  // Request an anonymous region of length 'byte_count' and a requested base address.
  // Use null as the requested base address if you don't care.
  //
  // `reuse` allows re-mapping an address range from an existing mapping which retains the
  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
  //
  // The word "anonymous" in this context means "not backed by a file". The supplied
  // 'name' will be used -- on systems that support it -- to give the mapping
  // a name.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapAnonymous(const char* name,
                             uint8_t* addr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             /*inout*/MemMap* reservation,
                             /*out*/std::string* error_msg,
                             bool use_debug_name = true);
  static MemMap MapAnonymous(const char* name,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             /*out*/std::string* error_msg) {
    return MapAnonymous(name,
                        /*addr=*/ nullptr,
                        byte_count,
                        prot,
                        low_4gb,
                        /*reuse=*/ false,
                        /*reservation=*/ nullptr,
                        error_msg);
  }
  static MemMap MapAnonymous(const char* name,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             MemMap* reservation,
                             /*out*/std::string* error_msg) {
    return MapAnonymous(name,
                        /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr,
                        byte_count,
                        prot,
                        low_4gb,
                        /*reuse=*/ false,
                        reservation,
                        error_msg);
  }
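
  // A minimal usage sketch of the reservation flow (illustrative only; the names, the
  // two-page size, and the use of PROT_NONE for the reservation are assumptions):
  //
  //   std::string error;
  //   // Reserve an address range up front...
  //   MemMap reservation = MemMap::MapAnonymous("example-reservation", 2 * 4096, PROT_NONE,
  //                                             /*low_4gb=*/ false, &error);
  //   // ...then map the start of it, transferring ownership of those pages to `mapped`.
  //   MemMap mapped = MemMap::MapAnonymous("example-mapped", 4096, PROT_READ | PROT_WRITE,
  //                                        /*low_4gb=*/ false, &reservation, &error);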

  // Create placeholder for a region allocated by direct call to mmap.
  // This is useful when we do not have control over the code calling mmap,
  // but when we still want to keep track of it in the list.
  // The region is not considered to be owned and will not be unmapped.
  static MemMap MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count);

  // Map part of a file, taking care of non-page aligned offsets. The
  // "start" offset is absolute, not relative.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapFile(size_t byte_count,
                        int prot,
                        int flags,
                        int fd,
                        off_t start,
                        bool low_4gb,
                        const char* filename,
                        std::string* error_msg) {
    return MapFileAtAddress(nullptr,
                            byte_count,
                            prot,
                            flags,
                            fd,
                            start,
                            /*low_4gb=*/ low_4gb,
                            filename,
                            /*reuse=*/ false,
                            /*reservation=*/ nullptr,
                            error_msg);
  }
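
  // A minimal usage sketch of MapFile (illustrative only; the path, size, and read-only
  // protection are assumptions; open() comes from <fcntl.h>, PROT_*/MAP_* from <sys/mman.h>):
  //
  //   std::string error;
  //   int fd = open("/path/to/file", O_RDONLY);
  //   MemMap map = MemMap::MapFile(/*byte_count=*/ 4096, PROT_READ, MAP_PRIVATE, fd,
  //                                /*start=*/ 0, /*low_4gb=*/ false, "/path/to/file", &error);
  //   // `fd` may be closed once the mapping has been created (or an invalid MemMap returned).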

  // Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
  // not relative. This version allows requesting a specific address for the base of the mapping.
  //
  // `reuse` allows re-mapping an address range from an existing mapping which retains the
  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
  //
  // If error_msg is null then we do not print /proc/maps to the log if MapFileAtAddress fails.
  // This helps improve performance of the fail case since reading and printing /proc/maps takes
  // several milliseconds in the worst case.
  //
  // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
  static MemMap MapFileAtAddress(uint8_t* addr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 const char* filename,
                                 bool reuse,
                                 /*inout*/MemMap* reservation,
                                 /*out*/std::string* error_msg);

  const std::string& GetName() const {
    return name_;
  }

  bool Sync();

  bool Protect(int prot);

  void MadviseDontNeedAndZero();
  int MadviseDontFork();

  int GetProtect() const {
    return prot_;
  }

  uint8_t* Begin() const {
    return begin_;
  }

  size_t Size() const {
    return size_;
  }

  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
  void SetSize(size_t new_size);

  uint8_t* End() const {
    return Begin() + Size();
  }

  void* BaseBegin() const {
    return base_begin_;
  }

  size_t BaseSize() const {
    return base_size_;
  }

  void* BaseEnd() const {
    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
  }

  bool HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();
  }

  // Unmap the pages at end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    std::string* error_msg,
                    bool use_debug_name = true);

  // Unmap the pages of a file at end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    int tail_flags,
                    int fd,
                    off_t offset,
                    std::string* error_msg,
                    bool use_debug_name = true);
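
  // A minimal usage sketch of splitting a mapping with RemapAtEnd (illustrative only; the
  // two-page size and the names are assumptions):
  //
  //   std::string error;
  //   MemMap whole = MemMap::MapAnonymous("example-whole", 2 * 4096, PROT_READ | PROT_WRITE,
  //                                       /*low_4gb=*/ false, &error);
  //   // Carve the second page off into its own mapping; `whole` now ends at the new end.
  //   MemMap tail = whole.RemapAtEnd(whole.Begin() + 4096, "example-tail", PROT_READ, &error);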

  // Take ownership of pages at the beginning of the mapping. The mapping must be an
  // anonymous reservation mapping, owning entire pages. The `byte_count` must not
  // exceed the size of this reservation.
  //
  // Returns a mapping owning `byte_count` bytes rounded up to entire pages
  // with size set to the passed `byte_count`.
  MemMap TakeReservedMemory(size_t byte_count);
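
  // A minimal usage sketch of TakeReservedMemory (illustrative only; the four-page size
  // and the use of PROT_NONE for the reservation are assumptions):
  //
  //   std::string error;
  //   MemMap reservation = MemMap::MapAnonymous("example-reservation", 4 * 4096, PROT_NONE,
  //                                             /*low_4gb=*/ false, &error);
  //   // Take ownership of the first page at the beginning of the reservation.
  //   MemMap first = reservation.TakeReservedMemory(4096);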

  static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void DumpMaps(std::ostream& os, bool terse = false)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Init and Shutdown are NOT thread safe.
  // Both may be called multiple times and MemMap objects may be created any
  // time after the first call to Init and before the first call to Shutdown.
  static void Init() REQUIRES(!MemMap::mem_maps_lock_);
  static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
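
  // A minimal lifecycle sketch (illustrative only):
  //
  //   MemMap::Init();      // Call before any MemMap is created.
  //   // ... create and use MemMap objects ...
  //   MemMap::Shutdown();  // No MemMap may be created after the first call to Shutdown.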

  // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
  // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
  // intermittently.
  void TryReadable();

  // Align the map by unmapping the unaligned parts at the lower and the higher ends.
  void AlignBy(size_t size);

  // For annotation reasons.
  static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
    return nullptr;
  }

  // Reset in a forked process the MemMap whose memory has been madvised MADV_DONTFORK
  // in the parent process.
  void ResetInForkedProcess();

 private:
  MemMap(const std::string& name,
         uint8_t* begin,
         size_t size,
         void* base_begin,
         size_t base_size,
         int prot,
         bool reuse,
         size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);

  void DoReset();
  void Invalidate();
  void SwapMembers(MemMap& other);

  static void DumpMapsLocked(std::ostream& os, bool terse)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool HasMemMap(MemMap& map)
      REQUIRES(MemMap::mem_maps_lock_);
  static MemMap* GetLargestMemMapAt(void* address)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Internal version of mmap that supports low 4gb emulation.
  static void* MapInternal(void* addr,
                           size_t length,
                           int prot,
                           int flags,
                           int fd,
                           off_t offset,
                           bool low_4gb)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void* MapInternalArtLow4GBAllocator(size_t length,
                                             int prot,
                                             int flags,
                                             int fd,
                                             off_t offset)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Release memory owned by a reservation mapping.
  void ReleaseReservedMemory(size_t byte_count);

  // member function to access real_munmap
  static bool CheckMapRequest(uint8_t* expected_ptr,
                              void* actual_ptr,
                              size_t byte_count,
                              std::string* error_msg);

  static bool CheckReservation(uint8_t* expected_ptr,
                               size_t byte_count,
                               const char* name,
                               const MemMap& reservation,
                               /*out*/std::string* error_msg);

  std::string name_;
  uint8_t* begin_ = nullptr;    // Start of data. May be changed by AlignBy.
  size_t size_ = 0u;            // Length of data.

  void* base_begin_ = nullptr;  // Page-aligned base address. May be changed by AlignBy.
  size_t base_size_ = 0u;       // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
  int prot_ = 0;                // Protection of the map.

  // When reuse_ is true, this is just a view of an existing mapping
  // and we do not take ownership and are not responsible for
  // unmapping.
  bool reuse_ = false;

  // When already_unmapped_ is true the destructor will not call munmap.
  bool already_unmapped_ = false;

  size_t redzone_size_ = 0u;

#if USE_ART_LOW_4G_ALLOCATOR
  static uintptr_t next_mem_pos_;  // Next memory location to check for low_4g extent.

  static void* TryMemMapLow4GB(void* ptr,
                               size_t page_aligned_byte_count,
                               int prot,
                               int flags,
                               int fd,
                               off_t offset);
#endif

  static void TargetMMapInit();
  static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
  static int TargetMUnmap(void* start, size_t len);

  static std::mutex* mem_maps_lock_;

  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
};

inline void swap(MemMap& lhs, MemMap& rhs) {
  lhs.swap(rhs);
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);

// Zero and release pages if possible, no requirements on alignments.
void ZeroAndReleasePages(void* address, size_t length);

}  // namespace art

#endif  // ART_LIBARTBASE_BASE_MEM_MAP_H_