/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <sys/mman.h>

#include <memory>
#include <random>

#include "common_art_test.h"
#include "common_runtime_test.h"  // For TEST_DISABLED_FOR_MIPS
#include "logging.h"
#include "memory_tool.h"
#include "unix_file/fd_file.h"

namespace art {

class MemMapTest : public CommonArtTest {
 public:
  static bool IsAddressMapped(void* addr) {
    bool res = msync(addr, 1, MS_SYNC) == 0;
    if (!res && errno != ENOMEM) {
      PLOG(FATAL) << "Unexpected error occurred on msync";
    }
    return res;
  }

  static std::vector<uint8_t> RandomData(size_t size) {
    std::random_device rd;
    std::uniform_int_distribution<uint8_t> dist;
    std::vector<uint8_t> res;
    res.resize(size);
    for (size_t i = 0; i < size; i++) {
      res[i] = dist(rd);
    }
    return res;
  }

  static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
    // Find a valid map address and unmap it before returning.
    std::string error_msg;
    MemMap map = MemMap::MapAnonymous("temp",
                                      /* addr= */ nullptr,
                                      size,
                                      PROT_READ,
                                      low_4gb,
                                      &error_msg);
    CHECK(map.IsValid());
    return map.Begin();
  }

  static void RemapAtEndTest(bool low_4gb) {
    std::string error_msg;
    // Cast the page size to size_t.
    const size_t page_size = static_cast<size_t>(kPageSize);
    // Map a two-page memory region.
    MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
                                     /* addr= */ nullptr,
                                     2 * page_size,
                                     PROT_READ | PROT_WRITE,
                                     low_4gb,
                                     &error_msg);
    // Check its state and write to it.
    ASSERT_TRUE(m0.IsValid());
    uint8_t* base0 = m0.Begin();
    ASSERT_TRUE(base0 != nullptr) << error_msg;
    size_t size0 = m0.Size();
    EXPECT_EQ(m0.Size(), 2 * page_size);
    EXPECT_EQ(m0.BaseBegin(), base0);
    EXPECT_EQ(m0.BaseSize(), size0);
    memset(base0, 42, 2 * page_size);
    // Remap the latter half into a second MemMap.
    MemMap m1 = m0.RemapAtEnd(base0 + page_size,
                              "MemMapTest_RemapAtEndTest_map1",
                              PROT_READ | PROT_WRITE,
                              &error_msg);
    // Check the states of the two maps.
    EXPECT_EQ(m0.Begin(), base0) << error_msg;
    EXPECT_EQ(m0.Size(), page_size);
    EXPECT_EQ(m0.BaseBegin(), base0);
    EXPECT_EQ(m0.BaseSize(), page_size);
    uint8_t* base1 = m1.Begin();
    size_t size1 = m1.Size();
    EXPECT_EQ(base1, base0 + page_size);
    EXPECT_EQ(size1, page_size);
    EXPECT_EQ(m1.BaseBegin(), base1);
    EXPECT_EQ(m1.BaseSize(), size1);
    // Write to the second region.
    memset(base1, 43, page_size);
    // Check the contents of the two regions.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base0[i], 42);
    }
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Unmap the first region.
    m0.Reset();
    // Make sure the second region is still accessible after the first
    // region is unmapped.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    MemMap m2 = m1.RemapAtEnd(m1.Begin(),
                              "MemMapTest_RemapAtEndTest_map1",
                              PROT_READ | PROT_WRITE,
                              &error_msg);
    ASSERT_TRUE(m2.IsValid()) << error_msg;
    ASSERT_FALSE(m1.IsValid());
  }

  void CommonInit() {
    MemMap::Init();
  }

#if defined(__LP64__) && !defined(__x86_64__)
  static uintptr_t GetLinearScanPos() {
    return MemMap::next_mem_pos_;
  }
#endif
};
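
// A minimal sketch (not itself a test) of the MapAnonymous pattern used throughout this
// file; the name "example" and the chosen flags below are illustrative only:
//
//   std::string error_msg;
//   MemMap map = MemMap::MapAnonymous("example",
//                                     /* addr= */ nullptr,
//                                     kPageSize,
//                                     PROT_READ | PROT_WRITE,
//                                     /* low_4gb= */ false,
//                                     &error_msg);
//   ASSERT_TRUE(map.IsValid()) << error_msg;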

#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input);
#endif

TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif

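// Note on ReplaceWith, which the tests in this section exercise: judging by the
// assertions below, dest.ReplaceWith(&source, &error_msg) moves the source mapping over
// the destination address, invalidates `source` and unmaps its old address on success,
// and on failure (such as the overlap case) leaves both mappings untouched. This is a
// summary of what the test expectations imply, not a restatement of the MemMap API docs.
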
// We need mremap to be able to test ReplaceMapping at all.
#if HAVE_MREMAP_SYSCALL
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     /* addr= */ nullptr,
                                     kPageSize,
                                     PROT_READ,
                                     /* low_4gb= */ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       /* addr= */ nullptr,
                                       kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /* low_4gb= */ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  void* source_addr = source.Begin();
  void* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));

  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source.Begin(), data.data(), data.size());

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     /* addr= */ nullptr,
                                     5 * kPageSize,  // Need to make it larger
                                                     // initially so we know
                                                     // there won't be mappings
                                                     // in the way when we move
                                                     // source.
                                     PROT_READ,
                                     /* low_4gb= */ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       /* addr= */ nullptr,
                                       3 * kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /* low_4gb= */ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source with random data.
  std::vector<uint8_t> data = RandomData(3 * kPageSize);
  memcpy(source.Begin(), data.data(), data.size());

  // Make the dest smaller so that we know we'll have space.
  dest.SetSize(kPageSize);

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     /* addr= */ nullptr,
                                     3 * kPageSize,
                                     PROT_READ,
                                     /* low_4gb= */ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       /* addr= */ nullptr,
                                       kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /* low_4gb= */ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));

  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source.Begin(), data.data(), kPageSize);

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
  std::string error_msg;
  MemMap dest =
      MemMap::MapAnonymous(
          "MapAnonymousEmpty-atomic-replace-dest",
          /* addr= */ nullptr,
          3 * kPageSize,  // Need to make it larger initially so we know there won't be mappings in
                          // the way when we move source.
          PROT_READ | PROT_WRITE,
          /* low_4gb= */ false,
          &error_msg);
  ASSERT_TRUE(dest.IsValid());
  // Resize down to 1 page so we can remap the rest.
  dest.SetSize(kPageSize);
  // Create source from the last 2 pages.
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       dest.Begin() + kPageSize,
                                       2 * kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /* low_4gb= */ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source and dest with random data.
  std::vector<uint8_t> data = RandomData(2 * kPageSize);
  memcpy(source.Begin(), data.data(), data.size());
  std::vector<uint8_t> dest_data = RandomData(kPageSize);
  memcpy(dest.Begin(), dest_data.data(), dest_data.size());

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(source.Size(), data.size());
  ASSERT_EQ(dest.Size(), dest_data.size());

  ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
  ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
}
#endif  // HAVE_MREMAP_SYSCALL

TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /* addr= */ nullptr,
                                    0,
                                    PROT_READ,
                                    /* low_4gb= */ false,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             /* addr= */ nullptr,
                             kPageSize,
                             PROT_READ | PROT_WRITE,
                             /* low_4gb= */ false,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousFailNullError) {
  CommonInit();
  // Test that we don't crash with a null error_str when mapping at an invalid location.
  MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
                                    reinterpret_cast<uint8_t*>(kPageSize),
                                    0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb= */ false,
                                    nullptr);
  ASSERT_FALSE(map.IsValid());
}

#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /* addr= */ nullptr,
                                    0,
                                    PROT_READ,
                                    /* low_4gb= */ true,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             /* addr= */ nullptr,
                             kPageSize,
                             PROT_READ | PROT_WRITE,
                             /* low_4gb= */ true,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}

TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  constexpr size_t kMapSize = kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
  MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
                               PROT_READ,
                               MAP_PRIVATE,
                               scratch_file.GetFd(),
                               /*start=*/0,
                               /*low_4gb=*/true,
                               scratch_file.GetFilename().c_str(),
                               &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.Size(), kMapSize);
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
#endif

TEST_F(MemMapTest, MapAnonymousExactAddr) {
  CommonInit();
  std::string error_msg;
  // Find a valid address.
  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb=*/false);
  // Map at an address that should work, which should succeed.
  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                     valid_address,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb= */ false,
                                     &error_msg);
  ASSERT_TRUE(map0.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0.BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
                                     /* addr= */ nullptr,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb= */ false,
                                     &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1.BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
                                     reinterpret_cast<uint8_t*>(map1.BaseBegin()),
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb= */ false,
                                     &error_msg);
  ASSERT_FALSE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(!error_msg.empty());
}

TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}

#ifdef __LP64__
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif

TEST_F(MemMapTest, RemapFileViewAtEnd) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;

  // Create a scratch file 3 pages large.
  constexpr size_t kMapSize = 3 * kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  memset(data.get(), 1, kPageSize);
  memset(&data[0], 0x55, kPageSize);
  memset(&data[kPageSize], 0x5a, kPageSize);
  memset(&data[2 * kPageSize], 0xaa, kPageSize);
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));

  MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
                               PROT_READ,
                               MAP_PRIVATE,
                               scratch_file.GetFd(),
                               /*start=*/0,
                               /*low_4gb=*/true,
                               scratch_file.GetFilename().c_str(),
                               &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.Size(), kMapSize);
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
  ASSERT_EQ(data[0], *map.Begin());
  ASSERT_EQ(data[kPageSize], *(map.Begin() + kPageSize));
  ASSERT_EQ(data[2 * kPageSize], *(map.Begin() + 2 * kPageSize));

  for (size_t offset = 2 * kPageSize; offset > 0; offset -= kPageSize) {
    MemMap tail = map.RemapAtEnd(map.Begin() + offset,
                                 "bad_offset_map",
                                 PROT_READ,
                                 MAP_PRIVATE | MAP_FIXED,
                                 scratch_file.GetFd(),
                                 offset,
                                 &error_msg);
    ASSERT_TRUE(tail.IsValid()) << error_msg;
    ASSERT_TRUE(error_msg.empty());
    ASSERT_EQ(offset, map.Size());
    ASSERT_EQ(static_cast<size_t>(kPageSize), tail.Size());
    ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
    ASSERT_EQ(data[offset], *tail.Begin());
  }
}

TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // Some MIPS32 hardware (namely the Creator Ci20 development board)
  // cannot allocate in the 2GB-4GB region.
  TEST_DISABLED_FOR_MIPS();

  // This test does not work under AddressSanitizer.
  // Historical note: This test did not work under Valgrind either.
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  constexpr size_t size = 0x100000;
  // Try all addresses starting from 2GB to 4GB.
  size_t start_addr = 2 * GB;
  std::string error_msg;
  MemMap map;
  for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
    map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                               reinterpret_cast<uint8_t*>(start_addr),
                               size,
                               PROT_READ | PROT_WRITE,
                               /* low_4gb= */ true,
                               &error_msg);
    if (map.IsValid()) {
      break;
    }
  }
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
}

TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= kPageSize;  // Now it's close to the top.
  MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
                                    reinterpret_cast<uint8_t*>(ptr),
                                    2 * kPageSize,  // brings it over the top.
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb= */ false,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}

#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  MemMap map =
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           kPageSize,
                           PROT_READ | PROT_WRITE,
                           /* low_4gb= */ true,
                           &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                    reinterpret_cast<uint8_t*>(0xF0000000),
                                    0x20000000,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb= */ true,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}
#endif

TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
                                    nullptr,
                                    0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb= */ false,
                                    /* reuse= */ false,
                                    /* reservation= */ nullptr,
                                    &error_msg);
  ASSERT_TRUE(map.IsValid());
  ASSERT_TRUE(error_msg.empty());
  MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
                                     reinterpret_cast<uint8_t*>(map.BaseBegin()),
                                     0x10000,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb= */ false,
                                     /* reuse= */ true,
                                     /* reservation= */ nullptr,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid());
  ASSERT_TRUE(error_msg.empty());
}

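// CheckNoGaps(first, last) is expected to report whether the maps from `first` through
// `last` cover one contiguous address range with no unmapped holes; the test below
// checks this by carving three adjacent page-sized maps out of a freshly unmapped
// region and then punching a hole in the middle.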
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  MemMap map = MemMap::MapAnonymous("MapAnonymous0",
                                    /* addr= */ nullptr,
                                    kPageSize * kNumPages,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb= */ false,
                                    &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin());
  // Unmap it.
  map.Reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available.
  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                     map_base,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb= */ false,
                                     &error_msg);
  ASSERT_TRUE(map0.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
                                     map_base + kPageSize,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb= */ false,
                                     &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
                                     map_base + kPageSize * 2,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb= */ false,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));

  // Two or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));

  // Unmap the middle one.
  map1.Reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
}

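// AlignBy(n): going by the assertions in this test, aligning a map by `n` trims it so
// that both Begin() and Begin() + Size() become n-aligned, dropping at most one page
// from either edge.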
TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  std::string error_msg;
  // Cast the page size to size_t.
  const size_t page_size = static_cast<size_t>(kPageSize);
  // Map a region.
  MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                   /* addr= */ nullptr,
                                   14 * page_size,
                                   PROT_READ | PROT_WRITE,
                                   /* low_4gb= */ false,
                                   &error_msg);
  ASSERT_TRUE(m0.IsValid());
  uint8_t* base0 = m0.Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0.Size(), 14 * page_size);
  ASSERT_EQ(m0.BaseBegin(), base0);
  ASSERT_EQ(m0.BaseSize(), m0.Size());

  // Break it into several regions by using RemapAtEnd.
  MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
                            "MemMapTest_AlignByTest_map1",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base1 = m1.Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0.Size(), 3 * page_size);

  MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
                            "MemMapTest_AlignByTest_map2",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base2 = m2.Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1.Size(), 4 * page_size);

  MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
                            "MemMapTest_AlignByTest_map1",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base3 = m3.Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2.Size(), 3 * page_size);
  ASSERT_EQ(m3.Size(), 4 * page_size);

  uint8_t* end0 = base0 + m0.Size();
  uint8_t* end1 = base1 + m1.Size();
  uint8_t* end2 = base2 + m2.Size();
  uint8_t* end3 = base3 + m3.Size();

  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align by 2 * page_size.
  m0.AlignBy(2 * page_size);
  m1.AlignBy(2 * page_size);
  m2.AlignBy(2 * page_size);
  m3.AlignBy(2 * page_size);

  EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));

  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0.Begin(), base0);
    EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
    EXPECT_EQ(m1.Begin(), base1 + page_size);
    EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
    EXPECT_EQ(m2.Begin(), base2 + page_size);
    EXPECT_EQ(m2.Begin() + m2.Size(), end2);
    EXPECT_EQ(m3.Begin(), base3);
    EXPECT_EQ(m3.Begin() + m3.Size(), end3);
  } else {
    EXPECT_EQ(m0.Begin(), base0 + page_size);
    EXPECT_EQ(m0.Begin() + m0.Size(), end0);
    EXPECT_EQ(m1.Begin(), base1);
    EXPECT_EQ(m1.Begin() + m1.Size(), end1);
    EXPECT_EQ(m2.Begin(), base2);
    EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
    EXPECT_EQ(m3.Begin(), base3 + page_size);
    EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
  }
}

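// Outline of the reservation workflow exercised below: map a PROT_NONE region as a
// placeholder, carve page-aligned chunks off its start by passing &reservation to
// MapFileAtAddress() / MapAnonymous(), and split an already-reserved map with
// TakeReservedMemory(); once every page has been handed out, the reservation itself
// becomes invalid. (This summarizes the steps of the test, not the MemMap API docs.)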
TEST_F(MemMapTest, Reservation) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  constexpr size_t kMapSize = 5 * kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));

  MemMap reservation = MemMap::MapAnonymous("Test reservation",
                                            /* addr= */ nullptr,
                                            kMapSize,
                                            PROT_NONE,
                                            /* low_4gb= */ false,
                                            &error_msg);
  ASSERT_TRUE(reservation.IsValid());
  ASSERT_TRUE(error_msg.empty());

  // Map first part of the reservation.
  constexpr size_t kChunk1Size = kPageSize - 1u;
  static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
  uint8_t* addr1 = reservation.Begin();
  MemMap map1 = MemMap::MapFileAtAddress(addr1,
                                         /* byte_count= */ kChunk1Size,
                                         PROT_READ,
                                         MAP_PRIVATE,
                                         scratch_file.GetFd(),
                                         /* start= */ 0,
                                         /* low_4gb= */ false,
                                         scratch_file.GetFilename().c_str(),
                                         /* reuse= */ false,
                                         &reservation,
                                         &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map1.Size(), kChunk1Size);
  ASSERT_EQ(addr1, map1.Begin());
  ASSERT_TRUE(reservation.IsValid());
  // Entire pages are taken from the `reservation`.
  ASSERT_LT(map1.End(), map1.BaseEnd());
  ASSERT_EQ(map1.BaseEnd(), reservation.Begin());

  // Map second part as an anonymous mapping.
  constexpr size_t kChunk2Size = 2 * kPageSize;
  DCHECK_LT(kChunk2Size, reservation.Size());  // We want to split the reservation.
  uint8_t* addr2 = reservation.Begin();
  MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
                                     addr2,
                                     /* byte_count= */ kChunk2Size,
                                     PROT_READ,
                                     /* low_4gb= */ false,
                                     /* reuse= */ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2.Size(), kChunk2Size);
  ASSERT_EQ(addr2, map2.Begin());
  ASSERT_EQ(map2.End(), map2.BaseEnd());  // kChunk2Size is page aligned.
  ASSERT_EQ(map2.BaseEnd(), reservation.Begin());

  // Map the rest of the reservation except the last byte.
  const size_t kChunk3Size = reservation.Size() - 1u;
  uint8_t* addr3 = reservation.Begin();
  MemMap map3 = MemMap::MapFileAtAddress(addr3,
                                         /* byte_count= */ kChunk3Size,
                                         PROT_READ,
                                         MAP_PRIVATE,
                                         scratch_file.GetFd(),
                                         /* start= */ dchecked_integral_cast<size_t>(addr3 - addr1),
                                         /* low_4gb= */ false,
                                         scratch_file.GetFilename().c_str(),
                                         /* reuse= */ false,
                                         &reservation,
                                         &error_msg);
  ASSERT_TRUE(map3.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map3.Size(), kChunk3Size);
  ASSERT_EQ(addr3, map3.Begin());
  // Entire pages are taken from the `reservation`, so it's now exhausted.
  ASSERT_FALSE(reservation.IsValid());

  // Now split the MiddleReservation.
  constexpr size_t kChunk2ASize = kPageSize - 1u;
  DCHECK_LT(kChunk2ASize, map2.Size());  // We want to split the reservation.
  MemMap map2a = map2.TakeReservedMemory(kChunk2ASize);
  ASSERT_TRUE(map2a.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2a.Size(), kChunk2ASize);
  ASSERT_EQ(addr2, map2a.Begin());
  ASSERT_TRUE(map2.IsValid());
  ASSERT_LT(map2a.End(), map2a.BaseEnd());
  ASSERT_EQ(map2a.BaseEnd(), map2.Begin());

  // And take the rest of the middle reservation.
  const size_t kChunk2BSize = map2.Size() - 1u;
  uint8_t* addr2b = map2.Begin();
  MemMap map2b = map2.TakeReservedMemory(kChunk2BSize);
  ASSERT_TRUE(map2b.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2b.Size(), kChunk2ASize);
  ASSERT_EQ(addr2b, map2b.Begin());
  ASSERT_FALSE(map2.IsValid());
}

}  // namespace art

namespace {

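// Listener that dumps /proc/self/maps whenever a fatal (ASSERT_*) test failure occurs,
// so the state of the address space behind a failed mapping attempt shows up in the log.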
class DumpMapsOnFailListener : public testing::EmptyTestEventListener {
  void OnTestPartResult(const testing::TestPartResult& result) override {
    switch (result.type()) {
      case testing::TestPartResult::kFatalFailure:
        art::PrintFileToLog("/proc/self/maps", android::base::LogSeverity::ERROR);
        break;

      // TODO: Could consider logging on EXPECT failures.
      case testing::TestPartResult::kNonFatalFailure:
      case testing::TestPartResult::kSuccess:
        break;
    }
  }
};

}  // namespace

// Inject our listener into the test runner.
extern "C"
__attribute__((visibility("default"))) __attribute__((used))
void ArtTestGlobalInit() {
  LOG(ERROR) << "Installing listener";
  testing::UnitTest::GetInstance()->listeners().Append(new DumpMapsOnFailListener());
}