/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_
#define ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_

#include "memory_region.h"

#include "bit_utils.h"
#include "memory_tool.h"

#include <array>

namespace art {

29// Bit memory region is a bit offset subregion of a normal memoryregion. This is useful for
30// abstracting away the bit start offset to avoid needing passing as an argument everywhere.
Roland Levillainbbc6e7e2018-08-24 16:58:47 +010031class BitMemoryRegion final : public ValueObject {
Mathieu Chartier12f1b992017-01-19 18:00:45 -080032 public:
David Srbeckyb73323c2018-07-15 23:58:44 +010033 struct Less {
David Srbeckyd1606412018-07-31 15:05:14 +010034 bool operator()(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) const {
35 return Compare(lhs, rhs) < 0;
David Srbeckyb73323c2018-07-15 23:58:44 +010036 }
37 };
38
Mathieu Chartier12f1b992017-01-19 18:00:45 -080039 BitMemoryRegion() = default;
David Srbeckyd1606412018-07-31 15:05:14 +010040 ALWAYS_INLINE BitMemoryRegion(uint8_t* data, ssize_t bit_start, size_t bit_size) {
41 // Normalize the data pointer. Note that bit_start may be negative.
David Srbecky6c4ec5c2019-06-20 07:23:19 +000042 data_ = AlignDown(data + (bit_start >> kBitsPerByteLog2), kPageSize);
43 bit_start_ = bit_start + kBitsPerByte * (data - data_);
David Srbeckyd1606412018-07-31 15:05:14 +010044 bit_size_ = bit_size;
David Srbeckyb73323c2018-07-15 23:58:44 +010045 }
David Srbecky68fefac2018-05-10 17:49:33 +010046 ALWAYS_INLINE explicit BitMemoryRegion(MemoryRegion region)
David Srbeckyb73323c2018-07-15 23:58:44 +010047 : BitMemoryRegion(region.begin(), /* bit_start */ 0, region.size_in_bits()) {
David Srbecky68fefac2018-05-10 17:49:33 +010048 }
49 ALWAYS_INLINE BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_length)
50 : BitMemoryRegion(region) {
David Srbeckyd1606412018-07-31 15:05:14 +010051 *this = Subregion(bit_offset, bit_length);
Mathieu Chartier12f1b992017-01-19 18:00:45 -080052 }
53
David Srbecky68fefac2018-05-10 17:49:33 +010054 ALWAYS_INLINE bool IsValid() const { return data_ != nullptr; }
55
David Srbecky42deda82018-08-10 11:23:27 +010056 const uint8_t* data() const {
57 DCHECK_ALIGNED(bit_start_, kBitsPerByte);
David Srbecky6c4ec5c2019-06-20 07:23:19 +000058 return data_ + bit_start_ / kBitsPerByte;
David Srbecky42deda82018-08-10 11:23:27 +010059 }
60
Mathieu Chartier12f1b992017-01-19 18:00:45 -080061 size_t size_in_bits() const {
David Srbecky68fefac2018-05-10 17:49:33 +010062 return bit_size_;
Mathieu Chartier12f1b992017-01-19 18:00:45 -080063 }
64
David Srbeckyd1606412018-07-31 15:05:14 +010065 void Resize(size_t bit_size) {
66 bit_size_ = bit_size;
67 }
68
David Srbecky68fefac2018-05-10 17:49:33 +010069 ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset, size_t bit_length) const {
70 DCHECK_LE(bit_offset, bit_size_);
71 DCHECK_LE(bit_length, bit_size_ - bit_offset);
72 BitMemoryRegion result = *this;
73 result.bit_start_ += bit_offset;
74 result.bit_size_ = bit_length;
75 return result;
Mathieu Chartier575d3e62017-02-06 11:00:40 -080076 }
77
David Srbeckyd1606412018-07-31 15:05:14 +010078 ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset) const {
79 DCHECK_LE(bit_offset, bit_size_);
David Srbeckya38e6cf2018-06-26 18:13:49 +010080 BitMemoryRegion result = *this;
David Srbeckyd1606412018-07-31 15:05:14 +010081 result.bit_start_ += bit_offset;
82 result.bit_size_ -= bit_offset;
David Srbeckya38e6cf2018-06-26 18:13:49 +010083 return result;
84 }
85
Mathieu Chartier12f1b992017-01-19 18:00:45 -080086 // Load a single bit in the region. The bit at offset 0 is the least
87 // significant bit in the first byte.
Vladimir Marko718d86d2018-10-03 11:24:47 +010088 ALWAYS_INLINE bool LoadBit(size_t bit_offset) const {
David Srbecky68fefac2018-05-10 17:49:33 +010089 DCHECK_LT(bit_offset, bit_size_);
Vladimir Marko718d86d2018-10-03 11:24:47 +010090 size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
91 size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
David Srbecky6c4ec5c2019-06-20 07:23:19 +000092 return ((data_[index] >> shift) & 1) != 0;
Mathieu Chartier12f1b992017-01-19 18:00:45 -080093 }
94
Vladimir Marko718d86d2018-10-03 11:24:47 +010095 ALWAYS_INLINE void StoreBit(size_t bit_offset, bool value) {
David Srbecky68fefac2018-05-10 17:49:33 +010096 DCHECK_LT(bit_offset, bit_size_);
David Srbecky68fefac2018-05-10 17:49:33 +010097 size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
98 size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
David Srbecky6c4ec5c2019-06-20 07:23:19 +000099 data_[index] &= ~(1 << shift); // Clear bit.
100 data_[index] |= (value ? 1 : 0) << shift; // Set bit.
David Srbecky68fefac2018-05-10 17:49:33 +0100101 DCHECK_EQ(value, LoadBit(bit_offset));
Mathieu Chartier12f1b992017-01-19 18:00:45 -0800102 }
103
David Srbecky68fefac2018-05-10 17:49:33 +0100104 // Load `bit_length` bits from `data` starting at given `bit_offset`.
105 // The least significant bit is stored in the smallest memory offset.
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000106 template<typename Result = size_t>
David Srbecky68fefac2018-05-10 17:49:33 +0100107 ATTRIBUTE_NO_SANITIZE_ADDRESS // We might touch extra bytes due to the alignment.
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000108 ALWAYS_INLINE Result LoadBits(size_t bit_offset, size_t bit_length) const {
109 static_assert(std::is_integral<Result>::value, "Result must be integral");
110 static_assert(std::is_unsigned<Result>::value, "Result must be unsigned");
111 DCHECK(IsAligned<sizeof(Result)>(data_));
David Srbecky68fefac2018-05-10 17:49:33 +0100112 DCHECK_LE(bit_offset, bit_size_);
113 DCHECK_LE(bit_length, bit_size_ - bit_offset);
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000114 DCHECK_LE(bit_length, BitSizeOf<Result>());
David Srbecky68fefac2018-05-10 17:49:33 +0100115 if (bit_length == 0) {
116 return 0;
117 }
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000118 Result* data = reinterpret_cast<Result*>(data_);
119 size_t width = BitSizeOf<Result>();
120 Result clear = (std::numeric_limits<Result>::max() << 1) << (bit_length - 1);
121 size_t index = (bit_start_ + bit_offset) / width;
122 size_t shift = (bit_start_ + bit_offset) % width;
123 Result value = data[index] >> shift;
124 size_t finished_bits = width - shift;
David Srbecky68fefac2018-05-10 17:49:33 +0100125 if (finished_bits < bit_length) {
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000126 value |= data[index + 1] << finished_bits;
David Srbecky68fefac2018-05-10 17:49:33 +0100127 }
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000128 return value & ~clear;
Mathieu Chartier12f1b992017-01-19 18:00:45 -0800129 }
130
David Srbecky68fefac2018-05-10 17:49:33 +0100131 // Store `bit_length` bits in `data` starting at given `bit_offset`.
132 // The least significant bit is stored in the smallest memory offset.
133 ALWAYS_INLINE void StoreBits(size_t bit_offset, uint32_t value, size_t bit_length) {
134 DCHECK_LE(bit_offset, bit_size_);
135 DCHECK_LE(bit_length, bit_size_ - bit_offset);
136 DCHECK_LE(bit_length, BitSizeOf<uint32_t>());
137 DCHECK_LE(value, MaxInt<uint32_t>(bit_length));
138 if (bit_length == 0) {
139 return;
140 }
141 // Write data byte by byte to avoid races with other threads
142 // on bytes that do not overlap with this region.
David Srbecky68fefac2018-05-10 17:49:33 +0100143 uint32_t mask = std::numeric_limits<uint32_t>::max() >> (BitSizeOf<uint32_t>() - bit_length);
144 size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
145 size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000146 data_[index] &= ~(mask << shift); // Clear bits.
147 data_[index] |= (value << shift); // Set bits.
David Srbecky68fefac2018-05-10 17:49:33 +0100148 size_t finished_bits = kBitsPerByte - shift;
149 for (int i = 1; finished_bits < bit_length; i++, finished_bits += kBitsPerByte) {
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000150 data_[index + i] &= ~(mask >> finished_bits); // Clear bits.
151 data_[index + i] |= (value >> finished_bits); // Set bits.
David Srbecky68fefac2018-05-10 17:49:33 +0100152 }
153 DCHECK_EQ(value, LoadBits(bit_offset, bit_length));
154 }
155
David Srbecky5513c2b2018-05-24 15:03:20 +0100156 // Store bits from other bit region.
157 ALWAYS_INLINE void StoreBits(size_t bit_offset, const BitMemoryRegion& src, size_t bit_length) {
158 DCHECK_LE(bit_offset, bit_size_);
159 DCHECK_LE(bit_length, bit_size_ - bit_offset);
160 size_t bit = 0;
161 constexpr size_t kNumBits = BitSizeOf<uint32_t>();
162 for (; bit + kNumBits <= bit_length; bit += kNumBits) {
163 StoreBits(bit_offset + bit, src.LoadBits(bit, kNumBits), kNumBits);
164 }
165 size_t num_bits = bit_length - bit;
166 StoreBits(bit_offset + bit, src.LoadBits(bit, num_bits), num_bits);
167 }
168
David Srbecky6de88332018-06-03 12:00:11 +0100169 // Count the number of set bits within the given bit range.
170 ALWAYS_INLINE size_t PopCount(size_t bit_offset, size_t bit_length) const {
171 DCHECK_LE(bit_offset, bit_size_);
172 DCHECK_LE(bit_length, bit_size_ - bit_offset);
173 size_t count = 0;
174 size_t bit = 0;
175 constexpr size_t kNumBits = BitSizeOf<uint32_t>();
176 for (; bit + kNumBits <= bit_length; bit += kNumBits) {
177 count += POPCOUNT(LoadBits(bit_offset + bit, kNumBits));
178 }
179 count += POPCOUNT(LoadBits(bit_offset + bit, bit_length - bit));
180 return count;
181 }
182
David Srbeckyd1606412018-07-31 15:05:14 +0100183 static int Compare(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) {
184 if (lhs.size_in_bits() != rhs.size_in_bits()) {
185 return (lhs.size_in_bits() < rhs.size_in_bits()) ? -1 : 1;
186 }
187 size_t bit = 0;
188 constexpr size_t kNumBits = BitSizeOf<uint32_t>();
189 for (; bit + kNumBits <= lhs.size_in_bits(); bit += kNumBits) {
190 uint32_t lhs_bits = lhs.LoadBits(bit, kNumBits);
191 uint32_t rhs_bits = rhs.LoadBits(bit, kNumBits);
192 if (lhs_bits != rhs_bits) {
193 return (lhs_bits < rhs_bits) ? -1 : 1;
194 }
195 }
196 size_t num_bits = lhs.size_in_bits() - bit;
197 uint32_t lhs_bits = lhs.LoadBits(bit, num_bits);
198 uint32_t rhs_bits = rhs.LoadBits(bit, num_bits);
199 if (lhs_bits != rhs_bits) {
200 return (lhs_bits < rhs_bits) ? -1 : 1;
201 }
202 return 0;
Mathieu Chartier12f1b992017-01-19 18:00:45 -0800203 }
204
205 private:
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000206 uint8_t* data_ = nullptr; // The pointer is page aligned.
Mathieu Chartier12f1b992017-01-19 18:00:45 -0800207 size_t bit_start_ = 0;
David Srbecky68fefac2018-05-10 17:49:33 +0100208 size_t bit_size_ = 0;
Mathieu Chartier12f1b992017-01-19 18:00:45 -0800209};
210
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000211constexpr uint32_t kVarintBits = 4; // Minimum number of bits used for varint.
212constexpr uint32_t kVarintMax = 11; // Maximum value which is stored "inline".
David Srbecky0c3aa312018-08-03 14:52:32 +0100213
David Srbecky078d7ba2018-06-21 15:36:48 +0100214class BitMemoryReader {
215 public:
David Srbeckyd1606412018-07-31 15:05:14 +0100216 BitMemoryReader(BitMemoryReader&&) = default;
217 explicit BitMemoryReader(BitMemoryRegion data)
218 : finished_region_(data.Subregion(0, 0) /* set the length to zero */ ) {
219 }
220 explicit BitMemoryReader(const uint8_t* data, ssize_t bit_offset = 0)
221 : finished_region_(const_cast<uint8_t*>(data), bit_offset, /* bit_length */ 0) {
David Srbeckya38e6cf2018-06-26 18:13:49 +0100222 }
David Srbecky078d7ba2018-06-21 15:36:48 +0100223
David Srbecky42deda82018-08-10 11:23:27 +0100224 const uint8_t* data() const { return finished_region_.data(); }
225
David Srbeckyd1606412018-07-31 15:05:14 +0100226 BitMemoryRegion GetReadRegion() const { return finished_region_; }
David Srbecky078d7ba2018-06-21 15:36:48 +0100227
David Srbeckyd1606412018-07-31 15:05:14 +0100228 size_t NumberOfReadBits() const { return finished_region_.size_in_bits(); }
David Srbecky078d7ba2018-06-21 15:36:48 +0100229
David Srbeckyd1606412018-07-31 15:05:14 +0100230 ALWAYS_INLINE BitMemoryRegion ReadRegion(size_t bit_length) {
231 size_t bit_offset = finished_region_.size_in_bits();
232 finished_region_.Resize(bit_offset + bit_length);
233 return finished_region_.Subregion(bit_offset, bit_length);
David Srbeckyb73323c2018-07-15 23:58:44 +0100234 }
235
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000236 template<typename Result = size_t>
237 ALWAYS_INLINE Result ReadBits(size_t bit_length) {
238 return ReadRegion(bit_length).LoadBits<Result>(/* bit_offset */ 0, bit_length);
David Srbecky078d7ba2018-06-21 15:36:48 +0100239 }
240
David Srbeckyb73323c2018-07-15 23:58:44 +0100241 ALWAYS_INLINE bool ReadBit() {
David Srbeckyd1606412018-07-31 15:05:14 +0100242 return ReadRegion(/* bit_length */ 1).LoadBit(/* bit_offset */ 0);
David Srbeckyb73323c2018-07-15 23:58:44 +0100243 }
244
David Srbecky0c3aa312018-08-03 14:52:32 +0100245 // Read variable-length bit-packed integer.
246 // The first four bits determine the variable length of the encoded integer:
247 // Values 0..11 represent the result as-is, with no further following bits.
248 // Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
249 ALWAYS_INLINE uint32_t ReadVarint() {
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000250 uint32_t x = ReadBits(kVarintBits);
251 return (x <= kVarintMax) ? x : ReadBits((x - kVarintMax) * kBitsPerByte);
David Srbecky0c3aa312018-08-03 14:52:32 +0100252 }
253
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000254 // Read N 'interleaved' varints (different to just reading consecutive varints).
255 // All small values are stored first and the large values are stored after them.
256 // This requires fewer bit-reads compared to indidually storing the varints.
257 template<size_t N>
258 ALWAYS_INLINE std::array<uint32_t, N> ReadInterleavedVarints() {
259 static_assert(N * kVarintBits <= sizeof(uint64_t) * kBitsPerByte, "N too big");
260 std::array<uint32_t, N> values;
261 // StackMap BitTable uses over 8 varints in the header, so we need uint64_t.
262 uint64_t data = ReadBits<uint64_t>(N * kVarintBits);
263 for (size_t i = 0; i < N; i++) {
David Srbecky6c0c7c82019-05-28 22:28:36 +0100264 values[i] = BitFieldExtract(data, i * kVarintBits, kVarintBits);
265 }
266 // Do the second part in its own loop as that seems to produce better code in clang.
267 for (size_t i = 0; i < N; i++) {
268 if (UNLIKELY(values[i] > kVarintMax)) {
269 values[i] = ReadBits((values[i] - kVarintMax) * kBitsPerByte);
270 }
David Srbecky67ba8722019-05-23 15:32:18 +0100271 }
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000272 return values;
David Srbecky67ba8722019-05-23 15:32:18 +0100273 }
274
David Srbecky078d7ba2018-06-21 15:36:48 +0100275 private:
David Srbeckya38e6cf2018-06-26 18:13:49 +0100276 // Represents all of the bits which were read so far. There is no upper bound.
277 // Therefore, by definition, the "cursor" is always at the end of the region.
278 BitMemoryRegion finished_region_;
David Srbecky078d7ba2018-06-21 15:36:48 +0100279
280 DISALLOW_COPY_AND_ASSIGN(BitMemoryReader);
281};
282
283template<typename Vector>
284class BitMemoryWriter {
285 public:
286 explicit BitMemoryWriter(Vector* out, size_t bit_offset = 0)
David Srbeckyd1606412018-07-31 15:05:14 +0100287 : out_(out), bit_start_(bit_offset), bit_offset_(bit_offset) {
288 DCHECK_EQ(NumberOfWrittenBits(), 0u);
289 }
290
291 BitMemoryRegion GetWrittenRegion() const {
292 return BitMemoryRegion(out_->data(), bit_start_, bit_offset_ - bit_start_);
David Srbeckya38e6cf2018-06-26 18:13:49 +0100293 }
294
295 const uint8_t* data() const { return out_->data(); }
David Srbecky078d7ba2018-06-21 15:36:48 +0100296
David Srbeckyd1606412018-07-31 15:05:14 +0100297 size_t NumberOfWrittenBits() const { return bit_offset_ - bit_start_; }
David Srbecky078d7ba2018-06-21 15:36:48 +0100298
299 ALWAYS_INLINE BitMemoryRegion Allocate(size_t bit_length) {
300 out_->resize(BitsToBytesRoundUp(bit_offset_ + bit_length));
David Srbeckyd1606412018-07-31 15:05:14 +0100301 BitMemoryRegion region(out_->data(), bit_offset_, bit_length);
David Srbeckyb73323c2018-07-15 23:58:44 +0100302 DCHECK_LE(bit_length, std::numeric_limits<size_t>::max() - bit_offset_) << "Overflow";
David Srbecky078d7ba2018-06-21 15:36:48 +0100303 bit_offset_ += bit_length;
304 return region;
305 }
306
David Srbeckyd1606412018-07-31 15:05:14 +0100307 ALWAYS_INLINE void WriteRegion(const BitMemoryRegion& region) {
308 Allocate(region.size_in_bits()).StoreBits(/* bit_offset */ 0, region, region.size_in_bits());
309 }
310
David Srbecky078d7ba2018-06-21 15:36:48 +0100311 ALWAYS_INLINE void WriteBits(uint32_t value, size_t bit_length) {
David Srbeckyd1606412018-07-31 15:05:14 +0100312 Allocate(bit_length).StoreBits(/* bit_offset */ 0, value, bit_length);
David Srbecky078d7ba2018-06-21 15:36:48 +0100313 }
314
David Srbeckyb73323c2018-07-15 23:58:44 +0100315 ALWAYS_INLINE void WriteBit(bool value) {
David Srbeckyd1606412018-07-31 15:05:14 +0100316 Allocate(1).StoreBit(/* bit_offset */ 0, value);
David Srbeckyb73323c2018-07-15 23:58:44 +0100317 }
318
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000319 template<size_t N>
320 ALWAYS_INLINE void WriteInterleavedVarints(std::array<uint32_t, N> values) {
321 // Write small values (or the number of bytes needed for the large values).
322 for (uint32_t value : values) {
323 if (value > kVarintMax) {
324 WriteBits(kVarintMax + BitsToBytesRoundUp(MinimumBitsToStore(value)), kVarintBits);
325 } else {
326 WriteBits(value, kVarintBits);
327 }
Raylin Hsu1b2a49b2019-06-20 01:41:31 +0000328 }
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000329 // Write large values.
330 for (uint32_t value : values) {
331 if (value > kVarintMax) {
332 WriteBits(value, BitsToBytesRoundUp(MinimumBitsToStore(value)) * kBitsPerByte);
333 }
334 }
335 }
336
337 ALWAYS_INLINE void WriteVarint(uint32_t value) {
338 WriteInterleavedVarints<1>({value});
David Srbecky0c3aa312018-08-03 14:52:32 +0100339 }
340
David Srbeckyd1606412018-07-31 15:05:14 +0100341 ALWAYS_INLINE void ByteAlign() {
342 size_t end = bit_start_ + bit_offset_;
343 bit_offset_ += RoundUp(end, kBitsPerByte) - end;
David Srbeckyb73323c2018-07-15 23:58:44 +0100344 }
345
David Srbecky078d7ba2018-06-21 15:36:48 +0100346 private:
347 Vector* out_;
David Srbeckyd1606412018-07-31 15:05:14 +0100348 size_t bit_start_;
David Srbecky078d7ba2018-06-21 15:36:48 +0100349 size_t bit_offset_;
350
351 DISALLOW_COPY_AND_ASSIGN(BitMemoryWriter);
352};

}  // namespace art

#endif  // ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_