/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_
#define ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_

#include "memory_region.h"

#include "bit_utils.h"
#include "memory_tool.h"

#include <array>

namespace art {

// BitMemoryRegion is a bit-offset subregion of a normal MemoryRegion. This is useful for
// abstracting away the bit start offset so that it does not have to be passed as an
// argument everywhere.
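//
// A minimal usage sketch (illustrative only; the buffer and the stored values
// are assumptions, not part of this API):
//
//   uint8_t buffer[4] = {};
//   BitMemoryRegion region(buffer, /* bit_start */ 0, /* bit_size */ 32);
//   region.StoreBits(/* bit_offset */ 3, /* value */ 0b101u, /* bit_length */ 3);
//   region.LoadBits(/* bit_offset */ 3, /* bit_length */ 3);  // Returns 0b101u.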
class BitMemoryRegion final : public ValueObject {
 public:
  struct Less {
    bool operator()(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) const {
      return Compare(lhs, rhs) < 0;
    }
  };

  BitMemoryRegion() = default;
  ALWAYS_INLINE BitMemoryRegion(uint8_t* data, ssize_t bit_start, size_t bit_size) {
    // Normalize the data pointer. Note that bit_start may be negative.
    data_ = AlignDown(data + (bit_start >> kBitsPerByteLog2), kPageSize);
    bit_start_ = bit_start + kBitsPerByte * (data - data_);
    bit_size_ = bit_size;
  }
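
  // Normalization sketch (the addresses and 4 KiB pages are assumptions, for
  // illustration): for data == 0x1003 and bit_start == -5, the byte offset is
  // -5 >> 3 == -1, so data_ == AlignDown(0x1002, 0x1000) == 0x1000 and
  // bit_start_ == -5 + 8 * (0x1003 - 0x1000) == 19, which names the same bit.
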
  ALWAYS_INLINE explicit BitMemoryRegion(MemoryRegion region)
      : BitMemoryRegion(region.begin(), /* bit_start */ 0, region.size_in_bits()) {
  }
  ALWAYS_INLINE BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_length)
      : BitMemoryRegion(region) {
    *this = Subregion(bit_offset, bit_length);
  }

  ALWAYS_INLINE bool IsValid() const { return data_ != nullptr; }

  const uint8_t* data() const {
    DCHECK_ALIGNED(bit_start_, kBitsPerByte);
    return data_ + bit_start_ / kBitsPerByte;
  }

  size_t size_in_bits() const {
    return bit_size_;
  }

  void Resize(size_t bit_size) {
    bit_size_ = bit_size;
  }

  ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset, size_t bit_length) const {
    DCHECK_LE(bit_offset, bit_size_);
    DCHECK_LE(bit_length, bit_size_ - bit_offset);
    BitMemoryRegion result = *this;
    result.bit_start_ += bit_offset;
    result.bit_size_ = bit_length;
    return result;
  }

  ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset) const {
    DCHECK_LE(bit_offset, bit_size_);
    BitMemoryRegion result = *this;
    result.bit_start_ += bit_offset;
    result.bit_size_ -= bit_offset;
    return result;
  }

  // Load a single bit in the region. The bit at offset 0 is the least
  // significant bit in the first byte.
  ALWAYS_INLINE bool LoadBit(size_t bit_offset) const {
    DCHECK_LT(bit_offset, bit_size_);
    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
    return ((data_[index] >> shift) & 1) != 0;
  }
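
  // Bit-order sketch (the byte value is an assumption, for illustration):
  // if the first byte is 0b00000100, then LoadBit(2) is true and LoadBit(0)
  // is false, since bit offset 0 names the least significant bit of byte 0.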

  ALWAYS_INLINE void StoreBit(size_t bit_offset, bool value) {
    DCHECK_LT(bit_offset, bit_size_);
    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
    data_[index] &= ~(1 << shift);  // Clear bit.
    data_[index] |= (value ? 1 : 0) << shift;  // Set bit.
    DCHECK_EQ(value, LoadBit(bit_offset));
  }

  // Load `bit_length` bits from `data` starting at the given `bit_offset`.
  // The least significant bit is stored at the smallest memory offset.
  template<typename Result = size_t>
  ATTRIBUTE_NO_SANITIZE_ADDRESS  // We might touch extra bytes due to the alignment.
  ALWAYS_INLINE Result LoadBits(size_t bit_offset, size_t bit_length) const {
    static_assert(std::is_integral<Result>::value, "Result must be integral");
    static_assert(std::is_unsigned<Result>::value, "Result must be unsigned");
    DCHECK(IsAligned<sizeof(Result)>(data_));
    DCHECK_LE(bit_offset, bit_size_);
    DCHECK_LE(bit_length, bit_size_ - bit_offset);
    DCHECK_LE(bit_length, BitSizeOf<Result>());
    if (bit_length == 0) {
      return 0;
    }
    // Load the naturally-aligned value which contains the least significant bit.
    Result* data = reinterpret_cast<Result*>(data_);
    size_t width = BitSizeOf<Result>();
    size_t index = (bit_start_ + bit_offset) / width;
    size_t shift = (bit_start_ + bit_offset) % width;
    Result value = data[index] >> shift;
    // Load the extra value containing the most significant bit (it might be the same one).
    // We cannot just load the following value, as that could cause a SIGSEGV.
    Result extra = data[index + (shift + (bit_length - 1)) / width];
    // Mask to clear unwanted bits (the 1s are needed to avoid an undefined shift).
    Result clear = (std::numeric_limits<Result>::max() << 1) << (bit_length - 1);
    // Prepend the extra value. The explicit '& (width - 1)' makes the shift defined.
    // It is a no-op for `shift != 0`, and if `shift == 0` then `value == extra`, because
    // `bit_length <= width` causes `value` and `extra` to be read from the same location.
    // The '& (width - 1)' is implied by the shift instruction on ARM and removed by the compiler.
    return (value | (extra << ((width - shift) & (width - 1)))) & ~clear;
  }
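
  // Worked example for LoadBits (the byte values are assumptions): given the
  // bytes {0xAB, 0xCD}, LoadBits<uint32_t>(/* bit_offset */ 4, /* bit_length */ 8)
  // returns 0xDA - the high nibble of 0xAB supplies the low four bits and the
  // low nibble of 0xCD the high four bits, as lower offsets are less significant.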

  // Store `bit_length` bits in `data` starting at the given `bit_offset`.
  // The least significant bit is stored at the smallest memory offset.
  ALWAYS_INLINE void StoreBits(size_t bit_offset, uint32_t value, size_t bit_length) {
    DCHECK_LE(bit_offset, bit_size_);
    DCHECK_LE(bit_length, bit_size_ - bit_offset);
    DCHECK_LE(bit_length, BitSizeOf<uint32_t>());
    DCHECK_LE(value, MaxInt<uint32_t>(bit_length));
    if (bit_length == 0) {
      return;
    }
    // Write the data byte by byte to avoid races with other threads
    // on bytes that do not overlap with this region.
    uint32_t mask = std::numeric_limits<uint32_t>::max() >> (BitSizeOf<uint32_t>() - bit_length);
    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
    data_[index] &= ~(mask << shift);  // Clear bits.
    data_[index] |= (value << shift);  // Set bits.
    size_t finished_bits = kBitsPerByte - shift;
    for (int i = 1; finished_bits < bit_length; i++, finished_bits += kBitsPerByte) {
      data_[index + i] &= ~(mask >> finished_bits);  // Clear bits.
      data_[index + i] |= (value >> finished_bits);  // Set bits.
    }
    DCHECK_EQ(value, LoadBits(bit_offset, bit_length));
  }
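
  // Inverse of the LoadBits example above (assuming an all-zero buffer):
  // StoreBits(/* bit_offset */ 4, /* value */ 0xDA, /* bit_length */ 8)
  // sets the first two bytes to {0xA0, 0x0D}.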

  // Store bits from another bit region.
  ALWAYS_INLINE void StoreBits(size_t bit_offset, const BitMemoryRegion& src, size_t bit_length) {
    DCHECK_LE(bit_offset, bit_size_);
    DCHECK_LE(bit_length, bit_size_ - bit_offset);
    size_t bit = 0;
    constexpr size_t kNumBits = BitSizeOf<uint32_t>();
    for (; bit + kNumBits <= bit_length; bit += kNumBits) {
      StoreBits(bit_offset + bit, src.LoadBits(bit, kNumBits), kNumBits);
    }
    size_t num_bits = bit_length - bit;
    StoreBits(bit_offset + bit, src.LoadBits(bit, num_bits), num_bits);
  }

  // Count the number of set bits within the given bit range.
  ALWAYS_INLINE size_t PopCount(size_t bit_offset, size_t bit_length) const {
    DCHECK_LE(bit_offset, bit_size_);
    DCHECK_LE(bit_length, bit_size_ - bit_offset);
    size_t count = 0;
    size_t bit = 0;
    constexpr size_t kNumBits = BitSizeOf<uint32_t>();
    for (; bit + kNumBits <= bit_length; bit += kNumBits) {
      count += POPCOUNT(LoadBits(bit_offset + bit, kNumBits));
    }
    count += POPCOUNT(LoadBits(bit_offset + bit, bit_length - bit));
    return count;
  }
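
  // Example (the byte values are assumptions): for the bytes {0xF0, 0x01},
  // PopCount(/* bit_offset */ 4, /* bit_length */ 8) returns 5 - the four set
  // bits of the 0xF0 nibble plus the lowest bit of 0x01.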

  static int Compare(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) {
    if (lhs.size_in_bits() != rhs.size_in_bits()) {
      return (lhs.size_in_bits() < rhs.size_in_bits()) ? -1 : 1;
    }
    size_t bit = 0;
    constexpr size_t kNumBits = BitSizeOf<uint32_t>();
    for (; bit + kNumBits <= lhs.size_in_bits(); bit += kNumBits) {
      uint32_t lhs_bits = lhs.LoadBits(bit, kNumBits);
      uint32_t rhs_bits = rhs.LoadBits(bit, kNumBits);
      if (lhs_bits != rhs_bits) {
        return (lhs_bits < rhs_bits) ? -1 : 1;
      }
    }
    size_t num_bits = lhs.size_in_bits() - bit;
    uint32_t lhs_bits = lhs.LoadBits(bit, num_bits);
    uint32_t rhs_bits = rhs.LoadBits(bit, num_bits);
    if (lhs_bits != rhs_bits) {
      return (lhs_bits < rhs_bits) ? -1 : 1;
    }
    return 0;
  }

 private:
  uint8_t* data_ = nullptr;  // The pointer is page aligned.
  size_t bit_start_ = 0;
  size_t bit_size_ = 0;
};

constexpr uint32_t kVarintBits = 4;  // Minimum number of bits used for varint.
constexpr uint32_t kVarintMax = 11;  // Maximum value which is stored "inline".
David Srbecky0c3aa312018-08-03 14:52:32 +0100218
David Srbecky078d7ba2018-06-21 15:36:48 +0100219class BitMemoryReader {
220 public:
David Srbeckyd1606412018-07-31 15:05:14 +0100221 BitMemoryReader(BitMemoryReader&&) = default;
222 explicit BitMemoryReader(BitMemoryRegion data)
223 : finished_region_(data.Subregion(0, 0) /* set the length to zero */ ) {
224 }
225 explicit BitMemoryReader(const uint8_t* data, ssize_t bit_offset = 0)
226 : finished_region_(const_cast<uint8_t*>(data), bit_offset, /* bit_length */ 0) {
David Srbeckya38e6cf2018-06-26 18:13:49 +0100227 }
David Srbecky078d7ba2018-06-21 15:36:48 +0100228
David Srbecky42deda82018-08-10 11:23:27 +0100229 const uint8_t* data() const { return finished_region_.data(); }
230
David Srbeckyd1606412018-07-31 15:05:14 +0100231 BitMemoryRegion GetReadRegion() const { return finished_region_; }
David Srbecky078d7ba2018-06-21 15:36:48 +0100232
David Srbeckyd1606412018-07-31 15:05:14 +0100233 size_t NumberOfReadBits() const { return finished_region_.size_in_bits(); }
David Srbecky078d7ba2018-06-21 15:36:48 +0100234
David Srbeckyd1606412018-07-31 15:05:14 +0100235 ALWAYS_INLINE BitMemoryRegion ReadRegion(size_t bit_length) {
236 size_t bit_offset = finished_region_.size_in_bits();
237 finished_region_.Resize(bit_offset + bit_length);
238 return finished_region_.Subregion(bit_offset, bit_length);
David Srbeckyb73323c2018-07-15 23:58:44 +0100239 }
240
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000241 template<typename Result = size_t>
242 ALWAYS_INLINE Result ReadBits(size_t bit_length) {
243 return ReadRegion(bit_length).LoadBits<Result>(/* bit_offset */ 0, bit_length);
David Srbecky078d7ba2018-06-21 15:36:48 +0100244 }
245
David Srbeckyb73323c2018-07-15 23:58:44 +0100246 ALWAYS_INLINE bool ReadBit() {
David Srbeckyd1606412018-07-31 15:05:14 +0100247 return ReadRegion(/* bit_length */ 1).LoadBit(/* bit_offset */ 0);
David Srbeckyb73323c2018-07-15 23:58:44 +0100248 }
249
David Srbecky0c3aa312018-08-03 14:52:32 +0100250 // Read variable-length bit-packed integer.
251 // The first four bits determine the variable length of the encoded integer:
252 // Values 0..11 represent the result as-is, with no further following bits.
253 // Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
254 ALWAYS_INLINE uint32_t ReadVarint() {
David Srbecky6c4ec5c2019-06-20 07:23:19 +0000255 uint32_t x = ReadBits(kVarintBits);
256 return (x <= kVarintMax) ? x : ReadBits((x - kVarintMax) * kBitsPerByte);
David Srbecky0c3aa312018-08-03 14:52:32 +0100257 }
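
  // Encoding sketch (values chosen for illustration): 7 fits into the tag, so
  // it is stored as the four bits 0b0111 with nothing following; 300 needs two
  // bytes (MinimumBitsToStore(300) == 9), so it is stored as the tag
  // 0b1101 (kVarintMax + 2) followed by the 16-bit value 300.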

  // Read N 'interleaved' varints (different from just reading N consecutive varints).
  // All small values are stored first and the large values are stored after them.
  // This requires fewer bit-reads compared to individually storing the varints.
  template<size_t N>
  ALWAYS_INLINE std::array<uint32_t, N> ReadInterleavedVarints() {
    static_assert(N * kVarintBits <= sizeof(uint64_t) * kBitsPerByte, "N too big");
    std::array<uint32_t, N> values;
    // StackMap BitTable uses over 8 varints in the header, so we need uint64_t.
    uint64_t data = ReadBits<uint64_t>(N * kVarintBits);
    for (size_t i = 0; i < N; i++) {
      values[i] = BitFieldExtract(data, i * kVarintBits, kVarintBits);
    }
    // Do the second part in its own loop as that seems to produce better code in clang.
    for (size_t i = 0; i < N; i++) {
      if (UNLIKELY(values[i] > kVarintMax)) {
        values[i] = ReadBits((values[i] - kVarintMax) * kBitsPerByte);
      }
    }
    return values;
  }
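
  // Layout sketch (values chosen for illustration): for the values {1, 300},
  // the stream holds the two 4-bit tags 0b0001 and 0b1101 back to back,
  // followed by the 16-bit payload of 300, so both tags can be fetched with a
  // single bit-read instead of two separate ones.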

 private:
  // Represents all of the bits which were read so far. There is no upper bound.
  // Therefore, by definition, the "cursor" is always at the end of the region.
  BitMemoryRegion finished_region_;

  DISALLOW_COPY_AND_ASSIGN(BitMemoryReader);
};

template<typename Vector>
class BitMemoryWriter {
 public:
  explicit BitMemoryWriter(Vector* out, size_t bit_offset = 0)
      : out_(out), bit_start_(bit_offset), bit_offset_(bit_offset) {
    DCHECK_EQ(NumberOfWrittenBits(), 0u);
  }

  BitMemoryRegion GetWrittenRegion() const {
    return BitMemoryRegion(out_->data(), bit_start_, bit_offset_ - bit_start_);
  }

  const uint8_t* data() const { return out_->data(); }

  size_t NumberOfWrittenBits() const { return bit_offset_ - bit_start_; }

  ALWAYS_INLINE BitMemoryRegion Allocate(size_t bit_length) {
    out_->resize(BitsToBytesRoundUp(bit_offset_ + bit_length));
    BitMemoryRegion region(out_->data(), bit_offset_, bit_length);
    DCHECK_LE(bit_length, std::numeric_limits<size_t>::max() - bit_offset_) << "Overflow";
    bit_offset_ += bit_length;
    return region;
  }

  ALWAYS_INLINE void WriteRegion(const BitMemoryRegion& region) {
    Allocate(region.size_in_bits()).StoreBits(/* bit_offset */ 0, region, region.size_in_bits());
  }

  ALWAYS_INLINE void WriteBits(uint32_t value, size_t bit_length) {
    Allocate(bit_length).StoreBits(/* bit_offset */ 0, value, bit_length);
  }

  ALWAYS_INLINE void WriteBit(bool value) {
    Allocate(1).StoreBit(/* bit_offset */ 0, value);
  }

  template<size_t N>
  ALWAYS_INLINE void WriteInterleavedVarints(std::array<uint32_t, N> values) {
    // Write the small values (or the number of bytes needed for the large values).
    for (uint32_t value : values) {
      if (value > kVarintMax) {
        WriteBits(kVarintMax + BitsToBytesRoundUp(MinimumBitsToStore(value)), kVarintBits);
      } else {
        WriteBits(value, kVarintBits);
      }
    }
    // Write the large values.
    for (uint32_t value : values) {
      if (value > kVarintMax) {
        WriteBits(value, BitsToBytesRoundUp(MinimumBitsToStore(value)) * kBitsPerByte);
      }
    }
  }
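
  // Round-trip sketch (the vector type and the values are assumptions, for
  // illustration):
  //   std::vector<uint8_t> buffer;
  //   BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
  //   writer.WriteInterleavedVarints<2>({1, 300});
  //   BitMemoryReader(buffer.data()).ReadInterleavedVarints<2>();  // {1, 300}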

  ALWAYS_INLINE void WriteVarint(uint32_t value) {
    WriteInterleavedVarints<1>({value});
  }

  ALWAYS_INLINE void ByteAlign() {
    size_t end = bit_start_ + bit_offset_;
    bit_offset_ += RoundUp(end, kBitsPerByte) - end;
  }

 private:
  Vector* out_;
  size_t bit_start_;
  size_t bit_offset_;

  DISALLOW_COPY_AND_ASSIGN(BitMemoryWriter);
};
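
// Writer/reader pairing sketch (illustrative; the vector and the values are
// assumptions): bits written through BitMemoryWriter can be read back in the
// same order with BitMemoryReader:
//
//   std::vector<uint8_t> buffer;
//   BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
//   writer.WriteBit(true);
//   writer.WriteBits(/* value */ 5u, /* bit_length */ 3);
//   writer.WriteVarint(300u);
//   BitMemoryReader reader(buffer.data());
//   reader.ReadBit();     // true
//   reader.ReadBits(3);   // 5u
//   reader.ReadVarint();  // 300u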

}  // namespace art

#endif  // ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_