/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_
#define ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_

#include "memory_region.h"

#include "bit_utils.h"
#include "memory_tool.h"

#include <array>

namespace art {

// Bit memory region is a bit-offset subregion of a normal memory region. This is useful for
// abstracting away the bit start offset, to avoid needing to pass it as an argument everywhere.
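//
// An illustrative sketch (hypothetical buffer and offsets, not from the original comment):
// BitMemoryRegion(buffer, /* bit_start */ 3, /* bit_size */ 10) covers ten bits starting
// at the fourth bit of `buffer`, and its Subregion(2, 4) covers bits 5..8 of `buffer`.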
class BitMemoryRegion final : public ValueObject {
 public:
  struct Less {
    bool operator()(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) const {
      return Compare(lhs, rhs) < 0;
    }
  };

  BitMemoryRegion() = default;
  ALWAYS_INLINE BitMemoryRegion(uint8_t* data, ssize_t bit_start, size_t bit_size) {
    // Normalize the data pointer. Note that bit_start may be negative.
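    // (Aligning `data_` down to kPageSize also keeps it aligned for the word-sized
    // loads in LoadBits() below; a reading of the code, since LoadBits() DCHECKs
    // exactly this alignment.)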
    data_ = AlignDown(data + (bit_start >> kBitsPerByteLog2), kPageSize);
    bit_start_ = bit_start + kBitsPerByte * (data - data_);
    bit_size_ = bit_size;
  }
  ALWAYS_INLINE explicit BitMemoryRegion(MemoryRegion region)
      : BitMemoryRegion(region.begin(), /* bit_start */ 0, region.size_in_bits()) {
  }
  ALWAYS_INLINE BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_length)
      : BitMemoryRegion(region) {
    *this = Subregion(bit_offset, bit_length);
  }

  ALWAYS_INLINE bool IsValid() const { return data_ != nullptr; }

  const uint8_t* data() const {
    DCHECK_ALIGNED(bit_start_, kBitsPerByte);
    return data_ + bit_start_ / kBitsPerByte;
  }

  size_t size_in_bits() const {
    return bit_size_;
  }

  void Resize(size_t bit_size) {
    bit_size_ = bit_size;
  }

  ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset, size_t bit_length) const {
    DCHECK_LE(bit_offset, bit_size_);
    DCHECK_LE(bit_length, bit_size_ - bit_offset);
    BitMemoryRegion result = *this;
    result.bit_start_ += bit_offset;
    result.bit_size_ = bit_length;
    return result;
  }

  ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset) const {
    DCHECK_LE(bit_offset, bit_size_);
    BitMemoryRegion result = *this;
    result.bit_start_ += bit_offset;
    result.bit_size_ -= bit_offset;
    return result;
  }

  // Load a single bit in the region. The bit at offset 0 is the least
  // significant bit in the first byte.
  ALWAYS_INLINE bool LoadBit(size_t bit_offset) const {
    DCHECK_LT(bit_offset, bit_size_);
    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
    return ((data_[index] >> shift) & 1) != 0;
  }

  ALWAYS_INLINE void StoreBit(size_t bit_offset, bool value) {
    DCHECK_LT(bit_offset, bit_size_);
    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
    data_[index] &= ~(1 << shift);  // Clear bit.
    data_[index] |= (value ? 1 : 0) << shift;  // Set bit.
    DCHECK_EQ(value, LoadBit(bit_offset));
  }

  // Load `bit_length` bits from `data` starting at given `bit_offset`.
  // The least significant bit is stored in the smallest memory offset.
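  // For example (an illustrative sketch with made-up bytes): if the underlying bytes are
  // {0xAB, 0xCD}, then LoadBits(4, 8) yields 0xDA: the high nibble of 0xAB in the low
  // bits and the low nibble of 0xCD above it.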
  template<typename Result = size_t>
  ATTRIBUTE_NO_SANITIZE_ADDRESS  // We might touch extra bytes due to the alignment.
  ALWAYS_INLINE Result LoadBits(size_t bit_offset, size_t bit_length) const {
    static_assert(std::is_integral<Result>::value, "Result must be integral");
    static_assert(std::is_unsigned<Result>::value, "Result must be unsigned");
    DCHECK(IsAligned<sizeof(Result)>(data_));
    DCHECK_LE(bit_offset, bit_size_);
    DCHECK_LE(bit_length, bit_size_ - bit_offset);
    DCHECK_LE(bit_length, BitSizeOf<Result>());
    if (bit_length == 0) {
      return 0;
    }
    // Load naturally-aligned value which contains the least significant bit.
    Result* data = reinterpret_cast<Result*>(data_);
    size_t width = BitSizeOf<Result>();
    size_t index = (bit_start_ + bit_offset) / width;
    size_t shift = (bit_start_ + bit_offset) % width;
    Result value = data[index] >> shift;
    // Load extra value containing the most significant bit (it might be the same one).
    // We cannot just load the following value, as that could potentially cause a SIGSEGV.
    Result extra = data[index + (shift + (bit_length - 1)) / width];
    // Mask to clear unwanted bits (the 1s are needed to avoid undefined shift).
    Result clear = (std::numeric_limits<Result>::max() << 1) << (bit_length - 1);
    // Prepend the extra value. We add an explicit '& (width - 1)' so that the shift is defined.
    // It is a no-op for `shift != 0`, and if `shift == 0` then `value == extra`, because
    // `bit_length <= width` causes `value` and `extra` to be read from the same location.
    // The '& (width - 1)' is implied by the shift instruction on ARM and removed by the compiler.
    return (value | (extra << ((width - shift) & (width - 1)))) & ~clear;
  }

  // Store `bit_length` bits in `data` starting at given `bit_offset`.
  // The least significant bit is stored in the smallest memory offset.
  ALWAYS_INLINE void StoreBits(size_t bit_offset, uint32_t value, size_t bit_length) {
    DCHECK_LE(bit_offset, bit_size_);
    DCHECK_LE(bit_length, bit_size_ - bit_offset);
    DCHECK_LE(bit_length, BitSizeOf<uint32_t>());
    DCHECK_LE(value, MaxInt<uint32_t>(bit_length));
    if (bit_length == 0) {
      return;
    }
    // Write data byte by byte to avoid races with other threads
    // on bytes that do not overlap with this region.
    uint32_t mask = std::numeric_limits<uint32_t>::max() >> (BitSizeOf<uint32_t>() - bit_length);
    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
    data_[index] &= ~(mask << shift);  // Clear bits.
    data_[index] |= (value << shift);  // Set bits.
    size_t finished_bits = kBitsPerByte - shift;
    for (int i = 1; finished_bits < bit_length; i++, finished_bits += kBitsPerByte) {
      data_[index + i] &= ~(mask >> finished_bits);  // Clear bits.
      data_[index + i] |= (value >> finished_bits);  // Set bits.
    }
    DCHECK_EQ(value, LoadBits(bit_offset, bit_length));
  }

  // Store `bit_length` bits copied from the start of the `src` bit region.
  ALWAYS_INLINE void StoreBits(size_t bit_offset, const BitMemoryRegion& src, size_t bit_length) {
    DCHECK_LE(bit_offset, bit_size_);
    DCHECK_LE(bit_length, bit_size_ - bit_offset);
    size_t bit = 0;
    constexpr size_t kNumBits = BitSizeOf<uint32_t>();
    for (; bit + kNumBits <= bit_length; bit += kNumBits) {
      StoreBits(bit_offset + bit, src.LoadBits(bit, kNumBits), kNumBits);
    }
    size_t num_bits = bit_length - bit;
    StoreBits(bit_offset + bit, src.LoadBits(bit, num_bits), num_bits);
  }

  // Count the number of set bits within the given bit range.
  ALWAYS_INLINE size_t PopCount(size_t bit_offset, size_t bit_length) const {
    DCHECK_LE(bit_offset, bit_size_);
    DCHECK_LE(bit_length, bit_size_ - bit_offset);
    size_t count = 0;
    size_t bit = 0;
    constexpr size_t kNumBits = BitSizeOf<uint32_t>();
    for (; bit + kNumBits <= bit_length; bit += kNumBits) {
      count += POPCOUNT(LoadBits(bit_offset + bit, kNumBits));
    }
    count += POPCOUNT(LoadBits(bit_offset + bit, bit_length - bit));
    return count;
  }

  static int Compare(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) {
    if (lhs.size_in_bits() != rhs.size_in_bits()) {
      return (lhs.size_in_bits() < rhs.size_in_bits()) ? -1 : 1;
    }
    size_t bit = 0;
    constexpr size_t kNumBits = BitSizeOf<uint32_t>();
    for (; bit + kNumBits <= lhs.size_in_bits(); bit += kNumBits) {
      uint32_t lhs_bits = lhs.LoadBits(bit, kNumBits);
      uint32_t rhs_bits = rhs.LoadBits(bit, kNumBits);
      if (lhs_bits != rhs_bits) {
        return (lhs_bits < rhs_bits) ? -1 : 1;
      }
    }
    size_t num_bits = lhs.size_in_bits() - bit;
    uint32_t lhs_bits = lhs.LoadBits(bit, num_bits);
    uint32_t rhs_bits = rhs.LoadBits(bit, num_bits);
    if (lhs_bits != rhs_bits) {
      return (lhs_bits < rhs_bits) ? -1 : 1;
    }
    return 0;
  }

 private:
  uint8_t* data_ = nullptr;  // The pointer is page aligned.
  size_t bit_start_ = 0;
  size_t bit_size_ = 0;
};

constexpr uint32_t kVarintBits = 4;  // Minimum number of bits used for varint.
constexpr uint32_t kVarintMax = 11;  // Maximum value which is stored "inline".

class BitMemoryReader {
 public:
  BitMemoryReader(BitMemoryReader&&) = default;
  explicit BitMemoryReader(BitMemoryRegion data)
      : finished_region_(data.Subregion(0, 0) /* set the length to zero */) {
  }
  explicit BitMemoryReader(const uint8_t* data, ssize_t bit_offset = 0)
      : finished_region_(const_cast<uint8_t*>(data), bit_offset, /* bit_length */ 0) {
  }

  const uint8_t* data() const { return finished_region_.data(); }

  BitMemoryRegion GetReadRegion() const { return finished_region_; }

  size_t NumberOfReadBits() const { return finished_region_.size_in_bits(); }

  ALWAYS_INLINE BitMemoryRegion ReadRegion(size_t bit_length) {
    size_t bit_offset = finished_region_.size_in_bits();
    finished_region_.Resize(bit_offset + bit_length);
    return finished_region_.Subregion(bit_offset, bit_length);
  }

  template<typename Result = size_t>
  ALWAYS_INLINE Result ReadBits(size_t bit_length) {
    return ReadRegion(bit_length).LoadBits<Result>(/* bit_offset */ 0, bit_length);
  }

  ALWAYS_INLINE bool ReadBit() {
    return ReadRegion(/* bit_length */ 1).LoadBit(/* bit_offset */ 0);
  }

  // Read variable-length bit-packed integer.
  // The first four bits determine the variable length of the encoded integer:
  // Values 0..11 represent the result as-is, with no further following bits.
  // Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
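  // For example (illustrative values, not from the original comment): 7 is encoded as
  // just the four bits 0111, while 300 is encoded as the four bits 1101 (kVarintMax + 2,
  // meaning "two bytes follow") followed by 16 bits holding the value 300.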
  ALWAYS_INLINE uint32_t ReadVarint() {
    uint32_t x = ReadBits(kVarintBits);
    return (x <= kVarintMax) ? x : ReadBits((x - kVarintMax) * kBitsPerByte);
  }

  // Read N 'interleaved' varints (different from just reading N consecutive varints).
  // All small values are stored first and the large values are stored after them.
  // This requires fewer bit-reads compared to individually storing the varints.
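  // For example (illustrative values): for N = 2 with values {3, 300}, the first 8 bits
  // hold the two headers 3 and 13, followed by 16 bits holding the value 300.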
  template<size_t N>
  ALWAYS_INLINE std::array<uint32_t, N> ReadInterleavedVarints() {
    static_assert(N * kVarintBits <= sizeof(uint64_t) * kBitsPerByte, "N too big");
    std::array<uint32_t, N> values;
    // StackMap BitTable uses over 8 varints in the header, so we need uint64_t.
    uint64_t data = ReadBits<uint64_t>(N * kVarintBits);
    for (size_t i = 0; i < N; i++) {
      values[i] = BitFieldExtract(data, i * kVarintBits, kVarintBits);
    }
    // Do the second part in its own loop as that seems to produce better code in clang.
    for (size_t i = 0; i < N; i++) {
      if (UNLIKELY(values[i] > kVarintMax)) {
        values[i] = ReadBits((values[i] - kVarintMax) * kBitsPerByte);
      }
    }
    return values;
  }

 private:
  // Represents all of the bits which were read so far. There is no upper bound.
  // Therefore, by definition, the "cursor" is always at the end of the region.
  BitMemoryRegion finished_region_;

  DISALLOW_COPY_AND_ASSIGN(BitMemoryReader);
};

template<typename Vector>
class BitMemoryWriter {
 public:
  explicit BitMemoryWriter(Vector* out, size_t bit_offset = 0)
      : out_(out), bit_start_(bit_offset), bit_offset_(bit_offset) {
    DCHECK_EQ(NumberOfWrittenBits(), 0u);
  }

  BitMemoryRegion GetWrittenRegion() const {
    return BitMemoryRegion(out_->data(), bit_start_, bit_offset_ - bit_start_);
  }

  const uint8_t* data() const { return out_->data(); }

  size_t NumberOfWrittenBits() const { return bit_offset_ - bit_start_; }

  ALWAYS_INLINE BitMemoryRegion Allocate(size_t bit_length) {
    out_->resize(BitsToBytesRoundUp(bit_offset_ + bit_length));
    BitMemoryRegion region(out_->data(), bit_offset_, bit_length);
    DCHECK_LE(bit_length, std::numeric_limits<size_t>::max() - bit_offset_) << "Overflow";
    bit_offset_ += bit_length;
    return region;
  }

  ALWAYS_INLINE void WriteRegion(const BitMemoryRegion& region) {
    Allocate(region.size_in_bits()).StoreBits(/* bit_offset */ 0, region, region.size_in_bits());
  }

  ALWAYS_INLINE void WriteBits(uint32_t value, size_t bit_length) {
    Allocate(bit_length).StoreBits(/* bit_offset */ 0, value, bit_length);
  }

  ALWAYS_INLINE void WriteBit(bool value) {
    Allocate(1).StoreBit(/* bit_offset */ 0, value);
  }

  template<size_t N>
  ALWAYS_INLINE void WriteInterleavedVarints(std::array<uint32_t, N> values) {
    // Write small values (or the number of bytes needed for the large values).
    for (uint32_t value : values) {
      if (value > kVarintMax) {
        WriteBits(kVarintMax + BitsToBytesRoundUp(MinimumBitsToStore(value)), kVarintBits);
      } else {
        WriteBits(value, kVarintBits);
      }
    }
    // Write large values.
    for (uint32_t value : values) {
      if (value > kVarintMax) {
        WriteBits(value, BitsToBytesRoundUp(MinimumBitsToStore(value)) * kBitsPerByte);
      }
    }
  }

  ALWAYS_INLINE void WriteVarint(uint32_t value) {
    WriteInterleavedVarints<1>({value});
  }

  ALWAYS_INLINE void ByteAlign() {
    size_t end = bit_start_ + bit_offset_;
    bit_offset_ += RoundUp(end, kBitsPerByte) - end;
  }

 private:
  Vector* out_;
  size_t bit_start_;
  size_t bit_offset_;

  DISALLOW_COPY_AND_ASSIGN(BitMemoryWriter);
};
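
// A minimal usage sketch (illustrative only; any Vector type with data() and resize(),
// such as std::vector<uint8_t>, should work):
//
//   std::vector<uint8_t> buffer;
//   BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
//   writer.WriteBit(true);
//   writer.WriteVarint(300);  // 4-bit header + 16-bit payload.
//   BitMemoryReader reader(buffer.data());
//   CHECK_EQ(reader.ReadBit(), true);
//   CHECK_EQ(reader.ReadVarint(), 300u);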

}  // namespace art

#endif  // ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_