Rewrite TypeLookupTable.
Improve bit-packing of the data to store twice as many bits
of the hash as before. Check for bucket mismatch after
the first partial hash conflict (previous comments alluded
to the bucket check but it was not implemented).
Avoid an unnecessary unique_ptr<> indirection by making the
TypeLookupTable movable.
Test: Rely on Treehugger.
Bug: 79514364
Change-Id: I9fa6f712b037a6e6904d09c88670966486f56621
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 4046dc1..9951668 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -4032,13 +4032,13 @@
// TypeLookupTable allocates its own and OatDexFile takes ownership.
const DexFile& dex_file = *opened_dex_files[i];
{
- std::unique_ptr<TypeLookupTable> type_lookup_table =
- TypeLookupTable::Create(dex_file, /* storage */ nullptr);
+ TypeLookupTable type_lookup_table = TypeLookupTable::Create(dex_file);
type_lookup_table_oat_dex_files_.push_back(
std::make_unique<art::OatDexFile>(std::move(type_lookup_table)));
dex_file.SetOatDexFile(type_lookup_table_oat_dex_files_.back().get());
}
- TypeLookupTable* const table = type_lookup_table_oat_dex_files_.back()->GetTypeLookupTable();
+ const TypeLookupTable& table = type_lookup_table_oat_dex_files_.back()->GetTypeLookupTable();
+ DCHECK(table.Valid());
// Type tables are required to be 4 byte aligned.
size_t initial_offset = oat_size_;
@@ -4057,9 +4057,9 @@
DCHECK_EQ(oat_data_offset_ + rodata_offset,
static_cast<size_t>(oat_rodata->Seek(0u, kSeekCurrent)));
- DCHECK_EQ(table_size, table->RawDataLength());
+ DCHECK_EQ(table_size, table.RawDataLength());
- if (!oat_rodata->WriteFully(table->RawData(), table_size)) {
+ if (!oat_rodata->WriteFully(table.RawData(), table_size)) {
PLOG(ERROR) << "Failed to write lookup table."
<< " File: " << oat_dex_file->GetLocation()
<< " Output: " << oat_rodata->GetLocation();