Merge "ART: Add DEX support for MethodHandle and CallSite info."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index e525808..5c49f19 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -93,7 +93,7 @@
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex
ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
-ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB
ART_GTEST_imtable_test_DEX_DEPS := IMTA IMTB
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index f8e01b7..1bcc8e1 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -38,19 +38,14 @@
current_entry_.native_pc_code_offset = CodeOffset::FromOffset(native_pc_offset, instruction_set_);
current_entry_.register_mask = register_mask;
current_entry_.sp_mask = sp_mask;
- current_entry_.num_dex_registers = num_dex_registers;
current_entry_.inlining_depth = inlining_depth;
- current_entry_.dex_register_locations_start_index = dex_register_locations_.size();
current_entry_.inline_infos_start_index = inline_infos_.size();
- current_entry_.dex_register_map_hash = 0;
- current_entry_.same_dex_register_map_as_ = kNoSameDexMapFound;
current_entry_.stack_mask_index = 0;
- if (num_dex_registers != 0) {
- current_entry_.live_dex_registers_mask =
- ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
- } else {
- current_entry_.live_dex_registers_mask = nullptr;
- }
+ current_entry_.dex_register_entry.num_dex_registers = num_dex_registers;
+ current_entry_.dex_register_entry.locations_start_index = dex_register_locations_.size();
+ current_entry_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
+ ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
+ : nullptr;
if (sp_mask != nullptr) {
stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
@@ -65,7 +60,7 @@
}
void StackMapStream::EndStackMapEntry() {
- current_entry_.same_dex_register_map_as_ = FindEntryWithTheSameDexMap();
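+ // Deduplicate the dex register map and remember its index into dex_register_entries_.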
+ current_entry_.dex_register_map_index = AddDexRegisterMapEntry(current_entry_.dex_register_entry);
stack_maps_.push_back(current_entry_);
current_entry_ = StackMapEntry();
}
@@ -91,19 +86,15 @@
dex_register_locations_.push_back(index);
location_catalog_entries_indices_.Insert(std::make_pair(location, index));
}
-
- if (in_inline_frame_) {
- // TODO: Support sharing DexRegisterMap across InlineInfo.
- DCHECK_LT(current_dex_register_, current_inline_info_.num_dex_registers);
- current_inline_info_.live_dex_registers_mask->SetBit(current_dex_register_);
- } else {
- DCHECK_LT(current_dex_register_, current_entry_.num_dex_registers);
- current_entry_.live_dex_registers_mask->SetBit(current_dex_register_);
- current_entry_.dex_register_map_hash += (1 <<
- (current_dex_register_ % (sizeof(current_entry_.dex_register_map_hash) * kBitsPerByte)));
- current_entry_.dex_register_map_hash += static_cast<uint32_t>(value);
- current_entry_.dex_register_map_hash += static_cast<uint32_t>(kind);
- }
+ DexRegisterMapEntry* const entry = in_inline_frame_
+ ? &current_inline_info_.dex_register_entry
+ : &current_entry_.dex_register_entry;
+ DCHECK_LT(current_dex_register_, entry->num_dex_registers);
+ entry->live_dex_registers_mask->SetBit(current_dex_register_);
+ entry->hash += (1 <<
+ (current_dex_register_ % (sizeof(DexRegisterMapEntry::hash) * kBitsPerByte)));
+ entry->hash += static_cast<uint32_t>(value);
+ entry->hash += static_cast<uint32_t>(kind);
}
current_dex_register_++;
}
@@ -124,20 +115,19 @@
current_inline_info_.method_index = method->GetDexMethodIndexUnchecked();
}
current_inline_info_.dex_pc = dex_pc;
- current_inline_info_.num_dex_registers = num_dex_registers;
- current_inline_info_.dex_register_locations_start_index = dex_register_locations_.size();
- if (num_dex_registers != 0) {
- current_inline_info_.live_dex_registers_mask =
- ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
- } else {
- current_inline_info_.live_dex_registers_mask = nullptr;
- }
+ current_inline_info_.dex_register_entry.num_dex_registers = num_dex_registers;
+ current_inline_info_.dex_register_entry.locations_start_index = dex_register_locations_.size();
+ current_inline_info_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
+ ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
+ : nullptr;
current_dex_register_ = 0;
}
void StackMapStream::EndInlineInfoEntry() {
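+ // Deduplicate the inline frame's dex register map, just like for the enclosing stack map.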
+ current_inline_info_.dex_register_map_index =
+ AddDexRegisterMapEntry(current_inline_info_.dex_register_entry);
DCHECK(in_inline_frame_);
- DCHECK_EQ(current_dex_register_, current_inline_info_.num_dex_registers)
+ DCHECK_EQ(current_dex_register_, current_inline_info_.dex_register_entry.num_dex_registers)
<< "Inline information contains less registers than expected";
in_inline_frame_ = false;
inline_infos_.push_back(current_inline_info_);
@@ -193,8 +183,7 @@
return size;
}
-size_t StackMapStream::ComputeDexRegisterMapSize(uint32_t num_dex_registers,
- const BitVector* live_dex_registers_mask) const {
+size_t StackMapStream::DexRegisterMapEntry::ComputeSize(size_t catalog_size) const {
// For num_dex_registers == 0u live_dex_registers_mask may be null.
if (num_dex_registers == 0u) {
return 0u; // No register map will be emitted.
@@ -208,8 +197,7 @@
// Compute the size of the set of live Dex register entries.
size_t number_of_live_dex_registers = live_dex_registers_mask->NumSetBits();
size_t map_entries_size_in_bits =
- DexRegisterMap::SingleEntrySizeInBits(location_catalog_entries_.size())
- * number_of_live_dex_registers;
+ DexRegisterMap::SingleEntrySizeInBits(catalog_size) * number_of_live_dex_registers;
size_t map_entries_size_in_bytes =
RoundUp(map_entries_size_in_bits, kBitsPerByte) / kBitsPerByte;
size += map_entries_size_in_bytes;
@@ -218,18 +206,8 @@
size_t StackMapStream::ComputeDexRegisterMapsSize() const {
size_t size = 0;
- size_t inline_info_index = 0;
- for (const StackMapEntry& entry : stack_maps_) {
- if (entry.same_dex_register_map_as_ == kNoSameDexMapFound) {
- size += ComputeDexRegisterMapSize(entry.num_dex_registers, entry.live_dex_registers_mask);
- } else {
- // Entries with the same dex map will have the same offset.
- }
- for (size_t j = 0; j < entry.inlining_depth; ++j) {
- InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
- size += ComputeDexRegisterMapSize(inline_entry.num_dex_registers,
- inline_entry.live_dex_registers_mask);
- }
+ for (const DexRegisterMapEntry& entry : dex_register_entries_) {
+ size += entry.ComputeSize(location_catalog_entries_.size());
}
return size;
}
@@ -264,6 +242,30 @@
encoding->SetFromSizes(method_index_max, dex_pc_max, extra_data_max, dex_register_maps_bytes);
}
+size_t StackMapStream::MaybeCopyDexRegisterMap(DexRegisterMapEntry& entry,
+ size_t* current_offset,
+ MemoryRegion dex_register_locations_region) {
+ DCHECK(current_offset != nullptr);
+ if ((entry.num_dex_registers == 0) || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
+ // No dex register map needed.
+ return StackMap::kNoDexRegisterMap;
+ }
+ if (entry.offset == DexRegisterMapEntry::kOffsetUnassigned) {
+ // Not already copied; need to copy and assign an offset.
+ entry.offset = *current_offset;
+ const size_t entry_size = entry.ComputeSize(location_catalog_entries_.size());
+ DexRegisterMap dex_register_map(
+ dex_register_locations_region.Subregion(entry.offset, entry_size));
+ *current_offset += entry_size;
+ // Fill in the map since it was just added.
+ FillInDexRegisterMap(dex_register_map,
+ entry.num_dex_registers,
+ *entry.live_dex_registers_mask,
+ entry.locations_start_index);
+ }
+ return entry.offset;
+}
+
void StackMapStream::FillIn(MemoryRegion region) {
DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before FillIn";
@@ -311,35 +313,10 @@
stack_map.SetRegisterMaskIndex(encoding.stack_map.encoding, entry.register_mask_index);
stack_map.SetStackMaskIndex(encoding.stack_map.encoding, entry.stack_mask_index);
- if (entry.num_dex_registers == 0 || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
- // No dex map available.
- stack_map.SetDexRegisterMapOffset(encoding.stack_map.encoding, StackMap::kNoDexRegisterMap);
- } else {
- // Search for an entry with the same dex map.
- if (entry.same_dex_register_map_as_ != kNoSameDexMapFound) {
- // If we have a hit reuse the offset.
- stack_map.SetDexRegisterMapOffset(
- encoding.stack_map.encoding,
- code_info.GetStackMapAt(entry.same_dex_register_map_as_, encoding)
- .GetDexRegisterMapOffset(encoding.stack_map.encoding));
- } else {
- // New dex registers maps should be added to the stack map.
- MemoryRegion register_region = dex_register_locations_region.Subregion(
- next_dex_register_map_offset,
- ComputeDexRegisterMapSize(entry.num_dex_registers, entry.live_dex_registers_mask));
- next_dex_register_map_offset += register_region.size();
- DexRegisterMap dex_register_map(register_region);
- stack_map.SetDexRegisterMapOffset(
- encoding.stack_map.encoding,
- register_region.begin() - dex_register_locations_region.begin());
-
- // Set the dex register location.
- FillInDexRegisterMap(dex_register_map,
- entry.num_dex_registers,
- *entry.live_dex_registers_mask,
- entry.dex_register_locations_start_index);
- }
- }
+ size_t offset = MaybeCopyDexRegisterMap(dex_register_entries_[entry.dex_register_map_index],
+ &next_dex_register_map_offset,
+ dex_register_locations_region);
+ stack_map.SetDexRegisterMapOffset(encoding.stack_map.encoding, offset);
// Set the inlining info.
if (entry.inlining_depth != 0) {
@@ -371,29 +348,13 @@
inline_info.SetExtraDataAtDepth(encoding.inline_info.encoding, depth, 1);
}
inline_info.SetDexPcAtDepth(encoding.inline_info.encoding, depth, inline_entry.dex_pc);
- if (inline_entry.num_dex_registers == 0) {
- // No dex map available.
- inline_info.SetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding,
- depth,
- StackMap::kNoDexRegisterMap);
- DCHECK(inline_entry.live_dex_registers_mask == nullptr);
- } else {
- MemoryRegion register_region = dex_register_locations_region.Subregion(
- next_dex_register_map_offset,
- ComputeDexRegisterMapSize(inline_entry.num_dex_registers,
- inline_entry.live_dex_registers_mask));
- next_dex_register_map_offset += register_region.size();
- DexRegisterMap dex_register_map(register_region);
- inline_info.SetDexRegisterMapOffsetAtDepth(
- encoding.inline_info.encoding,
- depth,
- register_region.begin() - dex_register_locations_region.begin());
-
- FillInDexRegisterMap(dex_register_map,
- inline_entry.num_dex_registers,
- *inline_entry.live_dex_registers_mask,
- inline_entry.dex_register_locations_start_index);
- }
+ size_t dex_register_map_offset = MaybeCopyDexRegisterMap(
+ dex_register_entries_[inline_entry.dex_register_map_index],
+ &next_dex_register_map_offset,
+ dex_register_locations_region);
+ inline_info.SetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding,
+ depth,
+ dex_register_map_offset);
}
} else if (encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
stack_map.SetInlineInfoIndex(encoding.stack_map.encoding, StackMap::kNoInlineInfo);
@@ -448,34 +409,31 @@
}
}
-size_t StackMapStream::FindEntryWithTheSameDexMap() {
- size_t current_entry_index = stack_maps_.size();
- auto entries_it = dex_map_hash_to_stack_map_indices_.find(current_entry_.dex_register_map_hash);
+size_t StackMapStream::AddDexRegisterMapEntry(const DexRegisterMapEntry& entry) {
+ const size_t current_entry_index = dex_register_entries_.size();
+ auto entries_it = dex_map_hash_to_stack_map_indices_.find(entry.hash);
if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
// We don't have a perfect hash functions so we need a list to collect all stack maps
// which might have the same dex register map.
ArenaVector<uint32_t> stack_map_indices(allocator_->Adapter(kArenaAllocStackMapStream));
stack_map_indices.push_back(current_entry_index);
- dex_map_hash_to_stack_map_indices_.Put(current_entry_.dex_register_map_hash,
- std::move(stack_map_indices));
- return kNoSameDexMapFound;
- }
-
- // We might have collisions, so we need to check whether or not we really have a match.
- for (uint32_t test_entry_index : entries_it->second) {
- if (HaveTheSameDexMaps(GetStackMap(test_entry_index), current_entry_)) {
- return test_entry_index;
+ dex_map_hash_to_stack_map_indices_.Put(entry.hash, std::move(stack_map_indices));
+ } else {
+ // We might have collisions, so we need to check whether or not we really have a match.
+ for (uint32_t test_entry_index : entries_it->second) {
+ if (DexRegisterMapEntryEquals(dex_register_entries_[test_entry_index], entry)) {
+ return test_entry_index;
+ }
}
+ entries_it->second.push_back(current_entry_index);
}
- entries_it->second.push_back(current_entry_index);
- return kNoSameDexMapFound;
+ dex_register_entries_.push_back(entry);
+ return current_entry_index;
}
-bool StackMapStream::HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const {
- if (a.live_dex_registers_mask == nullptr && b.live_dex_registers_mask == nullptr) {
- return true;
- }
- if (a.live_dex_registers_mask == nullptr || b.live_dex_registers_mask == nullptr) {
+bool StackMapStream::DexRegisterMapEntryEquals(const DexRegisterMapEntry& a,
+ const DexRegisterMapEntry& b) const {
+ if ((a.live_dex_registers_mask == nullptr) != (b.live_dex_registers_mask == nullptr)) {
return false;
}
if (a.num_dex_registers != b.num_dex_registers) {
@@ -489,12 +447,12 @@
}
size_t number_of_live_dex_registers = a.live_dex_registers_mask->NumSetBits();
DCHECK_LE(number_of_live_dex_registers, dex_register_locations_.size());
- DCHECK_LE(a.dex_register_locations_start_index,
+ DCHECK_LE(a.locations_start_index,
dex_register_locations_.size() - number_of_live_dex_registers);
- DCHECK_LE(b.dex_register_locations_start_index,
+ DCHECK_LE(b.locations_start_index,
dex_register_locations_.size() - number_of_live_dex_registers);
- auto a_begin = dex_register_locations_.begin() + a.dex_register_locations_start_index;
- auto b_begin = dex_register_locations_.begin() + b.dex_register_locations_start_index;
+ auto a_begin = dex_register_locations_.begin() + a.locations_start_index;
+ auto b_begin = dex_register_locations_.begin() + b.locations_start_index;
if (!std::equal(a_begin, a_begin + number_of_live_dex_registers, b_begin)) {
return false;
}
@@ -597,10 +555,10 @@
CheckDexRegisterMap(code_info,
code_info.GetDexRegisterMapOf(
- stack_map, encoding, entry.num_dex_registers),
- entry.num_dex_registers,
- entry.live_dex_registers_mask,
- entry.dex_register_locations_start_index);
+ stack_map, encoding, entry.dex_register_entry.num_dex_registers),
+ entry.dex_register_entry.num_dex_registers,
+ entry.dex_register_entry.live_dex_registers_mask,
+ entry.dex_register_entry.locations_start_index);
// Check inline info.
DCHECK_EQ(stack_map.HasInlineInfo(stack_map_encoding), (entry.inlining_depth != 0));
@@ -623,10 +581,13 @@
CheckDexRegisterMap(code_info,
code_info.GetDexRegisterMapAtDepth(
- d, inline_info, encoding, inline_entry.num_dex_registers),
- inline_entry.num_dex_registers,
- inline_entry.live_dex_registers_mask,
- inline_entry.dex_register_locations_start_index);
+ d,
+ inline_info,
+ encoding,
+ inline_entry.dex_register_entry.num_dex_registers),
+ inline_entry.dex_register_entry.num_dex_registers,
+ inline_entry.dex_register_entry.live_dex_registers_mask,
+ inline_entry.dex_register_entry.locations_start_index);
}
}
}
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 08c1d3e..bba3d51 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -70,6 +70,7 @@
inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
register_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
+ dex_register_entries_(allocator->Adapter(kArenaAllocStackMapStream)),
stack_mask_max_(-1),
dex_pc_max_(0),
register_mask_max_(0),
@@ -89,30 +90,42 @@
code_info_encoding_.reserve(16);
}
+ // A dex register map entry for a single stack map entry; it records which registers are live
+ // as well as indices into the location catalog.
+ class DexRegisterMapEntry {
+ public:
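+ // Offset value used until the entry has been copied into the dex register location region.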
+ static const size_t kOffsetUnassigned = -1;
+
+ BitVector* live_dex_registers_mask;
+ uint32_t num_dex_registers;
+ size_t locations_start_index;
+ // Computed fields
+ size_t hash = 0;
+ size_t offset = kOffsetUnassigned;
+
+ size_t ComputeSize(size_t catalog_size) const;
+ };
+
// See runtime/stack_map.h to know what these fields contain.
struct StackMapEntry {
uint32_t dex_pc;
CodeOffset native_pc_code_offset;
uint32_t register_mask;
BitVector* sp_mask;
- uint32_t num_dex_registers;
uint8_t inlining_depth;
- size_t dex_register_locations_start_index;
size_t inline_infos_start_index;
- BitVector* live_dex_registers_mask;
- uint32_t dex_register_map_hash;
- size_t same_dex_register_map_as_;
uint32_t stack_mask_index;
uint32_t register_mask_index;
+ DexRegisterMapEntry dex_register_entry;
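+ // Index into dex_register_entries_ for this stack map's deduplicated dex register map.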
+ size_t dex_register_map_index;
};
struct InlineInfoEntry {
uint32_t dex_pc; // DexFile::kDexNoIndex for intrinsified native methods.
ArtMethod* method;
uint32_t method_index;
- uint32_t num_dex_registers;
- BitVector* live_dex_registers_mask;
- size_t dex_register_locations_start_index;
+ DexRegisterMapEntry dex_register_entry;
+ size_t dex_register_map_index;
};
void BeginStackMapEntry(uint32_t dex_pc,
@@ -140,7 +153,8 @@
}
void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) {
- stack_maps_[i].native_pc_code_offset = CodeOffset::FromOffset(native_pc_offset, instruction_set_);
+ stack_maps_[i].native_pc_code_offset =
+ CodeOffset::FromOffset(native_pc_offset, instruction_set_);
}
// Prepares the stream to fill in a memory region. Must be called before FillIn.
@@ -150,8 +164,6 @@
private:
size_t ComputeDexRegisterLocationCatalogSize() const;
- size_t ComputeDexRegisterMapSize(uint32_t num_dex_registers,
- const BitVector* live_dex_registers_mask) const;
size_t ComputeDexRegisterMapsSize() const;
void ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
size_t dex_register_maps_bytes);
@@ -164,15 +176,24 @@
// Returns the number of unique register masks.
size_t PrepareRegisterMasks();
- // Returns the index of an entry with the same dex register map as the current_entry,
- // or kNoSameDexMapFound if no such entry exists.
- size_t FindEntryWithTheSameDexMap();
- bool HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const;
+ // Deduplicates the entry if possible and returns the corresponding index into the
+ // dex_register_entries_ array; if the entry is not a duplicate, a new entry is added.
+ size_t AddDexRegisterMapEntry(const DexRegisterMapEntry& entry);
+
+ // Return true if the two dex register map entries are equal.
+ bool DexRegisterMapEntryEquals(const DexRegisterMapEntry& a, const DexRegisterMapEntry& b) const;
+
+ // Fill in the corresponding entries of a register map.
void FillInDexRegisterMap(DexRegisterMap dex_register_map,
uint32_t num_dex_registers,
const BitVector& live_dex_registers_mask,
uint32_t start_index_in_dex_register_locations) const;
+ // Returns the offset of the dex register map inside the dex register location region. See FillIn.
+ // Only copies the dex register map if the offset for the entry is not already assigned.
+ size_t MaybeCopyDexRegisterMap(DexRegisterMapEntry& entry,
+ size_t* current_offset,
+ MemoryRegion dex_register_locations_region);
void CheckDexRegisterMap(const CodeInfo& code_info,
const DexRegisterMap& dex_register_map,
size_t num_dex_registers,
@@ -199,6 +220,7 @@
ArenaVector<InlineInfoEntry> inline_infos_;
ArenaVector<uint8_t> stack_masks_;
ArenaVector<uint32_t> register_masks_;
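+ // Deduplicated dex register map entries referenced by stack maps and inline infos.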
+ ArenaVector<DexRegisterMapEntry> dex_register_entries_;
int stack_mask_max_;
uint32_t dex_pc_max_;
uint32_t register_mask_max_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index bd0aa6d..0416951 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -410,6 +410,100 @@
}
}
+TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream stream(&arena, kRuntimeISA);
+ ArtMethod art_method;
+
+ ArenaBitVector sp_mask1(&arena, 0, true);
+ sp_mask1.SetBit(2);
+ sp_mask1.SetBit(4);
+ const size_t number_of_dex_registers = 2;
+ const size_t number_of_dex_registers_in_inline_info = 2;
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 1);
+ stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
+ stream.BeginInlineInfoEntry(&art_method, 3, number_of_dex_registers_in_inline_info);
+ stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
+ stream.EndInlineInfoEntry();
+ stream.EndStackMapEntry();
+
+ size_t size = stream.PrepareForFillIn();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo code_info(region);
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(1u, code_info.GetNumberOfStackMaps(encoding));
+
+ uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+ ASSERT_EQ(2u, number_of_catalog_entries);
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+ // The Dex register location catalog contains:
+ // - one 1-byte short Dex register location, and
+ // - one 5-byte large Dex register location.
+ const size_t expected_location_catalog_size = 1u + 5u;
+ ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
+
+ // First stack map.
+ {
+ StackMap stack_map = code_info.GetStackMapAt(0, encoding);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
+ ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+
+ ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask1));
+
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+ DexRegisterMap map(code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers));
+ ASSERT_TRUE(map.IsDexRegisterLive(0));
+ ASSERT_TRUE(map.IsDexRegisterLive(1));
+ ASSERT_EQ(2u, map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask, and
+ // - one 1-byte set of location catalog entry indices composed of two 2-bit values.
+ size_t expected_map_size = 1u + 1u;
+ ASSERT_EQ(expected_map_size, map.Size());
+
+ ASSERT_EQ(Kind::kInStack, map.GetLocationKind(0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kConstant,
+ map.GetLocationKind(1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInStack,
+ map.GetLocationInternalKind(0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kConstantLargeValue,
+ map.GetLocationInternalKind(1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(0, map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(-2, map.GetConstant(1, number_of_dex_registers, code_info, encoding));
+
+ const size_t index0 =
+ map.GetLocationCatalogEntryIndex(0, number_of_dex_registers, number_of_catalog_entries);
+ const size_t index1 =
+ map.GetLocationCatalogEntryIndex(1, number_of_dex_registers, number_of_catalog_entries);
+ ASSERT_EQ(0u, index0);
+ ASSERT_EQ(1u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kInStack, location0.GetKind());
+ ASSERT_EQ(Kind::kConstant, location1.GetKind());
+ ASSERT_EQ(Kind::kInStack, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kConstantLargeValue, location1.GetInternalKind());
+ ASSERT_EQ(0, location0.GetValue());
+ ASSERT_EQ(-2, location1.GetValue());
+
+ // Test that the inline info dex register map was deduplicated to the same offset as the
+ // stack map's dex register map.
+ ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+ EXPECT_EQ(inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding, 0),
+ stack_map.GetDexRegisterMapOffset(encoding.stack_map.encoding));
+ }
+}
+
TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaPool pool;
ArenaAllocator arena(&pool);
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index c2275ac..e208337 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -231,7 +231,7 @@
}
virtual std::string GetTestDexFileName() {
- return GetDexSrc1();
+ return Dex2oatEnvironmentTest::GetTestDexFileName("VerifierDeps");
}
virtual void CheckResult(bool expect_use) {
@@ -399,11 +399,6 @@
};
TEST_F(Dex2oatSwapUseTest, CheckSwapUsage) {
- // The `native_alloc_2_ >= native_alloc_1_` assertion below may not
- // hold true on some x86 systems when read barriers are enabled;
- // disable this test while we investigate (b/29259363).
- TEST_DISABLED_FOR_READ_BARRIER_ON_X86();
-
RunTest(false /* use_fd */,
false /* expect_use */);
GrabResult1();
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 6cb8544..59e6ac0 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -67,7 +67,6 @@
}
ArtMethod* ArtMethod::GetSingleImplementation(PointerSize pointer_size) {
- DCHECK(!IsNative());
if (!IsAbstract()) {
// A non-abstract's single implementation is itself.
return this;
@@ -442,12 +441,56 @@
UNREACHABLE();
}
+// We use the method's DexFile and declaring class name to find the OatMethod for an obsolete
+// method. This is extremely slow but we need it if we want to be able to have obsolete native
+// methods, since we need this to find the size of their stack frames.
+//
+// NB We could (potentially) do this differently and rely on the way the transformation is applied
+// in order to use the entrypoint to find this information. However, for debugging reasons (most
+// notably making sure that new invokes of obsolete methods fail) we choose to instead get the data
+// directly from the dex file.
+static const OatFile::OatMethod FindOatMethodFromDexFileFor(ArtMethod* method, bool* found)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(method->IsObsolete() && method->IsNative());
+ const DexFile* dex_file = method->GetDexFile();
+
+ // recreate the class_def_index from the descriptor.
+ std::string descriptor_storage;
+ const DexFile::TypeId* declaring_class_type_id =
+ dex_file->FindTypeId(method->GetDeclaringClass()->GetDescriptor(&descriptor_storage));
+ CHECK(declaring_class_type_id != nullptr);
+ dex::TypeIndex declaring_class_type_index = dex_file->GetIndexForTypeId(*declaring_class_type_id);
+ const DexFile::ClassDef* declaring_class_type_def =
+ dex_file->FindClassDef(declaring_class_type_index);
+ CHECK(declaring_class_type_def != nullptr);
+ uint16_t declaring_class_def_index = dex_file->GetIndexForClassDef(*declaring_class_type_def);
+
+ size_t oat_method_index = GetOatMethodIndexFromMethodIndex(*dex_file,
+ declaring_class_def_index,
+ method->GetDexMethodIndex());
+
+ OatFile::OatClass oat_class = OatFile::FindOatClass(*dex_file,
+ declaring_class_def_index,
+ found);
+ if (!(*found)) {
+ return OatFile::OatMethod::Invalid();
+ }
+ return oat_class.GetOatMethod(oat_method_index);
+}
+
static const OatFile::OatMethod FindOatMethodFor(ArtMethod* method,
PointerSize pointer_size,
bool* found)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // We shouldn't be calling this with obsolete methods.
- DCHECK(!method->IsObsolete());
+ if (UNLIKELY(method->IsObsolete())) {
+ // We shouldn't be calling this with obsolete methods except for native obsolete methods for
+ // which we need to use the oat method to figure out how large the quick frame is.
+ DCHECK(method->IsNative()) << "We should only be finding the OatMethod of obsolete methods in "
+ << "order to allow stack walking. Other obsolete methods should "
+ << "never need to access this information.";
+ DCHECK_EQ(pointer_size, kRuntimePointerSize) << "Obsolete method in compiler!";
+ return FindOatMethodFromDexFileFor(method, found);
+ }
// Although we overwrite the trampoline of non-static methods, we may get here via the resolution
// method for direct methods (or virtual methods made direct).
mirror::Class* declaring_class = method->GetDeclaringClass();
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 6e102be..7bba944 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -72,6 +72,7 @@
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
+std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -146,7 +147,10 @@
const uint64_t start_nano_time_;
};
-BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
+BaseMutex::BaseMutex(const char* name, LockLevel level)
+ : level_(level),
+ name_(name),
+ should_respond_to_empty_checkpoint_request_(false) {
if (kLogLockContentions) {
ScopedAllMutexesLock mu(this);
std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
@@ -377,6 +381,9 @@
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
num_contenders_++;
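+ // Check for a pending empty checkpoint before blocking in the futex wait; waiters are woken
+ // via WakeupToRespondToEmptyCheckpoint so that they re-run this check.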
+ if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+ self->CheckEmptyCheckpointFromMutex();
+ }
if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
@@ -519,6 +526,18 @@
return os;
}
+void Mutex::WakeupToRespondToEmptyCheckpoint() {
+#if ART_USE_FUTEXES
+ // Wake up all the waiters so they will respond to the emtpy checkpoint.
+ DCHECK(should_respond_to_empty_checkpoint_request_);
+ if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+ }
+#else
+ LOG(FATAL) << "Non futex case isn't supported.";
+#endif
+}
+
ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
: BaseMutex(name, level)
#if ART_USE_FUTEXES
@@ -563,6 +582,9 @@
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
+ if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+ self->CheckEmptyCheckpointFromMutex();
+ }
if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
@@ -639,6 +661,9 @@
}
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
+ if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+ self->CheckEmptyCheckpointFromMutex();
+ }
if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
--num_pending_writers_;
@@ -677,6 +702,9 @@
// Owner holds it exclusively, hang up.
ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
++num_pending_readers_;
+ if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+ self->CheckEmptyCheckpointFromMutex();
+ }
if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
if (errno != EAGAIN && errno != EINTR) {
PLOG(FATAL) << "futex wait failed for " << name_;
@@ -749,6 +777,19 @@
return os;
}
+void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() {
+#if ART_USE_FUTEXES
+ // Wake up all the waiters so they will respond to the empty checkpoint.
+ DCHECK(should_respond_to_empty_checkpoint_request_);
+ if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
+ num_pending_writers_.LoadRelaxed() > 0)) {
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
+ }
+#else
+ LOG(FATAL) << "Non futex case isn't supported.";
+#endif
+}
+
ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
: name_(name), guard_(guard) {
#if ART_USE_FUTEXES
@@ -1121,6 +1162,12 @@
#undef UPDATE_CURRENT_LOCK_LEVEL
+ // List of mutexes that we may hold when accessing a weak ref.
+ dex_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
+ expected_mutexes_on_weak_ref_access_.push_back(dex_lock_);
+ classlinker_classes_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
+ expected_mutexes_on_weak_ref_access_.push_back(classlinker_classes_lock_);
+
InitConditions();
}
}
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index ffe18c6..9b6938f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -152,6 +152,16 @@
static void DumpAll(std::ostream& os);
+ bool ShouldRespondToEmptyCheckpointRequest() const {
+ return should_respond_to_empty_checkpoint_request_;
+ }
+
+ void SetShouldRespondToEmptyCheckpointRequest(bool value) {
+ should_respond_to_empty_checkpoint_request_ = value;
+ }
+
+ virtual void WakeupToRespondToEmptyCheckpoint() = 0;
+
protected:
friend class ConditionVariable;
@@ -168,6 +178,7 @@
const LockLevel level_; // Support for lock hierarchy.
const char* const name_;
+ bool should_respond_to_empty_checkpoint_request_;
// A log entry that records contention but makes no guarantee that either tid will be held live.
struct ContentionLogEntry {
@@ -266,6 +277,8 @@
// For negative capabilities in clang annotations.
const Mutex& operator!() const { return *this; }
+ void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+
private:
#if ART_USE_FUTEXES
// 0 is unheld, 1 is held.
@@ -386,6 +399,8 @@
// For negative capabilities in clang annotations.
const ReaderWriterMutex& operator!() const { return *this; }
+ void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+
private:
#if ART_USE_FUTEXES
// Out-of-inline path for handling contention for a SharedLock.
@@ -713,6 +728,12 @@
// Have an exclusive logging thread.
static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
+ // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
+ // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
+ // encounter an unexpected mutex on accessing weak refs,
+ // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
+ static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
};
class Roles {
diff --git a/runtime/cha.cc b/runtime/cha.cc
index d11b12f..eaba01b 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -200,7 +200,8 @@
if (verify_method != excluded_method) {
DCHECK(!verify_method->HasSingleImplementation())
<< "class: " << verify_class->PrettyClass()
- << " verify_method: " << verify_method->PrettyMethod(true);
+ << " verify_method: " << verify_method->PrettyMethod(true)
+ << " excluded_method: " << excluded_method->PrettyMethod(true);
if (verify_method->IsAbstract()) {
DCHECK(verify_method->GetSingleImplementation(image_pointer_size) == nullptr);
}
@@ -257,9 +258,6 @@
return;
}
- // Native methods don't have single-implementation flag set.
- DCHECK(!method_in_super->IsNative());
-
uint16_t method_index = method_in_super->GetMethodIndex();
if (method_in_super->IsAbstract()) {
if (kIsDebugBuild) {
@@ -374,12 +372,12 @@
// used for static methods or methods of final classes.
return;
}
- if (method->IsNative()) {
- // Native method's invocation overhead is already high and it
- // cannot be inlined. It's not worthwhile to devirtualize the
- // call which can add a deoptimization point.
- DCHECK(!method->HasSingleImplementation());
- } else if (method->IsAbstract()) {
+ if (method->IsAbstract()) {
+ // single-implementation of abstract method shares the same field
+ // that's used for JNI function of native method. It's fine since a method
+ // cannot be both abstract and native.
+ DCHECK(!method->IsNative()) << "Abstract method cannot be native";
+
if (method->GetDeclaringClass()->IsInstantiable()) {
// Rare case, but we do accept it (such as 800-smali/smali/b_26143249.smali).
// Do not attempt to devirtualize it.
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 17e3729..26ec364 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -223,12 +223,6 @@
return; \
}
-#define TEST_DISABLED_FOR_READ_BARRIER_ON_X86() \
- if (kUseReadBarrier && kRuntimeISA == kX86) { \
- printf("WARNING: TEST DISABLED FOR READ BARRIER ON X86\n"); \
- return; \
- }
-
#define TEST_DISABLED_FOR_STRING_COMPRESSION() \
if (mirror::kUseStringCompression) { \
printf("WARNING: TEST DISABLED FOR STRING COMPRESSION\n"); \
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index e18a955..122f779 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -292,7 +292,7 @@
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
new_record_condition_.WaitHoldingLocks(self);
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index f12ad80..f18ffb4 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -835,65 +835,9 @@
void ConcurrentCopying::IssueEmptyCheckpoint() {
Thread* self = Thread::Current();
ThreadList* thread_list = Runtime::Current()->GetThreadList();
- Barrier* barrier = thread_list->EmptyCheckpointBarrier();
- barrier->Init(self, 0);
- std::vector<uint32_t> runnable_thread_ids; // Used in debug build only
- size_t barrier_count = thread_list->RunEmptyCheckpoint(runnable_thread_ids);
- // If there are no threads to wait which implys that all the checkpoint functions are finished,
- // then no need to release the mutator lock.
- if (barrier_count == 0) {
- return;
- }
// Release locks then wait for all mutator threads to pass the barrier.
Locks::mutator_lock_->SharedUnlock(self);
- {
- ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
- if (kIsDebugBuild) {
- static constexpr uint64_t kEmptyCheckpointTimeoutMs = 600 * 1000; // 10 minutes.
- bool timed_out = barrier->Increment(self, barrier_count, kEmptyCheckpointTimeoutMs);
- if (timed_out) {
- std::ostringstream ss;
- ss << "Empty checkpoint timeout\n";
- ss << "Barrier count " << barrier->GetCount(self) << "\n";
- ss << "Runnable thread IDs";
- for (uint32_t tid : runnable_thread_ids) {
- ss << " " << tid;
- }
- ss << "\n";
- Locks::mutator_lock_->Dump(ss);
- ss << "\n";
- LOG(FATAL_WITHOUT_ABORT) << ss.str();
- // Some threads in 'runnable_thread_ids' are probably stuck. Try to dump their stacks.
- // Avoid using ThreadList::Dump() initially because it is likely to get stuck as well.
- {
- ScopedObjectAccess soa(self);
- MutexLock mu1(self, *Locks::thread_list_lock_);
- for (Thread* thread : thread_list->GetList()) {
- uint32_t tid = thread->GetThreadId();
- bool is_in_runnable_thread_ids =
- std::find(runnable_thread_ids.begin(), runnable_thread_ids.end(), tid) !=
- runnable_thread_ids.end();
- if (is_in_runnable_thread_ids &&
- thread->ReadFlag(kEmptyCheckpointRequest)) {
- // Found a runnable thread that hasn't responded to the empty checkpoint request.
- // Assume it's stuck and safe to dump its stack.
- thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
- /*dump_native_stack*/ true,
- /*backtrace_map*/ nullptr,
- /*force_dump_stack*/ true);
- }
- }
- }
- LOG(FATAL_WITHOUT_ABORT)
- << "Dumped runnable threads that haven't responded to empty checkpoint.";
- // Now use ThreadList::Dump() to dump more threads, noting it may get stuck.
- thread_list->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
- LOG(FATAL) << "Dumped all threads.";
- }
- } else {
- barrier->Increment(self, barrier_count);
- }
- }
+ thread_list->RunEmptyCheckpoint();
Locks::mutator_lock_->SharedLock(self);
}
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index c154836..86b1522 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -104,7 +104,7 @@
}
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
condition_.WaitHoldingLocks(self);
}
return reference->GetReferent();
@@ -292,7 +292,7 @@
(kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
condition_.WaitHoldingLocks(self);
}
}
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index e5cddfc..60105f4 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -82,7 +82,7 @@
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(&allow_disallow_lock_);
new_weak_condition_.WaitHoldingLocks(self);
}
}
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index e0f28ad..a341cdb 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -572,7 +572,7 @@
while (!kUseReadBarrier && UNLIKELY(!MayAccessWeakGlobals(self))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::jni_weak_globals_lock_);
weak_globals_add_condition_.WaitHoldingLocks(self);
}
IndirectRef ref = weak_globals_.Add(kIRTFirstSegment, obj);
@@ -706,7 +706,7 @@
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::jni_weak_globals_lock_);
weak_globals_add_condition_.WaitHoldingLocks(self);
}
return weak_globals_.Get(ref);
@@ -731,7 +731,7 @@
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(Locks::jni_weak_globals_lock_);
weak_globals_add_condition_.WaitHoldingLocks(self);
}
// When just checking a weak ref has been cleared, avoid triggering the read barrier in decode
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index f5151b5..0ac388a 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -556,12 +556,13 @@
// Flush data cache, as compiled code references literals in it.
FlushDataCache(reinterpret_cast<char*>(roots_data),
reinterpret_cast<char*>(roots_data + data_size));
- // Flush caches before we remove write permission because on some ARMv8 hardware,
- // flushing caches require write permissions.
+ // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
+ // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
+ // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
+ // 6P) stop being supported or their kernels are fixed.
//
- // For reference, here are kernel patches discussing about this issue:
- // https://android.googlesource.com/kernel/msm/%2B/0e7f7bcc3fc87489cda5aa6aff8ce40eed912279
- // https://patchwork.kernel.org/patch/9047921/
+ // For reference, this behavior is caused by this commit:
+ // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
reinterpret_cast<char*>(code_ptr + code_size));
DCHECK(!Runtime::Current()->IsAotCompiler());
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index a32003e..f3cb0df 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1380,7 +1380,7 @@
while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
- self->CheckEmptyCheckpoint();
+ self->CheckEmptyCheckpointFromWeakRefAccess(&monitor_list_lock_);
monitor_add_condition_.WaitHoldingLocks(self);
}
list_.push_front(m);
diff --git a/runtime/oat.h b/runtime/oat.h
index e7e8328..a764e0e 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '1', '1', '0', '\0' }; // Clean up code info change.
+ static constexpr uint8_t kOatVersion[] = { '1', '1', '1', '\0' }; // Revert^3 hash-based DexCache types.
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index cf3cfa4..7ca233f 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -50,6 +50,9 @@
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_ext.h"
+#include "mirror/object_reference.h"
+#include "mirror/object-inl.h"
+#include "mirror/reference.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "ScopedLocalRef.h"
@@ -347,6 +350,7 @@
FixupGlobalReferenceTables(input, output);
FixupLocalReferenceTables(self, input, output);
+ FixupHeap(input, output);
}
if (heap->IsGcConcurrentAndMoving()) {
heap->DecrementDisableMovingGC(self);
@@ -385,8 +389,7 @@
art::mirror::Class* output_;
};
- void FixupGlobalReferenceTables(art::mirror::Class* input,
- art::mirror::Class* output)
+ void FixupGlobalReferenceTables(art::mirror::Class* input, art::mirror::Class* output)
REQUIRES(art::Locks::mutator_lock_) {
art::JavaVMExt* java_vm = art::Runtime::Current()->GetJavaVM();
@@ -441,6 +444,62 @@
art::Runtime::Current()->GetThreadList()->ForEach(LocalUpdate::Callback, &local_upd);
}
+ void FixupHeap(art::mirror::Class* input, art::mirror::Class* output)
+ REQUIRES(art::Locks::mutator_lock_) {
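+ // Walk every heap object and replace any field or referent that still points to the
+ // temporary class (input) with the new class (output).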
+ class HeapFixupVisitor {
+ public:
+ HeapFixupVisitor(const art::mirror::Class* root_input, art::mirror::Class* root_output)
+ : input_(root_input), output_(root_output) {}
+
+ void operator()(art::mirror::Object* src,
+ art::MemberOffset field_offset,
+ bool is_static ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::mirror::HeapReference<art::mirror::Object>* trg =
+ src->GetFieldObjectReferenceAddr(field_offset);
+ if (trg->AsMirrorPtr() == input_) {
+ DCHECK_NE(field_offset.Uint32Value(), 0u); // This shouldn't be the class field of
+ // an object.
+ trg->Assign(output_);
+ }
+ }
+
+ void operator()(art::ObjPtr<art::mirror::Class> klass ATTRIBUTE_UNUSED,
+ art::ObjPtr<art::mirror::Reference> reference) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::mirror::Object* val = reference->GetReferent();
+ if (val == input_) {
+ reference->SetReferent<false>(output_);
+ }
+ }
+
+ void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {
+ LOG(FATAL) << "Unreachable";
+ }
+
+ void VisitRootIfNonNull(
+ art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED) const {
+ LOG(FATAL) << "Unreachable";
+ }
+
+ static void AllObjectsCallback(art::mirror::Object* obj, void* arg)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ HeapFixupVisitor* hfv = reinterpret_cast<HeapFixupVisitor*>(arg);
+
+ // Visit references, not native roots.
+ obj->VisitReferences<false>(*hfv, *hfv);
+ }
+
+ private:
+ const art::mirror::Class* input_;
+ art::mirror::Class* output_;
+ };
+ HeapFixupVisitor hfv(input, output);
+ art::Runtime::Current()->GetHeap()->VisitObjectsPaused(HeapFixupVisitor::AllObjectsCallback,
+ &hfv);
+ }
+
// A set of all the temp classes we have handed out. We have to fix up references to these.
// For simplicity, we store the temp classes as JNI global references in a vector. Normally a
// Prepare event will closely follow, so the vector should be small.
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 843fd8c..d767c33 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -68,6 +68,66 @@
using android::base::StringPrintf;
+// A helper that fills in a class's obsolete_methods_ and obsolete_dex_caches_ ClassExt fields as
+// obsolete methods are created. This ensures that we can call any method of an obsolete ArtMethod
+// object almost as soon as it is created, since the GetObsoleteDexCache method will succeed.
+class ObsoleteMap {
+ public:
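+ // Returns the obsolete ArtMethod recorded for 'original', or nullptr if no obsolete version
+ // has been created yet.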
+ art::ArtMethod* FindObsoleteVersion(art::ArtMethod* original)
+ REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_) {
+ auto method_pair = id_map_.find(original);
+ if (method_pair != id_map_.end()) {
+ art::ArtMethod* res = obsolete_methods_->GetElementPtrSize<art::ArtMethod*>(
+ method_pair->second, art::kRuntimePointerSize);
+ DCHECK(res != nullptr);
+ DCHECK_EQ(original, res->GetNonObsoleteMethod());
+ return res;
+ } else {
+ return nullptr;
+ }
+ }
+
+ void RecordObsolete(art::ArtMethod* original, art::ArtMethod* obsolete)
+ REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_) {
+ DCHECK(original != nullptr);
+ DCHECK(obsolete != nullptr);
+ int32_t slot = next_free_slot_++;
+ DCHECK_LT(slot, obsolete_methods_->GetLength());
+ DCHECK(nullptr ==
+ obsolete_methods_->GetElementPtrSize<art::ArtMethod*>(slot, art::kRuntimePointerSize));
+ DCHECK(nullptr == obsolete_dex_caches_->Get(slot));
+ obsolete_methods_->SetElementPtrSize(slot, obsolete, art::kRuntimePointerSize);
+ obsolete_dex_caches_->Set(slot, original_dex_cache_);
+ id_map_.insert({original, slot});
+ }
+
+ ObsoleteMap(art::ObjPtr<art::mirror::PointerArray> obsolete_methods,
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::DexCache>> obsolete_dex_caches,
+ art::ObjPtr<art::mirror::DexCache> original_dex_cache)
+ : next_free_slot_(0),
+ obsolete_methods_(obsolete_methods),
+ obsolete_dex_caches_(obsolete_dex_caches),
+ original_dex_cache_(original_dex_cache) {
+ // Figure out where the first unused slot in the obsolete_methods_ array is.
+ while (obsolete_methods_->GetElementPtrSize<art::ArtMethod*>(
+ next_free_slot_, art::kRuntimePointerSize) != nullptr) {
+ DCHECK(obsolete_dex_caches_->Get(next_free_slot_) != nullptr);
+ next_free_slot_++;
+ }
+ // Sanity check that the same slot in obsolete_dex_caches_ is free.
+ DCHECK(obsolete_dex_caches_->Get(next_free_slot_) == nullptr);
+ }
+
+ private:
+ int32_t next_free_slot_;
+ std::unordered_map<art::ArtMethod*, int32_t> id_map_;
+ // Pointers to the fields in mirror::ClassExt. These can be held as ObjPtr since this is only used
+ // when we have an exclusive mutator_lock_ (i.e. all threads are suspended).
+ art::ObjPtr<art::mirror::PointerArray> obsolete_methods_;
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::DexCache>> obsolete_dex_caches_;
+ art::ObjPtr<art::mirror::DexCache> original_dex_cache_;
+};
+
// This visitor walks thread stacks and allocates and sets up the obsolete methods. It also does
// some basic sanity checks that the obsolete method is sane.
class ObsoleteMethodStackVisitor : public art::StackVisitor {
@@ -76,7 +136,7 @@
art::Thread* thread,
art::LinearAlloc* allocator,
const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
- /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps)
+ ObsoleteMap* obsolete_maps)
: StackVisitor(thread,
/*context*/nullptr,
StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -94,7 +154,7 @@
art::Thread* thread,
art::LinearAlloc* allocator,
const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
- /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps)
+ ObsoleteMap* obsolete_maps)
REQUIRES(art::Locks::mutator_lock_) {
ObsoleteMethodStackVisitor visitor(thread,
allocator,
@@ -104,6 +164,7 @@
}
bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ art::ScopedAssertNoThreadSuspension snts("Fixing up the stack for obsolete methods.");
art::ArtMethod* old_method = GetMethod();
if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
// We cannot ensure that the right dex file is used in inlined frames so we don't support
@@ -113,9 +174,8 @@
// TODO We should really support redefining intrinsics.
// We don't support intrinsics so check for them here.
DCHECK(!old_method->IsIntrinsic());
- art::ArtMethod* new_obsolete_method = nullptr;
- auto obsolete_method_pair = obsolete_maps_->find(old_method);
- if (obsolete_method_pair == obsolete_maps_->end()) {
+ art::ArtMethod* new_obsolete_method = obsolete_maps_->FindObsoleteVersion(old_method);
+ if (new_obsolete_method == nullptr) {
// Create a new Obsolete Method and put it in the list.
art::Runtime* runtime = art::Runtime::Current();
art::ClassLinker* cl = runtime->GetClassLinker();
@@ -129,7 +189,7 @@
DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass());
new_obsolete_method->SetIsObsolete();
new_obsolete_method->SetDontCompile();
- obsolete_maps_->insert({old_method, new_obsolete_method});
+ obsolete_maps_->RecordObsolete(old_method, new_obsolete_method);
// Update JIT Data structures to point to the new method.
art::jit::Jit* jit = art::Runtime::Current()->GetJit();
if (jit != nullptr) {
@@ -137,8 +197,6 @@
// structures to keep track of the new obsolete method.
jit->GetCodeCache()->MoveObsoleteMethod(old_method, new_obsolete_method);
}
- } else {
- new_obsolete_method = obsolete_method_pair->second;
}
DCHECK(new_obsolete_method != nullptr);
SetMethod(new_obsolete_method);
@@ -152,9 +210,9 @@
// The set of all methods which could be obsoleted.
const std::unordered_set<art::ArtMethod*>& obsoleted_methods_;
// A map from the original to the newly allocated obsolete method for frames on this thread. The
- // values in this map must be added to the obsolete_methods_ (and obsolete_dex_caches_) fields of
- // the redefined classes ClassExt by the caller.
- std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps_;
+ // values in this map are added to the obsolete_methods_ (and obsolete_dex_caches_) fields of
+ // the redefined class's ClassExt as the map is filled.
+ ObsoleteMap* obsolete_maps_;
};
jvmtiError Redefiner::IsModifiableClass(jvmtiEnv* env ATTRIBUTE_UNUSED,
@@ -431,11 +489,12 @@
}
struct CallbackCtx {
+ ObsoleteMap* obsolete_map;
art::LinearAlloc* allocator;
- std::unordered_map<art::ArtMethod*, art::ArtMethod*> obsolete_map;
std::unordered_set<art::ArtMethod*> obsolete_methods;
- explicit CallbackCtx(art::LinearAlloc* alloc) : allocator(alloc) {}
+ explicit CallbackCtx(ObsoleteMap* map, art::LinearAlloc* alloc)
+ : obsolete_map(map), allocator(alloc) {}
};
void DoAllocateObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS {
@@ -443,7 +502,7 @@
ObsoleteMethodStackVisitor::UpdateObsoleteFrames(t,
data->allocator,
data->obsolete_methods,
- &data->obsolete_map);
+ data->obsolete_map);
}
// This creates any ArtMethod* structures needed for obsolete methods and ensures that the stack is
@@ -454,9 +513,18 @@
art::mirror::ClassExt* ext = art_klass->GetExtData();
CHECK(ext->GetObsoleteMethods() != nullptr);
art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
- CallbackCtx ctx(linker->GetAllocatorForClassLoader(art_klass->GetClassLoader()));
+ // This holds pointers to the ClassExt obsolete-method fields, which are updated as needed.
+ ObsoleteMap map(ext->GetObsoleteMethods(), ext->GetObsoleteDexCaches(), art_klass->GetDexCache());
+ CallbackCtx ctx(&map, linker->GetAllocatorForClassLoader(art_klass->GetClassLoader()));
// Add all the declared methods to the map
for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) {
+ // It is possible to simply filter out some methods that cannot really become obsolete, such as
+ // native methods, and keep their original (possibly optimized) implementations. We don't do
+ // this, however, since we would need to mark these functions (still in the class's
+ // declared_methods array) as obsolete so that we can find the correct dex file to get meta-data
+ // from (for example the stack-frame size). Furthermore we would lose some useful error checking
+ // from the interpreter, which ensures we don't try to start executing obsolete methods.
ctx.obsolete_methods.insert(&m);
// TODO Allow this or check in IsModifiableClass.
DCHECK(!m.IsIntrinsic());
@@ -466,36 +534,6 @@
art::ThreadList* list = art::Runtime::Current()->GetThreadList();
list->ForEach(DoAllocateObsoleteMethodsCallback, static_cast<void*>(&ctx));
}
- FillObsoleteMethodMap(art_klass, ctx.obsolete_map);
-}
-
-// Fills the obsolete method map in the art_klass's extData. This is so obsolete methods are able to
-// figure out their DexCaches.
-void Redefiner::ClassRedefinition::FillObsoleteMethodMap(
- art::mirror::Class* art_klass,
- const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes) {
- int32_t index = 0;
- art::mirror::ClassExt* ext_data = art_klass->GetExtData();
- art::mirror::PointerArray* obsolete_methods = ext_data->GetObsoleteMethods();
- art::mirror::ObjectArray<art::mirror::DexCache>* obsolete_dex_caches =
- ext_data->GetObsoleteDexCaches();
- int32_t num_method_slots = obsolete_methods->GetLength();
- // Find the first empty index.
- for (; index < num_method_slots; index++) {
- if (obsolete_methods->GetElementPtrSize<art::ArtMethod*>(
- index, art::kRuntimePointerSize) == nullptr) {
- break;
- }
- }
- // Make sure we have enough space.
- CHECK_GT(num_method_slots, static_cast<int32_t>(obsoletes.size() + index));
- CHECK(obsolete_dex_caches->Get(index) == nullptr);
- // Fill in the map.
- for (auto& obs : obsoletes) {
- obsolete_methods->SetElementPtrSize(index, obs.second, art::kRuntimePointerSize);
- obsolete_dex_caches->Set(index, art_klass->GetDexCache());
- index++;
- }
}
// Try and get the declared method. First try to get a virtual method then a direct method if that's
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index c441377..65ee291 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -155,12 +155,6 @@
void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
REQUIRES(art::Locks::mutator_lock_);
- void FillObsoleteMethodMap(
- art::mirror::Class* art_klass,
- const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes)
- REQUIRES(art::Locks::mutator_lock_);
-
-
// Checks that the dex file contains only the single expected class and that the top-level class
// data has not been modified in an incompatible manner.
bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index d7ba1d7..51a24e4 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -874,9 +874,13 @@
CHECK_EQ(GetMethod(), callee) << "Expected: " << ArtMethod::PrettyMethod(callee)
<< " Found: " << ArtMethod::PrettyMethod(GetMethod());
} else {
- CHECK_EQ(instrumentation_frame.method_, GetMethod())
- << "Expected: " << ArtMethod::PrettyMethod(instrumentation_frame.method_)
- << " Found: " << ArtMethod::PrettyMethod(GetMethod());
+ // Instrumentation generally doesn't distinguish between a method's obsolete and
+ // non-obsolete versions.
+ CHECK_EQ(instrumentation_frame.method_->GetNonObsoleteMethod(),
+ GetMethod()->GetNonObsoleteMethod())
+ << "Expected: "
+ << ArtMethod::PrettyMethod(instrumentation_frame.method_->GetNonObsoleteMethod())
+ << " Found: " << ArtMethod::PrettyMethod(GetMethod()->GetNonObsoleteMethod());
}
if (num_frames_ != 0) {
// Check agreement of frame Ids only if num_frames_ is computed to avoid infinite
@@ -903,7 +907,7 @@
<< " native=" << method->IsNative()
<< std::noboolalpha
<< " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
- << "," << method->GetEntryPointFromJni()
+ << "," << (method->IsNative() ? method->GetEntryPointFromJni() : nullptr)
<< " next=" << *cur_quick_frame_;
}
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index c92305f..8d94626 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -80,7 +80,34 @@
}
}
-inline void Thread::CheckEmptyCheckpoint() {
+inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex) {
+ Thread* self = Thread::Current();
+ DCHECK_EQ(self, this);
+ for (;;) {
+ if (ReadFlag(kEmptyCheckpointRequest)) {
+ RunEmptyCheckpoint();
+ // Check that we only hold expected mutexes when accessing a weak ref.
+ if (kIsDebugBuild) {
+ for (int i = kLockLevelCount - 1; i >= 0; --i) {
+ BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
+ if (held_mutex != nullptr &&
+ held_mutex != Locks::mutator_lock_ &&
+ held_mutex != cond_var_mutex) {
+ std::vector<BaseMutex*>& expected_mutexes = Locks::expected_mutexes_on_weak_ref_access_;
+ CHECK(std::find(expected_mutexes.begin(), expected_mutexes.end(), held_mutex) !=
+ expected_mutexes.end())
+ << "Holding unexpected mutex " << held_mutex->GetName()
+ << " when accessing weak ref";
+ }
+ }
+ }
+ } else {
+ break;
+ }
+ }
+}
+
+inline void Thread::CheckEmptyCheckpointFromMutex() {
DCHECK_EQ(Thread::Current(), this);
for (;;) {
if (ReadFlag(kEmptyCheckpointRequest)) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 3a1b7da..a46e799 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -176,7 +176,8 @@
void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
// Process a pending empty checkpoint if pending.
- void CheckEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
+ void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
+ void CheckEmptyCheckpointFromMutex();
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
ObjPtr<mirror::Object> thread_peer)
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index df8acc3..caed369 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -379,13 +379,15 @@
return count;
}
-size_t ThreadList::RunEmptyCheckpoint(std::vector<uint32_t>& runnable_thread_ids) {
+void ThreadList::RunEmptyCheckpoint() {
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
-
+ std::vector<uint32_t> runnable_thread_ids;
size_t count = 0;
+ Barrier* barrier = empty_checkpoint_barrier_.get();
+ barrier->Init(self, 0);
{
MutexLock mu(self, *Locks::thread_list_lock_);
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
@@ -415,8 +417,72 @@
// checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
-
- return count;
+ {
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ uint64_t total_wait_time = 0;
+ bool first_iter = true;
+ while (true) {
+ // Wake up the runnable threads blocked on the mutexes that another thread, which is blocked
+ // on a weak ref access, holds (so they are indirectly blocked on that weak ref access through
+ // the other thread and the mutex). This needs to be done periodically because such a thread
+ // may be preempted between the CheckEmptyCheckpointFromMutex call and the subsequent futex
+ // wait in Mutex::ExclusiveLock, etc., when the wakeup via WakeupToRespondToEmptyCheckpoint
+ // arrives. Without repeating the wakeup this could cause a *very rare* deadlock. Most cases
+ // are handled in the first iteration.
+ for (BaseMutex* mutex : Locks::expected_mutexes_on_weak_ref_access_) {
+ mutex->WakeupToRespondToEmptyCheckpoint();
+ }
+ static constexpr uint64_t kEmptyCheckpointPeriodicTimeoutMs = 100; // 100ms
+ static constexpr uint64_t kEmptyCheckpointTotalTimeoutMs = 600 * 1000; // 10 minutes.
+ size_t barrier_count = first_iter ? count : 0;
+ first_iter = false; // Don't add to the barrier count from the second iteration on.
+ bool timed_out = barrier->Increment(self, barrier_count, kEmptyCheckpointPeriodicTimeoutMs);
+ if (!timed_out) {
+ break; // Success
+ }
+ // This is a very rare case.
+ total_wait_time += kEmptyCheckpointPeriodicTimeoutMs;
+ if (kIsDebugBuild && total_wait_time > kEmptyCheckpointTotalTimeoutMs) {
+ std::ostringstream ss;
+ ss << "Empty checkpoint timeout\n";
+ ss << "Barrier count " << barrier->GetCount(self) << "\n";
+ ss << "Runnable thread IDs";
+ for (uint32_t tid : runnable_thread_ids) {
+ ss << " " << tid;
+ }
+ ss << "\n";
+ Locks::mutator_lock_->Dump(ss);
+ ss << "\n";
+ LOG(FATAL_WITHOUT_ABORT) << ss.str();
+ // Some threads in 'runnable_thread_ids' are probably stuck. Try to dump their stacks.
+ // Avoid using ThreadList::Dump() initially because it is likely to get stuck as well.
+ {
+ ScopedObjectAccess soa(self);
+ MutexLock mu1(self, *Locks::thread_list_lock_);
+ for (Thread* thread : GetList()) {
+ uint32_t tid = thread->GetThreadId();
+ bool is_in_runnable_thread_ids =
+ std::find(runnable_thread_ids.begin(), runnable_thread_ids.end(), tid) !=
+ runnable_thread_ids.end();
+ if (is_in_runnable_thread_ids &&
+ thread->ReadFlag(kEmptyCheckpointRequest)) {
+ // Found a runnable thread that hasn't responded to the empty checkpoint request.
+ // Assume it's stuck and safe to dump its stack.
+ thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
+ /*dump_native_stack*/ true,
+ /*backtrace_map*/ nullptr,
+ /*force_dump_stack*/ true);
+ }
+ }
+ }
+ LOG(FATAL_WITHOUT_ABORT)
+ << "Dumped runnable threads that haven't responded to empty checkpoint.";
+ // Now use ThreadList::Dump() to dump more threads, noting it may get stuck.
+ Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
+ LOG(FATAL) << "Dumped all threads.";
+ }
+ }
+ }
}
// Request that a checkpoint function be run on all active (non-suspended)
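As a rough illustration of the situation the comment in the loop above describes, a thread blocked on a runtime mutex is expected to poll for empty-checkpoint requests before it actually parks. A hedged sketch, where the helper names TryAcquireOnce and ParkOnFutex are placeholders rather than real ART functions:

// Hypothetical lock slow path; only CheckEmptyCheckpointFromMutex and
// WakeupToRespondToEmptyCheckpoint are names taken from this change.
void Mutex::ExclusiveLock(Thread* self) {
  while (!TryAcquireOnce()) {
    if (self != nullptr) {
      self->CheckEmptyCheckpointFromMutex();  // respond if a request is already pending
    }
    // A WakeupToRespondToEmptyCheckpoint broadcast can land between the check above and this
    // wait, which is why RunEmptyCheckpoint re-broadcasts every kEmptyCheckpointPeriodicTimeoutMs.
    ParkOnFutex();
  }
}

With kEmptyCheckpointPeriodicTimeoutMs = 100 and kEmptyCheckpointTotalTimeoutMs = 600 * 1000, the debug-build timeout diagnostics above only trigger after roughly 6000 failed 100ms barrier waits, i.e. ten minutes.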
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index b60fca1..70917eb 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -109,9 +109,7 @@
// in-flight mutator heap access (eg. a read barrier.) Runnable threads will respond by
// decrementing the empty checkpoint barrier count. This works even when the weak ref access is
// disabled. Only one concurrent use is currently supported.
- // In debug build, runnable_thread_ids will be populated with the thread IDS of the runnable
- // thread to wait for.
- size_t RunEmptyCheckpoint(std::vector<uint32_t>& runnable_thread_ids)
+ void RunEmptyCheckpoint()
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
diff --git a/test/616-cha-native/expected.txt b/test/616-cha-native/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/616-cha-native/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/616-cha-native/info.txt b/test/616-cha-native/info.txt
new file mode 100644
index 0000000..a17bcab
--- /dev/null
+++ b/test/616-cha-native/info.txt
@@ -0,0 +1,2 @@
+Test for Class Hierarchy Analysis (CHA) single-implementation status updating
+behavior on an overridden native method.
diff --git a/test/616-cha-native/src/Main.java b/test/616-cha-native/src/Main.java
new file mode 100644
index 0000000..53a463c
--- /dev/null
+++ b/test/616-cha-native/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+abstract class A {
+ public abstract void foo();
+}
+
+class B extends A {
+ public native void foo();
+}
+
+class C extends B {
+ public void foo() {}
+}
+
+public class Main {
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ }
+}
diff --git a/test/616-cha/src/Main.java b/test/616-cha/src/Main.java
index b617944..beea90a 100644
--- a/test/616-cha/src/Main.java
+++ b/test/616-cha/src/Main.java
@@ -196,8 +196,6 @@
// should return true for those cases.
assertSingleImplementation(java.lang.String.class, "charAt", true);
assertSingleImplementation(java.lang.Thread.class, "join", true);
- // We don't set single-implementation modifier bit for native methods.
- assertSingleImplementation(java.lang.Thread.class, "isInterrupted", false);
if (isInterpreted()) {
sIsOptimizing = false;
diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc
index b727453..3ccfe86 100644
--- a/test/912-classes/classes.cc
+++ b/test/912-classes/classes.cc
@@ -433,6 +433,13 @@
class ClassLoadPrepareEquality {
public:
static constexpr const char* kClassName = "LMain$ClassE;";
+ static constexpr const char* kStorageFieldName = "STATIC";
+ static constexpr const char* kStorageFieldSig = "Ljava/lang/Object;";
+ static constexpr const char* kStorageWeakFieldName = "WEAK";
+ static constexpr const char* kStorageWeakFieldSig = "Ljava/lang/ref/Reference;";
+ static constexpr const char* kWeakClassName = "java/lang/ref/WeakReference";
+ static constexpr const char* kWeakInitSig = "(Ljava/lang/Object;)V";
+ static constexpr const char* kWeakGetSig = "()Ljava/lang/Object;";
static void JNICALL ClassLoadCallback(jvmtiEnv* jenv,
JNIEnv* jni_env,
@@ -446,6 +453,8 @@
// The following is bad and relies on implementation details. But otherwise a test would be
// a lot more complicated.
local_stored_class_ = jni_env->NewLocalRef(klass);
+ // Store the value into a field in the heap.
+ SetOrCompare(jni_env, klass, true);
}
}
@@ -459,10 +468,58 @@
CHECK(jni_env->IsSameObject(stored_class_, klass));
CHECK(jni_env->IsSameObject(weakly_stored_class_, klass));
CHECK(jni_env->IsSameObject(local_stored_class_, klass));
+ // Look up the value in a field in the heap.
+ SetOrCompare(jni_env, klass, false);
compared_ = true;
}
}
+ static void SetOrCompare(JNIEnv* jni_env, jobject value, bool set) {
+ CHECK(storage_class_ != nullptr);
+
+ // Simple direct storage.
+ jfieldID field = jni_env->GetStaticFieldID(storage_class_, kStorageFieldName, kStorageFieldSig);
+ CHECK(field != nullptr);
+
+ if (set) {
+ jni_env->SetStaticObjectField(storage_class_, field, value);
+ CHECK(!jni_env->ExceptionCheck());
+ } else {
+ ScopedLocalRef<jobject> stored(jni_env, jni_env->GetStaticObjectField(storage_class_, field));
+ CHECK(jni_env->IsSameObject(value, stored.get()));
+ }
+
+ // Storage as a reference.
+ ScopedLocalRef<jclass> weak_ref_class(jni_env, jni_env->FindClass(kWeakClassName));
+ CHECK(weak_ref_class.get() != nullptr);
+ jfieldID weak_field = jni_env->GetStaticFieldID(storage_class_,
+ kStorageWeakFieldName,
+ kStorageWeakFieldSig);
+ CHECK(weak_field != nullptr);
+ if (set) {
+ // Create a WeakReference.
+ jmethodID weak_init = jni_env->GetMethodID(weak_ref_class.get(), "<init>", kWeakInitSig);
+ CHECK(weak_init != nullptr);
+ ScopedLocalRef<jobject> weak_obj(jni_env, jni_env->NewObject(weak_ref_class.get(),
+ weak_init,
+ value));
+ CHECK(weak_obj.get() != nullptr);
+ jni_env->SetStaticObjectField(storage_class_, weak_field, weak_obj.get());
+ CHECK(!jni_env->ExceptionCheck());
+ } else {
+ // Check the reference value.
+ jmethodID get_referent = jni_env->GetMethodID(weak_ref_class.get(), "get", kWeakGetSig);
+ CHECK(get_referent != nullptr);
+ ScopedLocalRef<jobject> weak_obj(jni_env, jni_env->GetStaticObjectField(storage_class_,
+ weak_field));
+ CHECK(weak_obj.get() != nullptr);
+ ScopedLocalRef<jobject> weak_referent(jni_env, jni_env->CallObjectMethod(weak_obj.get(),
+ get_referent));
+ CHECK(weak_referent.get() != nullptr);
+ CHECK(jni_env->IsSameObject(value, weak_referent.get()));
+ }
+ }
+
static void CheckFound() {
CHECK(found_);
CHECK(compared_);
@@ -477,6 +534,8 @@
}
}
+ static jclass storage_class_;
+
private:
static jobject stored_class_;
static jweak weakly_stored_class_;
@@ -484,12 +543,19 @@
static bool found_;
static bool compared_;
};
+jclass ClassLoadPrepareEquality::storage_class_ = nullptr;
jobject ClassLoadPrepareEquality::stored_class_ = nullptr;
jweak ClassLoadPrepareEquality::weakly_stored_class_ = nullptr;
jobject ClassLoadPrepareEquality::local_stored_class_ = nullptr;
bool ClassLoadPrepareEquality::found_ = false;
bool ClassLoadPrepareEquality::compared_ = false;
+extern "C" JNIEXPORT void JNICALL Java_Main_setEqualityEventStorageClass(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jclass klass) {
+ ClassLoadPrepareEquality::storage_class_ =
+ reinterpret_cast<jclass>(env->NewGlobalRef(klass));
+}
+
extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadPrepareEqualityEvents(
JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean b) {
EnableEvents(env,
@@ -499,6 +565,8 @@
if (b == JNI_FALSE) {
ClassLoadPrepareEquality::Free(env);
ClassLoadPrepareEquality::CheckFound();
+ env->DeleteGlobalRef(ClassLoadPrepareEquality::storage_class_);
+ ClassLoadPrepareEquality::storage_class_ = nullptr;
}
}
diff --git a/test/912-classes/src/Main.java b/test/912-classes/src/Main.java
index c1de679..005074f 100644
--- a/test/912-classes/src/Main.java
+++ b/test/912-classes/src/Main.java
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+import java.lang.ref.Reference;
import java.lang.reflect.Constructor;
import java.lang.reflect.Proxy;
import java.util.Arrays;
@@ -315,6 +316,8 @@
}
private static void testClassLoadPrepareEquality() throws Exception {
+ setEqualityEventStorageClass(ClassF.class);
+
enableClassLoadPrepareEqualityEvents(true);
Class.forName("Main$ClassE");
@@ -393,6 +396,7 @@
private static native void enableClassLoadSeenEvents(boolean b);
private static native boolean hadLoadEvent();
+ private static native void setEqualityEventStorageClass(Class<?> c);
private static native void enableClassLoadPrepareEqualityEvents(boolean b);
private static class TestForNonInit {
@@ -428,6 +432,11 @@
}
}
+ public static class ClassF {
+ public static Object STATIC = null;
+ public static Reference<Object> WEAK = null;
+ }
+
private static final String DEX1 = System.getenv("DEX_LOCATION") + "/912-classes.jar";
private static final String DEX2 = System.getenv("DEX_LOCATION") + "/912-classes-ex.jar";
diff --git a/test/945-obsolete-native/build b/test/945-obsolete-native/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/945-obsolete-native/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/945-obsolete-native/expected.txt b/test/945-obsolete-native/expected.txt
new file mode 100644
index 0000000..83efda1
--- /dev/null
+++ b/test/945-obsolete-native/expected.txt
@@ -0,0 +1,9 @@
+hello
+Not doing anything here
+goodbye
+hello
+transforming calling function
+goodbye
+Hello - Transformed
+Not doing anything here
+Goodbye - Transformed
diff --git a/test/945-obsolete-native/info.txt b/test/945-obsolete-native/info.txt
new file mode 100644
index 0000000..c8b892c
--- /dev/null
+++ b/test/945-obsolete-native/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support where the redefinition is triggered from under a native frame.
diff --git a/test/945-obsolete-native/obsolete_native.cc b/test/945-obsolete-native/obsolete_native.cc
new file mode 100644
index 0000000..061e7af
--- /dev/null
+++ b/test/945-obsolete-native/obsolete_native.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+#include <memory>
+#include <stdio.h>
+
+#include "android-base/stringprintf.h"
+
+#include "android-base/stringprintf.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test945ObsoleteNative {
+
+extern "C" JNIEXPORT void JNICALL Java_Main_bindTest945ObsoleteNative(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+ BindFunctions(jvmti_env, env, "Transform");
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Transform_doExecute(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject runnable) {
+ jclass runnable_klass = env->FindClass("java/lang/Runnable");
+ DCHECK(runnable_klass != nullptr);
+ jmethodID run_method = env->GetMethodID(runnable_klass, "run", "()V");
+ env->CallVoidMethod(runnable, run_method);
+}
+
+} // namespace Test945ObsoleteNative
+} // namespace art
diff --git a/test/945-obsolete-native/run b/test/945-obsolete-native/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/945-obsolete-native/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/945-obsolete-native/src/Main.java b/test/945-obsolete-native/src/Main.java
new file mode 100644
index 0000000..5e2154e
--- /dev/null
+++ b/test/945-obsolete-native/src/Main.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+public class Main {
+ // class Transform {
+ // public void sayHi(Runnable r) {
+ // System.out.println("Hello - Transformed");
+ // doExecute(r);
+ // System.out.println("Goodbye - Transformed");
+ // }
+ //
+ // private static native void doExecute(Runnable r);
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAIgoACAASCQATABQIABUKABYAFwoABwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW" +
+ "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7" +
+ "KVYBAAlkb0V4ZWN1dGUBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMAAkACgcAHAwAHQAe" +
+ "AQATSGVsbG8gLSBUcmFuc2Zvcm1lZAcAHwwAIAAhDAAPAA4BABVHb29kYnllIC0gVHJhbnNmb3Jt" +
+ "ZWQBAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291" +
+ "dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxu" +
+ "AQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAABwAIAAAAAAADAAAACQAKAAEACwAAAB0AAQABAAAA" +
+ "BSq3AAGxAAAAAQAMAAAABgABAAAAEQABAA0ADgABAAsAAAA5AAIAAgAAABWyAAISA7YABCu4AAWy" +
+ "AAISBrYABLEAAAABAAwAAAASAAQAAAATAAgAFAAMABUAFAAWAQoADwAOAAAAAQAQAAAAAgAR");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQB1fZcJR/opPuXacK8mIla5shH0LSg72qJYAwAAcAAAAHhWNBIAAAAAAAAAALgCAAAR" +
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAUAAAD8AAAAAQAAACQBAAAUAgAARAEAAKIB" +
+ "AACqAQAAwQEAANYBAADjAQAA+gEAAA4CAAAkAgAAOAIAAEwCAABcAgAAXwIAAGMCAABuAgAAggIA" +
+ "AIcCAACQAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA" +
+ "lAEAAAsAAAAGAAAAnAEAAAUAAQAOAAAAAAAAAAAAAAAAAAEADAAAAAAAAQAQAAAAAQACAA8AAAAC" +
+ "AAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAJAAAAAAAAAKUCAAAAAAAAAQABAAEAAACXAgAABAAAAHAQ" +
+ "BAAAAA4ABAACAAIAAACcAgAAFAAAAGIAAAAbAQIAAABuIAMAEABxEAEAAwBiAAAAGwEBAAAAbiAD" +
+ "ABAADgABAAAAAwAAAAEAAAAEAAY8aW5pdD4AFUdvb2RieWUgLSBUcmFuc2Zvcm1lZAATSGVsbG8g" +
+ "LSBUcmFuc2Zvcm1lZAALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEv" +
+ "bGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABJM" +
+ "amF2YS9sYW5nL1N5c3RlbTsADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAAJZG9FeGVjdXRlABJlbWl0" +
+ "dGVyOiBqYWNrLTQuMjUAA291dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAQAHDoc8hwAAAAIBAICA" +
+ "BMQCAYoCAAIB3AIADQAAAAAAAAABAAAAAAAAAAEAAAARAAAAcAAAAAIAAAAHAAAAtAAAAAMAAAAD" +
+ "AAAA0AAAAAQAAAABAAAA9AAAAAUAAAAFAAAA/AAAAAYAAAABAAAAJAEAAAEgAAACAAAARAEAAAEQ" +
+ "AAACAAAAlAEAAAIgAAARAAAAogEAAAMgAAACAAAAlwIAAAAgAAABAAAApQIAAAAQAAABAAAAuAIA" +
+ "AA==");
+
+ public static void main(String[] args) {
+ bindTest945ObsoleteNative();
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ t.sayHi(() -> {
+ System.out.println("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ });
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ private static native void bindTest945ObsoleteNative();
+}
diff --git a/test/945-obsolete-native/src/Transform.java b/test/945-obsolete-native/src/Transform.java
new file mode 100644
index 0000000..2b7cc1b
--- /dev/null
+++ b/test/945-obsolete-native/src/Transform.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi(Runnable r) {
+ System.out.println("hello");
+ doExecute(r);
+ System.out.println("goodbye");
+ }
+
+ private static native void doExecute(Runnable r);
+}
diff --git a/test/Android.bp b/test/Android.bp
index d3244a6..00c890a 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -274,6 +274,7 @@
"933-misc-events/misc_events.cc",
"936-search-onload/search_onload.cc",
"944-transform-classloaders/classloader.cc",
+ "945-obsolete-native/obsolete_native.cc",
],
shared_libs: [
"libbase",
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index c5a9356..351857d 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -122,6 +122,7 @@
{ "942-private-recursive", common_redefine::OnLoad, nullptr },
{ "943-private-recursive-jit", common_redefine::OnLoad, nullptr },
{ "944-transform-classloaders", common_redefine::OnLoad, nullptr },
+ { "945-obsolete-native", common_redefine::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 729a3e5..6e123ce 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -14,6 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# Exit as a stop-gap measure for b/35308152.
+exit 0
+
if [ ! -d libcore ]; then
echo "Script needs to be run at the root of the android tree"
exit 1