Merge "[optimizing] Don't rely on the verifier for String.<init>."
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 31d6412..1319f2c 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -2347,27 +2347,27 @@
}
case Instruction::CMP_LONG: {
- Binop_23x_cmp(instruction, Primitive::kPrimLong, kNoBias, dex_pc);
+ Binop_23x_cmp(instruction, Primitive::kPrimLong, ComparisonBias::kNoBias, dex_pc);
break;
}
case Instruction::CMPG_FLOAT: {
- Binop_23x_cmp(instruction, Primitive::kPrimFloat, kGtBias, dex_pc);
+ Binop_23x_cmp(instruction, Primitive::kPrimFloat, ComparisonBias::kGtBias, dex_pc);
break;
}
case Instruction::CMPG_DOUBLE: {
- Binop_23x_cmp(instruction, Primitive::kPrimDouble, kGtBias, dex_pc);
+ Binop_23x_cmp(instruction, Primitive::kPrimDouble, ComparisonBias::kGtBias, dex_pc);
break;
}
case Instruction::CMPL_FLOAT: {
- Binop_23x_cmp(instruction, Primitive::kPrimFloat, kLtBias, dex_pc);
+ Binop_23x_cmp(instruction, Primitive::kPrimFloat, ComparisonBias::kLtBias, dex_pc);
break;
}
case Instruction::CMPL_DOUBLE: {
- Binop_23x_cmp(instruction, Primitive::kPrimDouble, kLtBias, dex_pc);
+ Binop_23x_cmp(instruction, Primitive::kPrimDouble, ComparisonBias::kLtBias, dex_pc);
break;
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 0e099a8..75b8f06 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -334,7 +334,7 @@
#undef __
#define __ down_cast<ArmAssembler*>(GetAssembler())->
-inline Condition ARMCondition(IfCondition cond) {
+inline Condition ARMSignedOrFPCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE;
@@ -342,24 +342,22 @@
case kCondLE: return LE;
case kCondGT: return GT;
case kCondGE: return GE;
- default:
- LOG(FATAL) << "Unknown if condition";
}
- return EQ; // Unreachable.
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
-inline Condition ARMOppositeCondition(IfCondition cond) {
+inline Condition ARMUnsignedCondition(IfCondition cond) {
switch (cond) {
- case kCondEQ: return NE;
- case kCondNE: return EQ;
- case kCondLT: return GE;
- case kCondLE: return GT;
- case kCondGT: return LE;
- case kCondGE: return LT;
- default:
- LOG(FATAL) << "Unknown if condition";
+ case kCondEQ: return EQ;
+ case kCondNE: return NE;
+ case kCondLT: return LO;
+ case kCondLE: return LS;
+ case kCondGT: return HI;
+ case kCondGE: return HS;
}
- return EQ; // Unreachable.
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -1008,6 +1006,142 @@
UNUSED(exit);
}
+void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
+ ShifterOperand operand;
+ if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, right, &operand)) {
+ __ cmp(left, operand);
+ } else {
+ Register temp = IP;
+ __ LoadImmediate(temp, right);
+ __ cmp(left, ShifterOperand(temp));
+ }
+}
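
For context on the ShifterOperandCanHold() test above: ARM data-processing instructions can only encode certain immediates, so values that do not fit are first materialized in the scratch register IP. A minimal standalone sketch (plain C++, not ART code) of the classic ARM-mode rule, an 8-bit constant rotated right by an even amount; the real assembler also handles the richer Thumb-2 encodings, so treat this as an illustration only:

    #include <cstdint>
    #include <cstdio>

    // True if 'value' fits the ARM-mode modified-immediate encoding:
    // an 8-bit constant rotated right by an even amount (0, 2, ..., 30).
    static bool IsArmModifiedImmediate(uint32_t value) {
      for (unsigned rot = 0; rot < 32; rot += 2) {
        // Rotating left by 'rot' undoes the encoder's rotate-right.
        uint32_t undone = (rot == 0) ? value : ((value << rot) | (value >> (32 - rot)));
        if (undone <= 0xFFu) {
          return true;
        }
      }
      return false;
    }

    int main() {
      printf("%d %d %d\n",
             IsArmModifiedImmediate(0xFFu),         // 1: fits as-is
             IsArmModifiedImmediate(0xFF000000u),   // 1: 0xFF rotated right by 8
             IsArmModifiedImmediate(0x12345678u));  // 0: needs LoadImmediate into IP
      return 0;
    }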
+
+void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
+ Label* true_label,
+ Label* false_label) {
+ __ vmstat(); // Transfer the FP status register to the ARM APSR.
+ if (cond->IsFPConditionTrueIfNaN()) {
+ __ b(true_label, VS); // VS for unordered.
+ } else if (cond->IsFPConditionFalseIfNaN()) {
+ __ b(false_label, VS); // VS for unordered.
+ }
+ __ b(true_label, ARMSignedOrFPCondition(cond->GetCondition()));
+}
+
+void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
+ Label* true_label,
+ Label* false_label) {
+ LocationSummary* locations = cond->GetLocations();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+ IfCondition if_cond = cond->GetCondition();
+
+ Register left_high = left.AsRegisterPairHigh<Register>();
+ Register left_low = left.AsRegisterPairLow<Register>();
+ IfCondition true_high_cond = if_cond;
+ IfCondition false_high_cond = cond->GetOppositeCondition();
+ Condition final_condition = ARMUnsignedCondition(if_cond);
+
+ // Set the conditions for the test, remembering that == needs to be
+ // decided using the low words.
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondNE:
+ // Nothing to do.
+ break;
+ case kCondLT:
+ false_high_cond = kCondGT;
+ break;
+ case kCondLE:
+ true_high_cond = kCondLT;
+ break;
+ case kCondGT:
+ false_high_cond = kCondLT;
+ break;
+ case kCondGE:
+ true_high_cond = kCondGT;
+ break;
+ }
+ if (right.IsConstant()) {
+ int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
+ int32_t val_low = Low32Bits(value);
+ int32_t val_high = High32Bits(value);
+
+ GenerateCompareWithImmediate(left_high, val_high);
+ if (if_cond == kCondNE) {
+ __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ } else if (if_cond == kCondEQ) {
+ __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ } else {
+ __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ }
+ // Must be equal high, so compare the lows.
+ GenerateCompareWithImmediate(left_low, val_low);
+ } else {
+ Register right_high = right.AsRegisterPairHigh<Register>();
+ Register right_low = right.AsRegisterPairLow<Register>();
+
+ __ cmp(left_high, ShifterOperand(right_high));
+ if (if_cond == kCondNE) {
+ __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ } else if (if_cond == kCondEQ) {
+ __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ } else {
+ __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ }
+ // Must be equal high, so compare the lows.
+ __ cmp(left_low, ShifterOperand(right_low));
+ }
+ // The last comparison might be unsigned.
+ __ b(true_label, final_condition);
+}
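
Why the final low-word branch uses ARMUnsignedCondition(): once the high words are known equal, the low 32 bits of a two's-complement long behave as an unsigned magnitude. A minimal sketch (plain C++, not ART code, arithmetic right shift assumed) of the word-pair strategy emitted above:

    #include <cstdint>
    #include <cstdio>

    // Compare high words with *signed* semantics; only when they are equal,
    // decide with an *unsigned* comparison of the low words.
    static bool LessThan64(int64_t a, int64_t b) {
      int32_t a_hi = static_cast<int32_t>(a >> 32);
      int32_t b_hi = static_cast<int32_t>(b >> 32);
      uint32_t a_lo = static_cast<uint32_t>(a);
      uint32_t b_lo = static_cast<uint32_t>(b);
      if (a_hi != b_hi) {
        return a_hi < b_hi;  // Signed compare decides on differing high words.
      }
      return a_lo < b_lo;    // Equal high words: unsigned compare of low words.
    }

    int main() {
      // -1 < 0: high words differ, signed compare decides.
      // 1 < 2: high words are equal (0), unsigned low-word compare decides.
      printf("%d %d %d\n", LessThan64(-1, 0), LessThan64(1, 2), LessThan64(3, 3));
      return 0;
    }

The same split explains the VisitCompare (CMP_LONG) sequence further down, where the low words are likewise compared unsigned.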
+
+void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HIf* if_instr,
+ HCondition* condition,
+ Label* true_target,
+ Label* false_target,
+ Label* always_true_target) {
+ LocationSummary* locations = condition->GetLocations();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+
+ // We don't want true_target as a nullptr.
+ if (true_target == nullptr) {
+ true_target = always_true_target;
+ }
+ bool falls_through = (false_target == nullptr);
+
+ // FP compares don't like null false_targets.
+ if (false_target == nullptr) {
+ false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+ }
+
+ Primitive::Type type = condition->InputAt(0)->GetType();
+ switch (type) {
+ case Primitive::kPrimLong:
+ GenerateLongComparesAndJumps(condition, true_target, false_target);
+ break;
+ case Primitive::kPrimFloat:
+ __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
+ GenerateFPJumps(condition, true_target, false_target);
+ break;
+ case Primitive::kPrimDouble:
+ __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+ GenerateFPJumps(condition, true_target, false_target);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected compare type " << type;
+ }
+
+ if (!falls_through) {
+ __ b(false_target);
+ }
+}
+
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
Label* true_target,
Label* false_target,
@@ -1033,25 +1167,27 @@
} else {
// Condition has not been materialized, use its inputs as the
// comparison and its condition as the branch condition.
+ Primitive::Type type =
+ cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
+ // Is this a long or FP comparison that has been folded into the HCondition?
+ if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
+ // Generate the comparison directly.
+ GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(),
+ true_target, false_target, always_true_target);
+ return;
+ }
+
LocationSummary* locations = cond->GetLocations();
DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
Register left = locations->InAt(0).AsRegister<Register>();
- if (locations->InAt(1).IsRegister()) {
- __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
+ Location right = locations->InAt(1);
+ if (right.IsRegister()) {
+ __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
} else {
- DCHECK(locations->InAt(1).IsConstant());
- HConstant* constant = locations->InAt(1).GetConstant();
- int32_t value = CodeGenerator::GetInt32ValueOf(constant);
- ShifterOperand operand;
- if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
- __ cmp(left, operand);
- } else {
- Register temp = IP;
- __ LoadImmediate(temp, value);
- __ cmp(left, ShifterOperand(temp));
- }
+ DCHECK(right.IsConstant());
+ GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
+ __ b(true_target, ARMSignedOrFPCondition(cond->AsCondition()->GetCondition()));
}
}
if (false_target != nullptr) {
@@ -1104,37 +1240,88 @@
void LocationsBuilderARM::VisitCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
- if (cond->NeedsMaterialization()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ // Handle the long/FP comparisons made in instruction simplification.
+ switch (cond->InputAt(0)->GetType()) {
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ if (cond->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ if (cond->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ break;
+
+ default:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ if (cond->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
}
}
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
- if (!cond->NeedsMaterialization()) return;
- LocationSummary* locations = cond->GetLocations();
- Register left = locations->InAt(0).AsRegister<Register>();
-
- if (locations->InAt(1).IsRegister()) {
- __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
- } else {
- DCHECK(locations->InAt(1).IsConstant());
- int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
- ShifterOperand operand;
- if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
- __ cmp(left, operand);
- } else {
- Register temp = IP;
- __ LoadImmediate(temp, value);
- __ cmp(left, ShifterOperand(temp));
- }
+ if (!cond->NeedsMaterialization()) {
+ return;
}
- __ it(ARMCondition(cond->GetCondition()), kItElse);
- __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
- ARMCondition(cond->GetCondition()));
- __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
- ARMOppositeCondition(cond->GetCondition()));
+
+ LocationSummary* locations = cond->GetLocations();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+ Register out = locations->Out().AsRegister<Register>();
+ Label true_label, false_label;
+
+ switch (cond->InputAt(0)->GetType()) {
+ default: {
+ // Integer case.
+ if (right.IsRegister()) {
+ __ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
+ } else {
+ DCHECK(right.IsConstant());
+ GenerateCompareWithImmediate(left.AsRegister<Register>(),
+ CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ }
+ __ it(ARMSignedOrFPCondition(cond->GetCondition()), kItElse);
+ __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
+ ARMSignedOrFPCondition(cond->GetCondition()));
+ __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
+ ARMSignedOrFPCondition(cond->GetOppositeCondition()));
+ return;
+ }
+ case Primitive::kPrimLong:
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+ break;
+ case Primitive::kPrimFloat:
+ __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
+ GenerateFPJumps(cond, &true_label, &false_label);
+ break;
+ case Primitive::kPrimDouble:
+ __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+ GenerateFPJumps(cond, &true_label, &false_label);
+ break;
+ }
+
+ // Convert the jumps into the result.
+ Label done_label;
+
+ // False case: result = 0.
+ __ Bind(&false_label);
+ __ LoadImmediate(out, 0);
+ __ b(&done_label);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ LoadImmediate(out, 1);
+ __ Bind(&done_label);
}
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
@@ -2913,7 +3100,7 @@
ShifterOperand(right.AsRegisterPairHigh<Register>())); // Signed compare.
__ b(&less, LT);
__ b(&greater, GT);
- // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
+ // Do LoadImmediate before the last `cmp`, as LoadImmediate might affect the status flags.
__ LoadImmediate(out, 0);
__ cmp(left.AsRegisterPairLow<Register>(),
ShifterOperand(right.AsRegisterPairLow<Register>())); // Unsigned compare.
@@ -2936,7 +3123,7 @@
LOG(FATAL) << "Unexpected compare type " << type;
}
__ b(&done, EQ);
- __ b(&less, CC); // CC is for both: unsigned compare for longs and 'less than' for floats.
+ __ b(&less, LO); // LO is for both: unsigned compare for longs and 'less than' for floats.
__ Bind(&greater);
__ LoadImmediate(out, 1);
@@ -3710,7 +3897,7 @@
Register length = locations->InAt(1).AsRegister<Register>();
__ cmp(index, ShifterOperand(length));
- __ b(slow_path->GetEntryLabel(), CS);
+ __ b(slow_path->GetEntryLabel(), HS);
}
void CodeGeneratorARM::MarkGCCard(Register temp,
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 1d10293..53bd766 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -207,6 +207,14 @@
Label* true_target,
Label* false_target,
Label* always_true_target);
+ void GenerateCompareWithImmediate(Register left, int32_t right);
+ void GenerateCompareTestAndBranch(HIf* if_instr,
+ HCondition* condition,
+ Label* true_target,
+ Label* false_target,
+ Label* always_true_target);
+ void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
+ void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 97709dd..e15eff9 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -344,7 +344,7 @@
#undef __
#define __ down_cast<X86Assembler*>(GetAssembler())->
-inline Condition X86Condition(IfCondition cond) {
+inline Condition X86SignedCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
case kCondNE: return kNotEqual;
@@ -352,10 +352,22 @@
case kCondLE: return kLessEqual;
case kCondGT: return kGreater;
case kCondGE: return kGreaterEqual;
- default:
- LOG(FATAL) << "Unknown if condition";
}
- return kEqual;
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+inline Condition X86UnsignedOrFPCondition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return kEqual;
+ case kCondNE: return kNotEqual;
+ case kCondLT: return kBelow;
+ case kCondLE: return kBelowEqual;
+ case kCondGT: return kAbove;
+ case kCondGE: return kAboveEqual;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
void CodeGeneratorX86::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -892,46 +904,12 @@
void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
- bool gt_bias = cond->IsGtBias();
- IfCondition if_cond = cond->GetCondition();
- Condition ccode = X86Condition(if_cond);
- switch (if_cond) {
- case kCondEQ:
- if (!gt_bias) {
- __ j(kParityEven, false_label);
- }
- break;
- case kCondNE:
- if (!gt_bias) {
- __ j(kParityEven, true_label);
- }
- break;
- case kCondLT:
- if (gt_bias) {
- __ j(kParityEven, false_label);
- }
- ccode = kBelow;
- break;
- case kCondLE:
- if (gt_bias) {
- __ j(kParityEven, false_label);
- }
- ccode = kBelowEqual;
- break;
- case kCondGT:
- if (gt_bias) {
- __ j(kParityEven, true_label);
- }
- ccode = kAbove;
- break;
- case kCondGE:
- if (gt_bias) {
- __ j(kParityEven, true_label);
- }
- ccode = kAboveEqual;
- break;
+ if (cond->IsFPConditionTrueIfNaN()) {
+ __ j(kUnordered, true_label);
+ } else if (cond->IsFPConditionFalseIfNaN()) {
+ __ j(kUnordered, false_label);
}
- __ j(ccode, true_label);
+ __ j(X86UnsignedOrFPCondition(cond->GetCondition()), true_label);
}
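
The kUnordered condition works here because ucomiss/ucomisd report their result entirely through ZF, PF and CF (per the Intel SDM): an unordered result sets all three, which is also why X86UnsignedOrFPCondition maps LT/LE/GT/GE to the below/above family rather than the signed less/greater family. A small model (plain C++, not ART code):

    #include <cmath>
    #include <cstdio>

    struct Flags { bool zf, pf, cf; };

    // EFLAGS produced by ucomiss/ucomisd per the Intel SDM:
    //   unordered -> ZF=1 PF=1 CF=1, less -> 001, greater -> 000, equal -> 100.
    static Flags Ucomiss(float a, float b) {
      if (std::isnan(a) || std::isnan(b)) return {true, true, true};
      if (a < b) return {false, false, true};
      if (a > b) return {false, false, false};
      return {true, false, false};
    }

    int main() {
      Flags f = Ucomiss(1.0f, 2.0f);
      printf("1 < 2: CF=%d (kBelow taken)\n", f.cf);              // CF=1
      f = Ucomiss(std::nanf(""), 1.0f);
      printf("NaN:   ZF=%d PF=%d CF=%d (kUnordered = PF set)\n",
             f.zf, f.pf, f.cf);                                   // 1 1 1
      return 0;
    }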
void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
@@ -942,43 +920,37 @@
Location right = locations->InAt(1);
IfCondition if_cond = cond->GetCondition();
- Register left_low = left.AsRegisterPairLow<Register>();
Register left_high = left.AsRegisterPairHigh<Register>();
+ Register left_low = left.AsRegisterPairLow<Register>();
IfCondition true_high_cond = if_cond;
IfCondition false_high_cond = cond->GetOppositeCondition();
- Condition final_condition = X86Condition(if_cond);
+ Condition final_condition = X86UnsignedOrFPCondition(if_cond);
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
switch (if_cond) {
case kCondEQ:
- false_high_cond = kCondNE;
- break;
case kCondNE:
- false_high_cond = kCondEQ;
+ // Nothing to do.
break;
case kCondLT:
false_high_cond = kCondGT;
- final_condition = kBelow;
break;
case kCondLE:
true_high_cond = kCondLT;
- final_condition = kBelowEqual;
break;
case kCondGT:
false_high_cond = kCondLT;
- final_condition = kAbove;
break;
case kCondGE:
true_high_cond = kCondGT;
- final_condition = kAboveEqual;
break;
}
if (right.IsConstant()) {
int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
- int32_t val_low = Low32Bits(value);
int32_t val_high = High32Bits(value);
+ int32_t val_low = Low32Bits(value);
if (val_high == 0) {
__ testl(left_high, left_high);
@@ -986,12 +958,12 @@
__ cmpl(left_high, Immediate(val_high));
}
if (if_cond == kCondNE) {
- __ j(X86Condition(true_high_cond), true_label);
+ __ j(X86SignedCondition(true_high_cond), true_label);
} else if (if_cond == kCondEQ) {
- __ j(X86Condition(false_high_cond), false_label);
+ __ j(X86SignedCondition(false_high_cond), false_label);
} else {
- __ j(X86Condition(true_high_cond), true_label);
- __ j(X86Condition(false_high_cond), false_label);
+ __ j(X86SignedCondition(true_high_cond), true_label);
+ __ j(X86SignedCondition(false_high_cond), false_label);
}
// Must be equal high, so compare the lows.
if (val_low == 0) {
@@ -1000,17 +972,17 @@
__ cmpl(left_low, Immediate(val_low));
}
} else {
- Register right_low = right.AsRegisterPairLow<Register>();
Register right_high = right.AsRegisterPairHigh<Register>();
+ Register right_low = right.AsRegisterPairLow<Register>();
__ cmpl(left_high, right_high);
if (if_cond == kCondNE) {
- __ j(X86Condition(true_high_cond), true_label);
+ __ j(X86SignedCondition(true_high_cond), true_label);
} else if (if_cond == kCondEQ) {
- __ j(X86Condition(false_high_cond), false_label);
+ __ j(X86SignedCondition(false_high_cond), false_label);
} else {
- __ j(X86Condition(true_high_cond), true_label);
- __ j(X86Condition(false_high_cond), false_label);
+ __ j(X86SignedCondition(true_high_cond), true_label);
+ __ j(X86SignedCondition(false_high_cond), false_label);
}
// Must be equal high, so compare the lows.
__ cmpl(left_low, right_low);
@@ -1045,12 +1017,10 @@
GenerateLongComparesAndJumps(condition, true_target, false_target);
break;
case Primitive::kPrimFloat:
- DCHECK(right.IsFpuRegister());
__ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
GenerateFPJumps(condition, true_target, false_target);
break;
case Primitive::kPrimDouble:
- DCHECK(right.IsFpuRegister());
__ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
GenerateFPJumps(condition, true_target, false_target);
break;
@@ -1080,7 +1050,7 @@
DCHECK_EQ(cond_value, 0);
}
} else {
- bool materialized =
+ bool is_materialized =
!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
// Moves do not affect the eflags register, so if the condition is
// evaluated just before the if, we don't need to evaluate it
@@ -1089,8 +1059,8 @@
Primitive::Type type = cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
bool eflags_set = cond->IsCondition()
&& cond->AsCondition()->IsBeforeWhenDisregardMoves(instruction)
- && type == Primitive::kPrimInt;
- if (materialized) {
+ && (type != Primitive::kPrimLong && !Primitive::IsFloatingPointType(type));
+ if (is_materialized) {
if (!eflags_set) {
// Materialized condition, compare against 0.
Location lhs = instruction->GetLocations()->InAt(0);
@@ -1101,9 +1071,12 @@
}
__ j(kNotEqual, true_target);
} else {
- __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target);
}
} else {
+ // Condition has not been materialized, use its inputs as the
+ // comparison and its condition as the branch condition.
+
// Is this a long or FP comparison that has been folded into the HCondition?
if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
// Generate the comparison directly.
@@ -1114,6 +1087,7 @@
always_true_target);
return;
}
+
Location lhs = cond->GetLocations()->InAt(0);
Location rhs = cond->GetLocations()->InAt(1);
// LHS is guaranteed to be in a register (see
@@ -1130,7 +1104,7 @@
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target);
}
}
if (false_target != nullptr) {
@@ -1288,7 +1262,7 @@
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ setb(X86Condition(cond->GetCondition()), reg);
+ __ setb(X86SignedCondition(cond->GetCondition()), reg);
return;
}
case Primitive::kPrimLong:
@@ -1307,12 +1281,12 @@
// Convert the jumps into the result.
Label done_label;
- // false case: result = 0;
+ // False case: result = 0.
__ Bind(&false_label);
__ xorl(reg, reg);
__ jmp(&done_label);
- // True case: result = 1
+ // True case: result = 1.
__ Bind(&true_label);
__ movl(reg, Immediate(1));
__ Bind(&done_label);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index bcf2444..a95ce68 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -363,7 +363,7 @@
#undef __
#define __ down_cast<X86_64Assembler*>(GetAssembler())->
-inline Condition X86_64Condition(IfCondition cond) {
+inline Condition X86_64IntegerCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
case kCondNE: return kNotEqual;
@@ -371,10 +371,22 @@
case kCondLE: return kLessEqual;
case kCondGT: return kGreater;
case kCondGE: return kGreaterEqual;
- default:
- LOG(FATAL) << "Unknown if condition";
}
- return kEqual;
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+inline Condition X86_64FPCondition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return kEqual;
+ case kCondNE: return kNotEqual;
+ case kCondLT: return kBelow;
+ case kCondLE: return kBelowEqual;
+ case kCondGT: return kAbove;
+ case kCondGE: return kAboveEqual;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
@@ -836,46 +848,12 @@
void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
- bool gt_bias = cond->IsGtBias();
- IfCondition if_cond = cond->GetCondition();
- Condition ccode = X86_64Condition(if_cond);
- switch (if_cond) {
- case kCondEQ:
- if (!gt_bias) {
- __ j(kParityEven, false_label);
- }
- break;
- case kCondNE:
- if (!gt_bias) {
- __ j(kParityEven, true_label);
- }
- break;
- case kCondLT:
- if (gt_bias) {
- __ j(kParityEven, false_label);
- }
- ccode = kBelow;
- break;
- case kCondLE:
- if (gt_bias) {
- __ j(kParityEven, false_label);
- }
- ccode = kBelowEqual;
- break;
- case kCondGT:
- if (gt_bias) {
- __ j(kParityEven, true_label);
- }
- ccode = kAbove;
- break;
- case kCondGE:
- if (gt_bias) {
- __ j(kParityEven, true_label);
- }
- ccode = kAboveEqual;
- break;
+ if (cond->IsFPConditionTrueIfNaN()) {
+ __ j(kUnordered, true_label);
+ } else if (cond->IsFPConditionFalseIfNaN()) {
+ __ j(kUnordered, false_label);
}
- __ j(ccode, true_label);
+ __ j(X86_64FPCondition(cond->GetCondition()), true_label);
}
void InstructionCodeGeneratorX86_64::GenerateCompareTestAndBranch(HIf* if_instr,
@@ -911,7 +889,7 @@
__ cmpq(left_reg, Immediate(static_cast<int32_t>(value)));
}
} else {
- // Value won't fit in an 32-bit integer.
+ // Value won't fit in a 32-bit integer.
__ cmpq(left_reg, codegen_->LiteralInt64Address(value));
}
} else if (right.IsDoubleStackSlot()) {
@@ -919,7 +897,7 @@
} else {
__ cmpq(left_reg, right.AsRegister<CpuRegister>());
}
- __ j(X86_64Condition(condition->GetCondition()), true_target);
+ __ j(X86_64IntegerCondition(condition->GetCondition()), true_target);
break;
}
case Primitive::kPrimFloat: {
@@ -978,7 +956,7 @@
DCHECK_EQ(cond_value, 0);
}
} else {
- bool materialized =
+ bool is_materialized =
!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
// Moves do not affect the eflags register, so if the condition is
// evaluated just before the if, we don't need to evaluate it
@@ -989,7 +967,7 @@
&& cond->AsCondition()->IsBeforeWhenDisregardMoves(instruction)
&& !Primitive::IsFloatingPointType(type);
- if (materialized) {
+ if (is_materialized) {
if (!eflags_set) {
// Materialized condition, compare against 0.
Location lhs = instruction->GetLocations()->InAt(0);
@@ -1001,16 +979,20 @@
}
__ j(kNotEqual, true_target);
} else {
- __ j(X86_64Condition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86_64IntegerCondition(cond->AsCondition()->GetCondition()), true_target);
}
} else {
+ // Condition has not been materialized, use its inputs as the
+ // comparison and its condition as the branch condition.
+
// Is this a long or FP comparison that has been folded into the HCondition?
if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
- // Generate the comparison directly
+ // Generate the comparison directly.
GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(),
true_target, false_target, always_true_target);
return;
}
+
Location lhs = cond->GetLocations()->InAt(0);
Location rhs = cond->GetLocations()->InAt(1);
if (rhs.IsRegister()) {
@@ -1026,7 +1008,7 @@
__ cmpl(lhs.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
- __ j(X86_64Condition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86_64IntegerCondition(cond->AsCondition()->GetCondition()), true_target);
}
}
if (false_target != nullptr) {
@@ -1175,7 +1157,7 @@
} else {
__ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
- __ setcc(X86_64Condition(cond->GetCondition()), reg);
+ __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg);
return;
case Primitive::kPrimLong:
// Clear output register: setcc only sets the low byte.
@@ -1198,7 +1180,7 @@
} else {
__ cmpq(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
- __ setcc(X86_64Condition(cond->GetCondition()), reg);
+ __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg);
return;
case Primitive::kPrimFloat: {
XmmRegister lhs_reg = lhs.AsFpuRegister<XmmRegister>();
@@ -1231,12 +1213,12 @@
// Convert the jumps into the result.
Label done_label;
- // false case: result = 0;
+ // False case: result = 0.
__ Bind(&false_label);
__ xorl(reg, reg);
__ jmp(&done_label);
- // True case: result = 1
+ // True case: result = 1.
__ Bind(&true_label);
__ movl(reg, Immediate(1));
__ Bind(&done_label);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index da07a8c..c86d797 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -518,10 +518,10 @@
void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
// Try to fold an HCompare into this HCondition.
- // This simplification is currently only supported on x86 and x86_64.
- // TODO: Implement it for ARM, ARM64 and MIPS64.
+ // This simplification is currently only supported on x86, x86_64 and ARM.
+ // TODO: Implement it for ARM64 and MIPS64.
InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- if (instruction_set != kX86 && instruction_set != kX86_64) {
+ if (instruction_set != kX86 && instruction_set != kX86_64 && instruction_set != kThumb2) {
return;
}
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index faee2dd..cc4b6f6 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -31,7 +31,7 @@
InstructionSimplifier(HGraph* graph,
OptimizingCompilerStats* stats = nullptr,
const char* name = kInstructionSimplifierPassName)
- : HOptimization(graph, name, stats) {}
+ : HOptimization(graph, name, stats) {}
static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index b628806..8546a10 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2150,7 +2150,7 @@
// The comparison bias applies for floating point operations and indicates how NaN
// comparisons are treated:
-enum ComparisonBias {
+enum class ComparisonBias {
kNoBias, // bias is not applicable (e.g. for long operations)
kGtBias, // return 1 for NaN comparisons
kLtBias, // return -1 for NaN comparisons
@@ -2161,7 +2161,7 @@
HCondition(HInstruction* first, HInstruction* second)
: HBinaryOperation(Primitive::kPrimBoolean, first, second),
needs_materialization_(true),
- bias_(kNoBias) {}
+ bias_(ComparisonBias::kNoBias) {}
bool NeedsMaterialization() const { return needs_materialization_; }
void ClearNeedsMaterialization() { needs_materialization_ = false; }
@@ -2176,7 +2176,7 @@
virtual IfCondition GetOppositeCondition() const = 0;
- bool IsGtBias() { return bias_ == kGtBias; }
+ bool IsGtBias() const { return bias_ == ComparisonBias::kGtBias; }
void SetBias(ComparisonBias bias) { bias_ = bias; }
@@ -2184,6 +2184,18 @@
return bias_ == other->AsCondition()->bias_;
}
+ bool IsFPConditionTrueIfNaN() const {
+ DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType()));
+ IfCondition if_cond = GetCondition();
+ return IsGtBias() ? ((if_cond == kCondGT) || (if_cond == kCondGE)) : (if_cond == kCondNE);
+ }
+
+ bool IsFPConditionFalseIfNaN() const {
+ DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType()));
+ IfCondition if_cond = GetCondition();
+ return IsGtBias() ? ((if_cond == kCondLT) || (if_cond == kCondLE)) : (if_cond == kCondEQ);
+ }
+
private:
// For register allocation purposes, returns whether this instruction needs to be
// materialized (that is, not just be in the processor flags).
@@ -2391,7 +2403,7 @@
ComparisonBias GetBias() const { return bias_; }
- bool IsGtBias() { return bias_ == kGtBias; }
+ bool IsGtBias() { return bias_ == ComparisonBias::kGtBias; }
uint32_t GetDexPc() const { return dex_pc_; }
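
The two IsFPCondition*IfNaN() helpers above encode the dex contract spelled out in the ComparisonBias comments: CMPG_* yields +1 when an operand is NaN, CMPL_* yields -1, and the folded condition is then evaluated against zero. The helpers cover the NaN-true rows (GT/GE under gt bias; NE under lt bias) and NaN-false rows (LT/LE under gt bias; EQ under lt bias); for the remaining combinations the unordered hardware flags already branch the right way. A standalone sketch (plain C++, not ART code) of those dex-level outcomes:

    #include <cstdio>

    // Dex-level NaN outcomes for a folded "compare then test against zero":
    // gt bias (CMPG_*) makes NaN compare as +1, lt bias (CMPL_*) as -1.
    enum Cond { kEQ, kNE, kLT, kLE, kGT, kGE };
    static const char* kNames[] = {"EQ", "NE", "LT", "LE", "GT", "GE"};

    static bool Eval(Cond c, int cmp) {
      switch (c) {
        case kEQ: return cmp == 0;
        case kNE: return cmp != 0;
        case kLT: return cmp < 0;
        case kLE: return cmp <= 0;
        case kGT: return cmp > 0;
        case kGE: return cmp >= 0;
      }
      return false;
    }

    int main() {
      printf("cond | NaN w/ gt bias | NaN w/ lt bias\n");
      for (int c = kEQ; c <= kGE; ++c) {
        printf("%4s | %14d | %14d\n", kNames[c],
               Eval(static_cast<Cond>(c), +1),   // gt bias: NaN -> +1
               Eval(static_cast<Cond>(c), -1));  // lt bias: NaN -> -1
      }
      return 0;
    }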
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 1513296..6b4daed 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -32,8 +32,9 @@
// Defines constants and accessor classes to assemble, disassemble and
// simulate ARM instructions.
//
-// Section references in the code refer to the "ARM Architecture Reference
-// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
+// Section references in the code refer to the "ARM Architecture
+// Reference Manual ARMv7-A and ARMv7-R edition", issue C.b (24 July
+// 2012).
//
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
@@ -97,26 +98,32 @@
std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
-// Values for the condition field as defined in section A3.2.
+// Values for the condition field as defined in Table A8-1 "Condition
+// codes" (refer to Section A8.3 "Conditional execution").
enum Condition { // private marker to keep generate-operator-out.py from processing it.
kNoCondition = -1,
- EQ = 0, // equal
- NE = 1, // not equal
- CS = 2, // carry set/unsigned higher or same
- CC = 3, // carry clear/unsigned lower
- MI = 4, // minus/negative
- PL = 5, // plus/positive or zero
- VS = 6, // overflow
- VC = 7, // no overflow
- HI = 8, // unsigned higher
- LS = 9, // unsigned lower or same
- GE = 10, // signed greater than or equal
- LT = 11, // signed less than
- GT = 12, // signed greater than
- LE = 13, // signed less than or equal
- AL = 14, // always (unconditional)
- kSpecialCondition = 15, // special condition (refer to section A3.2.1)
+ // Meaning (integer) | Meaning (floating-point)
+ // ---------------------------------------+-----------------------------------------
+ EQ = 0, // Equal | Equal
+ NE = 1, // Not equal | Not equal, or unordered
+ CS = 2, // Carry set | Greater than, equal, or unordered
+ CC = 3, // Carry clear | Less than
+ MI = 4, // Minus, negative | Less than
+ PL = 5, // Plus, positive or zero | Greater than, equal, or unordered
+ VS = 6, // Overflow | Unordered (i.e. at least one NaN operand)
+ VC = 7, // No overflow | Not unordered
+ HI = 8, // Unsigned higher | Greater than, or unordered
+ LS = 9, // Unsigned lower or same | Less than or equal
+ GE = 10, // Signed greater than or equal | Greater than or equal
+ LT = 11, // Signed less than | Less than, or unordered
+ GT = 12, // Signed greater than | Greater than
+ LE = 13, // Signed less than or equal | Less than, equal, or unordered
+ AL = 14, // Always (unconditional) | Always (unconditional)
+ kSpecialCondition = 15, // Special condition (refer to Section A8.3 "Conditional execution").
kMaxCondition = 16,
+
+ HS = CS, // HS (unsigned higher or same) is a synonym for CS.
+ LO = CC // LO (unsigned lower) is a synonym for CC.
};
std::ostream& operator<<(std::ostream& os, const Condition& rhs);
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index f55dccd..84c465f 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -52,7 +52,7 @@
struct Options gOptions;
/*
- * Output file. Defaults to stdout, but tests can modify.
+ * Output file. Defaults to stdout.
*/
FILE* gOutFile = stdout;
@@ -63,8 +63,6 @@
typedef uint16_t u2;
typedef uint32_t u4;
typedef uint64_t u8;
-typedef int8_t s1;
-typedef int16_t s2;
typedef int32_t s4;
typedef int64_t s8;
@@ -1274,23 +1272,14 @@
return -1;
}
- // Determine if opening file yielded a single dex file. On failure,
- // the parse error message of the original dexdump utility is shown.
- //
- // TODO(ajcbik): this restriction is not really needed, but kept
- // for now to stay close to original dexdump; we can
- // later relax this!
- //
- if (dex_files.size() != 1) {
- fprintf(stderr, "ERROR: DEX parse failed\n");
- return -1;
- }
-
- // Success. Either report checksum verification or process dex file.
+ // Success. Either report checksum verification or process
+ // all dex files found in the given file.
if (gOptions.checksumOnly) {
fprintf(gOutFile, "Checksum verified\n");
} else {
- processDexFile(fileName, dex_files[0].get());
+ for (size_t i = 0; i < dex_files.size(); i++) {
+ processDexFile(fileName, dex_files[i].get());
+ }
}
return 0;
}
diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc
index 756f879..9be0922 100644
--- a/dexdump/dexdump_main.cc
+++ b/dexdump/dexdump_main.cc
@@ -108,8 +108,8 @@
default:
wantUsage = true;
break;
- }
- }
+ } // switch
+ } // while
// Detect early problems.
if (optind == argc) {
@@ -138,7 +138,7 @@
int result = 0;
while (optind < argc) {
result |= processFile(argv[optind++]);
- }
+ } // while
return result != 0;
}
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index d7c0e4c..d8fd242 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -51,11 +51,8 @@
* Data types that match the definitions in the VM specification.
*/
typedef uint8_t u1;
-typedef uint16_t u2;
typedef uint32_t u4;
typedef uint64_t u8;
-typedef int32_t s4;
-typedef int64_t s8;
/*
* Returns a newly-allocated string for the "dot version" of the class
@@ -193,23 +190,15 @@
return -1;
}
- // Determine if opening file yielded a single dex file.
- //
- // TODO(ajcbik): this restriction is not really needed, but kept
- // for now to stay close to original dexlist; we can
- // later relax this!
- //
- if (dex_files.size() != 1) {
- fprintf(stderr, "ERROR: DEX parse failed\n");
- return -1;
- }
- const DexFile* pDexFile = dex_files[0].get();
-
- // Success. Iterate over all classes.
+ // Success. Iterate over all dex files found in the given file.
fprintf(gOutFile, "#%s\n", fileName);
- const u4 classDefsSize = pDexFile->GetHeader().class_defs_size_;
- for (u4 idx = 0; idx < classDefsSize; idx++) {
- dumpClass(pDexFile, idx);
+ for (size_t i = 0; i < dex_files.size(); i++) {
+ // Iterate over all classes in one dex file.
+ const DexFile* pDexFile = dex_files[i].get();
+ const u4 classDefsSize = pDexFile->GetHeader().class_defs_size_;
+ for (u4 idx = 0; idx < classDefsSize; idx++) {
+ dumpClass(pDexFile, idx);
+ }
}
return 0;
}
@@ -246,7 +235,7 @@
gOptions.outputFileName = optarg;
break;
case 'm':
- // If -m X.Y.Z is given, then find all instances of the
+ // If -m x.y.z is given, then find all instances of the
// fully-qualified method name. This isn't really what
// dexlist is for, but it's easy to do it here.
{
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 26a45d3..0ae9cdf 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1704,11 +1704,12 @@
mirror::Class* instance_class = obj->GetClass();
CHECK(instance_class != nullptr);
for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
+ mirror::Class* klass = instance_counter->classes_[i];
if (instance_counter->use_is_assignable_from_) {
- if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
+ if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
++instance_counter->counts_[i];
}
- } else if (instance_class == instance_counter->classes_[i]) {
+ } else if (instance_class == klass) {
++instance_counter->counts_[i];
}
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 7f89b1d..fc27315 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -832,11 +832,22 @@
f->VisitRoots(visitor);
}
}
- for (auto& m : GetDirectMethods(pointer_size)) {
- m.VisitRoots(visitor);
+ // We may see GetDirectMethodsPtr() == null with NumDirectMethods() != 0 if the root marking
+ // thread reads a null DirectMethodsBegin() but a non-zero NumDirectMethods() due to a racing
+ // SetDirectMethodsPtr from class linking. The same applies to virtual methods.
+ // In this case it is safe to skip marking the roots, since we must be running either the CC
+ // or the CMS collector. If we are CMS, the roots are already marked through other sources;
+ // otherwise (CC) the roots are already marked due to the to-space invariant.
+ // Unchecked versions since we may visit roots of classes that aren't yet loaded.
+ if (GetDirectMethodsPtrUnchecked() != nullptr) {
+ for (auto& m : GetDirectMethods(pointer_size)) {
+ m.VisitRoots(visitor);
+ }
}
- for (auto& m : GetVirtualMethods(pointer_size)) {
- m.VisitRoots(visitor);
+ if (GetVirtualMethodsPtrUnchecked() != nullptr) {
+ for (auto& m : GetVirtualMethods(pointer_size)) {
+ m.VisitRoots(visitor);
+ }
}
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f0b7bfd..5bd6583 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -471,7 +471,8 @@
ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
- if (name == method.GetName() && method.GetSignature() == signature) {
+ ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
+ if (name == np_method->GetName() && np_method->GetSignature() == signature) {
return &method;
}
}
@@ -481,7 +482,8 @@
ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
- if (name == method.GetName() && signature == method.GetSignature()) {
+ ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
+ if (name == np_method->GetName() && signature == np_method->GetSignature()) {
return &method;
}
}
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 1078492..8febb62 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -257,21 +257,45 @@
static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass,
jboolean countAssignable) {
ScopedObjectAccess soa(env);
- gc::Heap* heap = Runtime::Current()->GetHeap();
- // We only want reachable instances, so do a GC. Heap::VisitObjects visits all of the heap
- // objects in the all spaces and the allocation stack.
- heap->CollectGarbage(false);
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ // Caller's responsibility to do GC if desired.
mirror::Class* c = soa.Decode<mirror::Class*>(javaClass);
if (c == nullptr) {
return 0;
}
- std::vector<mirror::Class*> classes;
- classes.push_back(c);
+ std::vector<mirror::Class*> classes {c};
uint64_t count = 0;
heap->CountInstances(classes, countAssignable, &count);
return count;
}
+static jlongArray VMDebug_countInstancesOfClasses(JNIEnv* env, jclass, jobjectArray javaClasses,
+ jboolean countAssignable) {
+ ScopedObjectAccess soa(env);
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ // Caller's responsibility to do GC if desired.
+ auto* decoded_classes = soa.Decode<mirror::ObjectArray<mirror::Class>*>(javaClasses);
+ if (decoded_classes == nullptr) {
+ return nullptr;
+ }
+ std::vector<mirror::Class*> classes;
+ for (size_t i = 0, count = decoded_classes->GetLength(); i < count; ++i) {
+ classes.push_back(decoded_classes->Get(i));
+ }
+ std::vector<uint64_t> counts(classes.size(), 0u);
+ // Heap::CountInstances can handle null and will put 0 for these classes.
+ heap->CountInstances(classes, countAssignable, &counts[0]);
+ auto* long_counts = mirror::LongArray::Alloc(soa.Self(), counts.size());
+ if (long_counts == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+ for (size_t i = 0; i < counts.size(); ++i) {
+ long_counts->Set(i, counts[i]);
+ }
+ return soa.AddLocalReference<jlongArray>(long_counts);
+}
+
// We export the VM internal per-heap-space size/alloc/free metrics
// for the zygote space, alloc space (application heap), and the large
// object space for dumpsys meminfo. The other memory region data such
@@ -452,6 +476,7 @@
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"),
+ NATIVE_METHOD(VMDebug, countInstancesOfClasses, "([Ljava/lang/Class;Z)[J"),
NATIVE_METHOD(VMDebug, crash, "()V"),
NATIVE_METHOD(VMDebug, dumpHprofData, "(Ljava/lang/String;Ljava/io/FileDescriptor;)V"),
NATIVE_METHOD(VMDebug, dumpHprofDataDdms, "()V"),
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 37a86f1..6656fe5 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -388,6 +388,24 @@
void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
CHECK(java_peer != nullptr);
Thread* self = static_cast<JNIEnvExt*>(env)->self;
+
+ if (VLOG_IS_ON(threads)) {
+ ScopedObjectAccess soa(env);
+
+ ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
+ mirror::String* java_name = reinterpret_cast<mirror::String*>(f->GetObject(
+ soa.Decode<mirror::Object*>(java_peer)));
+ std::string thread_name;
+ if (java_name != nullptr) {
+ thread_name = java_name->ToModifiedUtf8();
+ } else {
+ thread_name = "(Unnamed)";
+ }
+
+ VLOG(threads) << "Creating native thread for " << thread_name;
+ self->Dump(LOG(INFO));
+ }
+
Runtime* runtime = Runtime::Current();
// Atomically start the birth of the thread ensuring the runtime isn't shutting down.
@@ -556,6 +574,16 @@
}
}
+ if (VLOG_IS_ON(threads)) {
+ if (thread_name != nullptr) {
+ VLOG(threads) << "Attaching thread " << thread_name;
+ } else {
+ VLOG(threads) << "Attaching unnamed thread.";
+ }
+ ScopedObjectAccess soa(self);
+ self->Dump(LOG(INFO));
+ }
+
{
ScopedObjectAccess soa(self);
Dbg::PostThreadStart(self);
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index ca256ec..db0dd32 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -626,3 +626,7 @@
assert(strcmp(test_array, chars6) == 0);
env->ReleaseStringUTFChars(s6, chars6);
}
+
+extern "C" JNIEXPORT jlong JNICALL Java_Main_testGetMethodID(JNIEnv* env, jclass, jclass c) {
+ return reinterpret_cast<jlong>(env->GetMethodID(c, "a", "()V"));
+}
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index ac20417..810dda0 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -14,7 +14,9 @@
* limitations under the License.
*/
+import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
public class Main {
public static void main(String[] args) {
@@ -35,6 +37,7 @@
testCallNonvirtual();
testNewStringObject();
testRemoveLocalObject();
+ testProxyGetMethodID();
}
private static native void testFindClassOnAttachedNativeThread();
@@ -194,6 +197,31 @@
private static native void testCallNonvirtual();
private static native void testNewStringObject();
+
+ private interface SimpleInterface {
+ void a();
+ }
+
+ private static class DummyInvocationHandler implements InvocationHandler {
+ public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+ return null;
+ }
+ }
+
+ private static void testProxyGetMethodID() {
+ InvocationHandler handler = new DummyInvocationHandler();
+ SimpleInterface proxy =
+ (SimpleInterface) Proxy.newProxyInstance(SimpleInterface.class.getClassLoader(),
+ new Class[] {SimpleInterface.class}, handler);
+ if (testGetMethodID(SimpleInterface.class) == 0) {
+ throw new AssertionError();
+ }
+ if (testGetMethodID(proxy.getClass()) == 0) {
+ throw new AssertionError();
+ }
+ }
+
+ private static native long testGetMethodID(Class<?> c);
}
class JniCallNonvirtualTest {
diff --git a/test/099-vmdebug/expected.txt b/test/099-vmdebug/expected.txt
index 579f98f..b8d72f6 100644
--- a/test/099-vmdebug/expected.txt
+++ b/test/099-vmdebug/expected.txt
@@ -17,3 +17,9 @@
Got expected exception
Test sampling with bogus (<= 0) interval
Got expected exception
+Instances of ClassA 2
+Instances of ClassB 1
+Instances of null 0
+Instances of ClassA assignable 3
+Array counts [2, 1, 0]
+Array counts assignable [3, 1, 0]
diff --git a/test/099-vmdebug/src/Main.java b/test/099-vmdebug/src/Main.java
index add2ff6..1be5765 100644
--- a/test/099-vmdebug/src/Main.java
+++ b/test/099-vmdebug/src/Main.java
@@ -17,6 +17,8 @@
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.ArrayList;
import java.util.Map;
public class Main {
@@ -30,7 +32,9 @@
return;
}
testMethodTracing();
+ testCountInstances();
testRuntimeStat();
+ testRuntimeStats();
}
private static File createTempFile() throws Exception {
@@ -220,12 +224,39 @@
checkHistogram(blocking_gc_count_rate_histogram);
}
+ static class ClassA { }
+ static class ClassB { }
+ static class ClassC extends ClassA { }
+
+ private static void testCountInstances() throws Exception {
+ ArrayList<Object> l = new ArrayList<Object>();
+ l.add(new ClassA());
+ l.add(new ClassB());
+ l.add(new ClassA());
+ l.add(new ClassC());
+ Runtime.getRuntime().gc();
+ System.out.println("Instances of ClassA " +
+ VMDebug.countInstancesofClass(ClassA.class, false));
+ System.out.println("Instances of ClassB " +
+ VMDebug.countInstancesofClass(ClassB.class, false));
+ System.out.println("Instances of null " + VMDebug.countInstancesofClass(null, false));
+ System.out.println("Instances of ClassA assignable " +
+ VMDebug.countInstancesofClass(ClassA.class, true));
+ Class[] classes = new Class[]{ClassA.class, ClassB.class, null};
+ long[] counts = VMDebug.countInstancesofClasses(classes, false);
+ System.out.println("Array counts " + Arrays.toString(counts));
+ counts = VMDebug.countInstancesofClasses(classes, true);
+ System.out.println("Array counts assignable " + Arrays.toString(counts));
+ }
+
private static class VMDebug {
private static final Method startMethodTracingMethod;
private static final Method stopMethodTracingMethod;
private static final Method getMethodTracingModeMethod;
private static final Method getRuntimeStatMethod;
private static final Method getRuntimeStatsMethod;
+ private static final Method countInstancesOfClassMethod;
+ private static final Method countInstancesOfClassesMethod;
static {
try {
Class c = Class.forName("dalvik.system.VMDebug");
@@ -235,6 +266,10 @@
getMethodTracingModeMethod = c.getDeclaredMethod("getMethodTracingMode");
getRuntimeStatMethod = c.getDeclaredMethod("getRuntimeStat", String.class);
getRuntimeStatsMethod = c.getDeclaredMethod("getRuntimeStats");
+ countInstancesOfClassMethod = c.getDeclaredMethod("countInstancesOfClass",
+ Class.class, Boolean.TYPE);
+ countInstancesOfClassesMethod = c.getDeclaredMethod("countInstancesOfClasses",
+ Class[].class, Boolean.TYPE);
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -257,5 +292,13 @@
public static Map<String, String> getRuntimeStats() throws Exception {
return (Map<String, String>) getRuntimeStatsMethod.invoke(null);
}
+ public static long countInstancesofClass(Class c, boolean assignable) throws Exception {
+ return (long) countInstancesOfClassMethod.invoke(null, new Object[]{c, assignable});
+ }
+ public static long[] countInstancesofClasses(Class[] classes, boolean assignable)
+ throws Exception {
+ return (long[]) countInstancesOfClassesMethod.invoke(
+ null, new Object[]{classes, assignable});
+ }
}
}