Merge "Disable kBssEntry LoadString sharpening."
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index bab702a..f4a804f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -2941,6 +2941,104 @@
}
}
+bool InstructionCodeGeneratorMIPS::MaterializeIntCompare(IfCondition cond,
+ LocationSummary* input_locations,
+ Register dst) {
+ Register lhs = input_locations->InAt(0).AsRegister<Register>();
+ Location rhs_location = input_locations->InAt(1);
+ Register rhs_reg = ZERO;
+ int64_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<Register>();
+ }
+
+ switch (cond) {
+ case kCondEQ:
+ case kCondNE:
+ if (use_imm && IsInt<16>(-rhs_imm)) {
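+        // dst = lhs - rhs_imm; the difference is zero iff lhs == rhs_imm.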
+ __ Addiu(dst, lhs, -rhs_imm);
+ } else if (use_imm && IsUint<16>(rhs_imm)) {
+ __ Xori(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Xor(dst, lhs, rhs_reg);
+ }
+ return (cond == kCondEQ);
+
+ case kCondLT:
+ case kCondGE:
+ if (use_imm && IsInt<16>(rhs_imm)) {
+ __ Slti(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, lhs, rhs_reg);
+ }
+ return (cond == kCondGE);
+
+ case kCondLE:
+ case kCondGT:
+ if (use_imm && IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
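+        // E.g. "lhs <= 5" becomes "slti dst, lhs, 6"; for kCondGT the same
+        // instruction is emitted and the return value signals the inverted sense.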
+ __ Slti(dst, lhs, rhs_imm + 1);
+ return (cond == kCondGT);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, rhs_reg, lhs);
+ return (cond == kCondLE);
+ }
+
+ case kCondB:
+ case kCondAE:
+ if (use_imm && IsInt<16>(rhs_imm)) {
+ // Sltiu sign-extends its 16-bit immediate operand before
+ // the comparison and thus lets us compare directly with
+ // unsigned values in the ranges [0, 0x7fff] and
+ // [0xffff8000, 0xffffffff].
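+        // E.g. rhs_imm == -2 encodes as 0xfffe, which the hardware sign-extends
+        // back to 0xfffffffe for the unsigned comparison.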
+ __ Sltiu(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, lhs, rhs_reg);
+ }
+ return (cond == kCondAE);
+
+ case kCondBE:
+ case kCondA:
+ if (use_imm && (rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ // Note that this only works if rhs + 1 does not overflow
+ // to 0, hence the check above.
+ // Sltiu sign-extends its 16-bit immediate operand before
+ // the comparison and thus lets us compare directly with
+ // unsigned values in the ranges [0, 0x7fff] and
+ // [0xffff8000, 0xffffffff].
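+        // E.g. "lhs <=u 0x7ffe" becomes "sltiu dst, lhs, 0x7fff". For
+        // rhs_imm == -1 (0xffffffff) the +1 would wrap to 0, so the register
+        // variant below is used instead.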
+ __ Sltiu(dst, lhs, rhs_imm + 1);
+ return (cond == kCondA);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, rhs_reg, lhs);
+ return (cond == kCondBE);
+ }
+ }
+}
+
void InstructionCodeGeneratorMIPS::GenerateIntCompareAndBranch(IfCondition cond,
LocationSummary* locations,
MipsLabel* label) {
@@ -3555,6 +3653,190 @@
}
}
+bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR2(IfCondition cond,
+ bool gt_bias,
+ Primitive::Type type,
+ LocationSummary* input_locations,
+ int cc) {
+ FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>();
+ CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
+ if (type == Primitive::kPrimFloat) {
+ switch (cond) {
+ case kCondEQ:
+ __ CeqS(cc, lhs, rhs);
+ return false;
+ case kCondNE:
+ __ CeqS(cc, lhs, rhs);
+ return true;
+ case kCondLT:
+ if (gt_bias) {
+ __ ColtS(cc, lhs, rhs);
+ } else {
+ __ CultS(cc, lhs, rhs);
+ }
+ return false;
+ case kCondLE:
+ if (gt_bias) {
+ __ ColeS(cc, lhs, rhs);
+ } else {
+ __ CuleS(cc, lhs, rhs);
+ }
+ return false;
+ case kCondGT:
+ if (gt_bias) {
+ __ CultS(cc, rhs, lhs);
+ } else {
+ __ ColtS(cc, rhs, lhs);
+ }
+ return false;
+ case kCondGE:
+ if (gt_bias) {
+ __ CuleS(cc, rhs, lhs);
+ } else {
+ __ ColeS(cc, rhs, lhs);
+ }
+ return false;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition";
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ switch (cond) {
+ case kCondEQ:
+ __ CeqD(cc, lhs, rhs);
+ return false;
+ case kCondNE:
+ __ CeqD(cc, lhs, rhs);
+ return true;
+ case kCondLT:
+ if (gt_bias) {
+ __ ColtD(cc, lhs, rhs);
+ } else {
+ __ CultD(cc, lhs, rhs);
+ }
+ return false;
+ case kCondLE:
+ if (gt_bias) {
+ __ ColeD(cc, lhs, rhs);
+ } else {
+ __ CuleD(cc, lhs, rhs);
+ }
+ return false;
+ case kCondGT:
+ if (gt_bias) {
+ __ CultD(cc, rhs, lhs);
+ } else {
+ __ ColtD(cc, rhs, lhs);
+ }
+ return false;
+ case kCondGE:
+ if (gt_bias) {
+ __ CuleD(cc, rhs, lhs);
+ } else {
+ __ ColeD(cc, rhs, lhs);
+ }
+ return false;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition";
+ UNREACHABLE();
+ }
+ }
+}
+
+bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR6(IfCondition cond,
+ bool gt_bias,
+ Primitive::Type type,
+ LocationSummary* input_locations,
+ FRegister dst) {
+ FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>();
+ CHECK(codegen_->GetInstructionSetFeatures().IsR6());
+ if (type == Primitive::kPrimFloat) {
+ switch (cond) {
+ case kCondEQ:
+ __ CmpEqS(dst, lhs, rhs);
+ return false;
+ case kCondNE:
+ __ CmpEqS(dst, lhs, rhs);
+ return true;
+ case kCondLT:
+ if (gt_bias) {
+ __ CmpLtS(dst, lhs, rhs);
+ } else {
+ __ CmpUltS(dst, lhs, rhs);
+ }
+ return false;
+ case kCondLE:
+ if (gt_bias) {
+ __ CmpLeS(dst, lhs, rhs);
+ } else {
+ __ CmpUleS(dst, lhs, rhs);
+ }
+ return false;
+ case kCondGT:
+ if (gt_bias) {
+ __ CmpUltS(dst, rhs, lhs);
+ } else {
+ __ CmpLtS(dst, rhs, lhs);
+ }
+ return false;
+ case kCondGE:
+ if (gt_bias) {
+ __ CmpUleS(dst, rhs, lhs);
+ } else {
+ __ CmpLeS(dst, rhs, lhs);
+ }
+ return false;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition";
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ switch (cond) {
+ case kCondEQ:
+ __ CmpEqD(dst, lhs, rhs);
+ return false;
+ case kCondNE:
+ __ CmpEqD(dst, lhs, rhs);
+ return true;
+ case kCondLT:
+ if (gt_bias) {
+ __ CmpLtD(dst, lhs, rhs);
+ } else {
+ __ CmpUltD(dst, lhs, rhs);
+ }
+ return false;
+ case kCondLE:
+ if (gt_bias) {
+ __ CmpLeD(dst, lhs, rhs);
+ } else {
+ __ CmpUleD(dst, lhs, rhs);
+ }
+ return false;
+ case kCondGT:
+ if (gt_bias) {
+ __ CmpUltD(dst, rhs, lhs);
+ } else {
+ __ CmpLtD(dst, rhs, lhs);
+ }
+ return false;
+ case kCondGE:
+ if (gt_bias) {
+ __ CmpUleD(dst, rhs, lhs);
+ } else {
+ __ CmpLeD(dst, rhs, lhs);
+ }
+ return false;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition";
+ UNREACHABLE();
+ }
+ }
+}
+
void InstructionCodeGeneratorMIPS::GenerateFpCompareAndBranch(IfCondition cond,
bool gt_bias,
Primitive::Type type,
@@ -3608,6 +3890,7 @@
break;
default:
LOG(FATAL) << "Unexpected non-floating-point condition";
+ UNREACHABLE();
}
} else {
switch (cond) {
@@ -3653,6 +3936,7 @@
break;
default:
LOG(FATAL) << "Unexpected non-floating-point condition";
+ UNREACHABLE();
}
}
} else {
@@ -3701,6 +3985,7 @@
break;
default:
LOG(FATAL) << "Unexpected non-floating-point condition";
+ UNREACHABLE();
}
} else {
switch (cond) {
@@ -3746,6 +4031,7 @@
break;
default:
LOG(FATAL) << "Unexpected non-floating-point condition";
+ UNREACHABLE();
}
}
}
@@ -3862,30 +4148,562 @@
/* false_target */ nullptr);
}
+// This function returns true if a conditional move can be generated for HSelect.
+// Otherwise it returns false and HSelect must be implemented in terms of conditional
+// branches and regular moves.
+//
+// If `locations_to_set` isn't nullptr, its inputs and outputs are set for HSelect.
+//
+// While determining feasibility of a conditional move and setting inputs/outputs
+// are two distinct tasks, this function does both because they share quite a bit
+// of common logic.
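+//
+// For example, an int-typed HSelect on a materialized int condition whose true
+// value is the constant 0 can be lowered on R6 to a single
+// "seleqz out_reg, false_reg, cond_reg" (see the cases below).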
+static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) {
+ bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
+ HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HCondition* condition = cond->AsCondition();
+
+ Primitive::Type cond_type = materialized ? Primitive::kPrimInt : condition->InputAt(0)->GetType();
+ Primitive::Type dst_type = select->GetType();
+
+ HConstant* cst_true_value = select->GetTrueValue()->AsConstant();
+ HConstant* cst_false_value = select->GetFalseValue()->AsConstant();
+ bool is_true_value_zero_constant =
+ (cst_true_value != nullptr && cst_true_value->IsZeroBitPattern());
+ bool is_false_value_zero_constant =
+ (cst_false_value != nullptr && cst_false_value->IsZeroBitPattern());
+
+ bool can_move_conditionally = false;
+ bool use_const_for_false_in = false;
+ bool use_const_for_true_in = false;
+
+ if (!cond->IsConstant()) {
+ switch (cond_type) {
+ default:
+ switch (dst_type) {
+ default:
+ // Moving int on int condition.
+ if (is_r6) {
+ if (is_true_value_zero_constant) {
+ // seleqz out_reg, false_reg, cond_reg
+ can_move_conditionally = true;
+ use_const_for_true_in = true;
+ } else if (is_false_value_zero_constant) {
+ // selnez out_reg, true_reg, cond_reg
+ can_move_conditionally = true;
+ use_const_for_false_in = true;
+ } else if (materialized) {
+ // Not materializing unmaterialized int conditions
+ // to keep the instruction count low.
+ // selnez AT, true_reg, cond_reg
+ // seleqz TMP, false_reg, cond_reg
+ // or out_reg, AT, TMP
+ can_move_conditionally = true;
+ }
+ } else {
+ // movn out_reg, true_reg/ZERO, cond_reg
+ can_move_conditionally = true;
+ use_const_for_true_in = is_true_value_zero_constant;
+ }
+ break;
+ case Primitive::kPrimLong:
+ // Moving long on int condition.
+ if (is_r6) {
+ if (is_true_value_zero_constant) {
+ // seleqz out_reg_lo, false_reg_lo, cond_reg
+ // seleqz out_reg_hi, false_reg_hi, cond_reg
+ can_move_conditionally = true;
+ use_const_for_true_in = true;
+ } else if (is_false_value_zero_constant) {
+ // selnez out_reg_lo, true_reg_lo, cond_reg
+ // selnez out_reg_hi, true_reg_hi, cond_reg
+ can_move_conditionally = true;
+ use_const_for_false_in = true;
+ }
+ // Other long conditional moves would generate 6+ instructions,
+ // which is too many.
+ } else {
+ // movn out_reg_lo, true_reg_lo/ZERO, cond_reg
+ // movn out_reg_hi, true_reg_hi/ZERO, cond_reg
+ can_move_conditionally = true;
+ use_const_for_true_in = is_true_value_zero_constant;
+ }
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ // Moving float/double on int condition.
+ if (is_r6) {
+ if (materialized) {
+ // Not materializing unmaterialized int conditions
+ // to keep the instruction count low.
+ can_move_conditionally = true;
+ if (is_true_value_zero_constant) {
+ // sltu TMP, ZERO, cond_reg
+ // mtc1 TMP, temp_cond_reg
+ // seleqz.fmt out_reg, false_reg, temp_cond_reg
+ use_const_for_true_in = true;
+ } else if (is_false_value_zero_constant) {
+ // sltu TMP, ZERO, cond_reg
+ // mtc1 TMP, temp_cond_reg
+ // selnez.fmt out_reg, true_reg, temp_cond_reg
+ use_const_for_false_in = true;
+ } else {
+ // sltu TMP, ZERO, cond_reg
+ // mtc1 TMP, temp_cond_reg
+ // sel.fmt temp_cond_reg, false_reg, true_reg
+ // mov.fmt out_reg, temp_cond_reg
+ }
+ }
+ } else {
+ // movn.fmt out_reg, true_reg, cond_reg
+ can_move_conditionally = true;
+ }
+ break;
+ }
+ break;
+ case Primitive::kPrimLong:
+        // We don't materialize long comparisons for now
+        // and use conditional branches instead.
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ switch (dst_type) {
+ default:
+ // Moving int on float/double condition.
+ if (is_r6) {
+ if (is_true_value_zero_constant) {
+ // mfc1 TMP, temp_cond_reg
+ // seleqz out_reg, false_reg, TMP
+ can_move_conditionally = true;
+ use_const_for_true_in = true;
+ } else if (is_false_value_zero_constant) {
+ // mfc1 TMP, temp_cond_reg
+ // selnez out_reg, true_reg, TMP
+ can_move_conditionally = true;
+ use_const_for_false_in = true;
+ } else {
+ // mfc1 TMP, temp_cond_reg
+ // selnez AT, true_reg, TMP
+ // seleqz TMP, false_reg, TMP
+ // or out_reg, AT, TMP
+ can_move_conditionally = true;
+ }
+ } else {
+ // movt out_reg, true_reg/ZERO, cc
+ can_move_conditionally = true;
+ use_const_for_true_in = is_true_value_zero_constant;
+ }
+ break;
+ case Primitive::kPrimLong:
+ // Moving long on float/double condition.
+ if (is_r6) {
+ if (is_true_value_zero_constant) {
+ // mfc1 TMP, temp_cond_reg
+ // seleqz out_reg_lo, false_reg_lo, TMP
+ // seleqz out_reg_hi, false_reg_hi, TMP
+ can_move_conditionally = true;
+ use_const_for_true_in = true;
+ } else if (is_false_value_zero_constant) {
+ // mfc1 TMP, temp_cond_reg
+ // selnez out_reg_lo, true_reg_lo, TMP
+ // selnez out_reg_hi, true_reg_hi, TMP
+ can_move_conditionally = true;
+ use_const_for_false_in = true;
+ }
+ // Other long conditional moves would generate 6+ instructions,
+ // which is too many.
+ } else {
+ // movt out_reg_lo, true_reg_lo/ZERO, cc
+ // movt out_reg_hi, true_reg_hi/ZERO, cc
+ can_move_conditionally = true;
+ use_const_for_true_in = is_true_value_zero_constant;
+ }
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ // Moving float/double on float/double condition.
+ if (is_r6) {
+ can_move_conditionally = true;
+ if (is_true_value_zero_constant) {
+ // seleqz.fmt out_reg, false_reg, temp_cond_reg
+ use_const_for_true_in = true;
+ } else if (is_false_value_zero_constant) {
+ // selnez.fmt out_reg, true_reg, temp_cond_reg
+ use_const_for_false_in = true;
+ } else {
+ // sel.fmt temp_cond_reg, false_reg, true_reg
+ // mov.fmt out_reg, temp_cond_reg
+ }
+ } else {
+ // movt.fmt out_reg, true_reg, cc
+ can_move_conditionally = true;
+ }
+ break;
+ }
+ break;
+ }
+ }
+
+ if (can_move_conditionally) {
+ DCHECK(!use_const_for_false_in || !use_const_for_true_in);
+ } else {
+ DCHECK(!use_const_for_false_in);
+ DCHECK(!use_const_for_true_in);
+ }
+
+ if (locations_to_set != nullptr) {
+ if (use_const_for_false_in) {
+ locations_to_set->SetInAt(0, Location::ConstantLocation(cst_false_value));
+ } else {
+ locations_to_set->SetInAt(0,
+ Primitive::IsFloatingPointType(dst_type)
+ ? Location::RequiresFpuRegister()
+ : Location::RequiresRegister());
+ }
+ if (use_const_for_true_in) {
+ locations_to_set->SetInAt(1, Location::ConstantLocation(cst_true_value));
+ } else {
+ locations_to_set->SetInAt(1,
+ Primitive::IsFloatingPointType(dst_type)
+ ? Location::RequiresFpuRegister()
+ : Location::RequiresRegister());
+ }
+ if (materialized) {
+ locations_to_set->SetInAt(2, Location::RequiresRegister());
+ }
+    // Unlike on R2, on R6 we don't require the output to be the same as the
+    // first input for conditional moves.
+ bool is_out_same_as_first_in = !can_move_conditionally || !is_r6;
+ if (is_out_same_as_first_in) {
+ locations_to_set->SetOut(Location::SameAsFirstInput());
+ } else {
+ locations_to_set->SetOut(Primitive::IsFloatingPointType(dst_type)
+ ? Location::RequiresFpuRegister()
+ : Location::RequiresRegister());
+ }
+ }
+
+ return can_move_conditionally;
+}
+
+void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) {
+ LocationSummary* locations = select->GetLocations();
+ Location dst = locations->Out();
+ Location src = locations->InAt(1);
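+  // HSelect input 0 is the false value and input 1 the true value. On R2 the
+  // output shares a register with the false value (see the DCHECK below), so
+  // only the true value needs to be conditionally moved in.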
+ Register src_reg = ZERO;
+ Register src_reg_high = ZERO;
+ HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ Register cond_reg = TMP;
+ int cond_cc = 0;
+ Primitive::Type cond_type = Primitive::kPrimInt;
+ bool cond_inverted = false;
+ Primitive::Type dst_type = select->GetType();
+
+ if (IsBooleanValueOrMaterializedCondition(cond)) {
+ cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+ } else {
+ HCondition* condition = cond->AsCondition();
+ LocationSummary* cond_locations = cond->GetLocations();
+ IfCondition if_cond = condition->GetCondition();
+ cond_type = condition->InputAt(0)->GetType();
+ switch (cond_type) {
+ default:
+ DCHECK_NE(cond_type, Primitive::kPrimLong);
+ cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ cond_inverted = MaterializeFpCompareR2(if_cond,
+ condition->IsGtBias(),
+ cond_type,
+ cond_locations,
+ cond_cc);
+ break;
+ }
+ }
+
+ DCHECK(dst.Equals(locations->InAt(0)));
+ if (src.IsRegister()) {
+ src_reg = src.AsRegister<Register>();
+ } else if (src.IsRegisterPair()) {
+ src_reg = src.AsRegisterPairLow<Register>();
+ src_reg_high = src.AsRegisterPairHigh<Register>();
+ } else if (src.IsConstant()) {
+ DCHECK(src.GetConstant()->IsZeroBitPattern());
+ }
+
+ switch (cond_type) {
+ default:
+ switch (dst_type) {
+ default:
+ if (cond_inverted) {
+ __ Movz(dst.AsRegister<Register>(), src_reg, cond_reg);
+ } else {
+ __ Movn(dst.AsRegister<Register>(), src_reg, cond_reg);
+ }
+ break;
+ case Primitive::kPrimLong:
+ if (cond_inverted) {
+ __ Movz(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg);
+ __ Movz(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg);
+ } else {
+ __ Movn(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg);
+ __ Movn(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg);
+ }
+ break;
+ case Primitive::kPrimFloat:
+ if (cond_inverted) {
+ __ MovzS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
+ } else {
+ __ MovnS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
+ }
+ break;
+ case Primitive::kPrimDouble:
+ if (cond_inverted) {
+ __ MovzD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
+ } else {
+ __ MovnD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
+ }
+ break;
+ }
+ break;
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ switch (dst_type) {
+ default:
+ if (cond_inverted) {
+ __ Movf(dst.AsRegister<Register>(), src_reg, cond_cc);
+ } else {
+ __ Movt(dst.AsRegister<Register>(), src_reg, cond_cc);
+ }
+ break;
+ case Primitive::kPrimLong:
+ if (cond_inverted) {
+ __ Movf(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc);
+ __ Movf(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc);
+ } else {
+ __ Movt(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc);
+ __ Movt(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc);
+ }
+ break;
+ case Primitive::kPrimFloat:
+ if (cond_inverted) {
+ __ MovfS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
+ } else {
+ __ MovtS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
+ }
+ break;
+ case Primitive::kPrimDouble:
+ if (cond_inverted) {
+ __ MovfD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
+ } else {
+ __ MovtD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
+ }
+ break;
+ }
+ break;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
+ LocationSummary* locations = select->GetLocations();
+ Location dst = locations->Out();
+ Location false_src = locations->InAt(0);
+ Location true_src = locations->InAt(1);
+ HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ Register cond_reg = TMP;
+ FRegister fcond_reg = FTMP;
+ Primitive::Type cond_type = Primitive::kPrimInt;
+ bool cond_inverted = false;
+ Primitive::Type dst_type = select->GetType();
+
+ if (IsBooleanValueOrMaterializedCondition(cond)) {
+ cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+ } else {
+ HCondition* condition = cond->AsCondition();
+ LocationSummary* cond_locations = cond->GetLocations();
+ IfCondition if_cond = condition->GetCondition();
+ cond_type = condition->InputAt(0)->GetType();
+ switch (cond_type) {
+ default:
+ DCHECK_NE(cond_type, Primitive::kPrimLong);
+ cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ cond_inverted = MaterializeFpCompareR6(if_cond,
+ condition->IsGtBias(),
+ cond_type,
+ cond_locations,
+ fcond_reg);
+ break;
+ }
+ }
+
+ if (true_src.IsConstant()) {
+ DCHECK(true_src.GetConstant()->IsZeroBitPattern());
+ }
+ if (false_src.IsConstant()) {
+ DCHECK(false_src.GetConstant()->IsZeroBitPattern());
+ }
+
+ switch (dst_type) {
+ default:
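+      // If the condition is an FP compare, its all-ones/all-zeros result in
+      // fcond_reg is moved to a GPR so that seleqz/selnez can test it.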
+ if (Primitive::IsFloatingPointType(cond_type)) {
+ __ Mfc1(cond_reg, fcond_reg);
+ }
+ if (true_src.IsConstant()) {
+ if (cond_inverted) {
+ __ Selnez(dst.AsRegister<Register>(), false_src.AsRegister<Register>(), cond_reg);
+ } else {
+ __ Seleqz(dst.AsRegister<Register>(), false_src.AsRegister<Register>(), cond_reg);
+ }
+ } else if (false_src.IsConstant()) {
+ if (cond_inverted) {
+ __ Seleqz(dst.AsRegister<Register>(), true_src.AsRegister<Register>(), cond_reg);
+ } else {
+ __ Selnez(dst.AsRegister<Register>(), true_src.AsRegister<Register>(), cond_reg);
+ }
+ } else {
+ DCHECK_NE(cond_reg, AT);
+ if (cond_inverted) {
+ __ Seleqz(AT, true_src.AsRegister<Register>(), cond_reg);
+ __ Selnez(TMP, false_src.AsRegister<Register>(), cond_reg);
+ } else {
+ __ Selnez(AT, true_src.AsRegister<Register>(), cond_reg);
+ __ Seleqz(TMP, false_src.AsRegister<Register>(), cond_reg);
+ }
+ __ Or(dst.AsRegister<Register>(), AT, TMP);
+ }
+ break;
+ case Primitive::kPrimLong: {
+ if (Primitive::IsFloatingPointType(cond_type)) {
+ __ Mfc1(cond_reg, fcond_reg);
+ }
+ Register dst_lo = dst.AsRegisterPairLow<Register>();
+ Register dst_hi = dst.AsRegisterPairHigh<Register>();
+ if (true_src.IsConstant()) {
+ Register src_lo = false_src.AsRegisterPairLow<Register>();
+ Register src_hi = false_src.AsRegisterPairHigh<Register>();
+ if (cond_inverted) {
+ __ Selnez(dst_lo, src_lo, cond_reg);
+ __ Selnez(dst_hi, src_hi, cond_reg);
+ } else {
+ __ Seleqz(dst_lo, src_lo, cond_reg);
+ __ Seleqz(dst_hi, src_hi, cond_reg);
+ }
+ } else {
+ DCHECK(false_src.IsConstant());
+ Register src_lo = true_src.AsRegisterPairLow<Register>();
+ Register src_hi = true_src.AsRegisterPairHigh<Register>();
+ if (cond_inverted) {
+ __ Seleqz(dst_lo, src_lo, cond_reg);
+ __ Seleqz(dst_hi, src_hi, cond_reg);
+ } else {
+ __ Selnez(dst_lo, src_lo, cond_reg);
+ __ Selnez(dst_hi, src_hi, cond_reg);
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimFloat: {
+ if (!Primitive::IsFloatingPointType(cond_type)) {
+ // sel*.fmt tests bit 0 of the condition register, account for that.
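+        // Sltu normalizes any non-zero condition value to exactly 1 before it
+        // is moved into the FPU register used as the sel.fmt condition.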
+ __ Sltu(TMP, ZERO, cond_reg);
+ __ Mtc1(TMP, fcond_reg);
+ }
+ FRegister dst_reg = dst.AsFpuRegister<FRegister>();
+ if (true_src.IsConstant()) {
+ FRegister src_reg = false_src.AsFpuRegister<FRegister>();
+ if (cond_inverted) {
+ __ SelnezS(dst_reg, src_reg, fcond_reg);
+ } else {
+ __ SeleqzS(dst_reg, src_reg, fcond_reg);
+ }
+ } else if (false_src.IsConstant()) {
+ FRegister src_reg = true_src.AsFpuRegister<FRegister>();
+ if (cond_inverted) {
+ __ SeleqzS(dst_reg, src_reg, fcond_reg);
+ } else {
+ __ SelnezS(dst_reg, src_reg, fcond_reg);
+ }
+ } else {
+ if (cond_inverted) {
+ __ SelS(fcond_reg,
+ true_src.AsFpuRegister<FRegister>(),
+ false_src.AsFpuRegister<FRegister>());
+ } else {
+ __ SelS(fcond_reg,
+ false_src.AsFpuRegister<FRegister>(),
+ true_src.AsFpuRegister<FRegister>());
+ }
+ __ MovS(dst_reg, fcond_reg);
+ }
+ break;
+ }
+ case Primitive::kPrimDouble: {
+ if (!Primitive::IsFloatingPointType(cond_type)) {
+ // sel*.fmt tests bit 0 of the condition register, account for that.
+ __ Sltu(TMP, ZERO, cond_reg);
+ __ Mtc1(TMP, fcond_reg);
+ }
+ FRegister dst_reg = dst.AsFpuRegister<FRegister>();
+ if (true_src.IsConstant()) {
+ FRegister src_reg = false_src.AsFpuRegister<FRegister>();
+ if (cond_inverted) {
+ __ SelnezD(dst_reg, src_reg, fcond_reg);
+ } else {
+ __ SeleqzD(dst_reg, src_reg, fcond_reg);
+ }
+ } else if (false_src.IsConstant()) {
+ FRegister src_reg = true_src.AsFpuRegister<FRegister>();
+ if (cond_inverted) {
+ __ SeleqzD(dst_reg, src_reg, fcond_reg);
+ } else {
+ __ SelnezD(dst_reg, src_reg, fcond_reg);
+ }
+ } else {
+ if (cond_inverted) {
+ __ SelD(fcond_reg,
+ true_src.AsFpuRegister<FRegister>(),
+ false_src.AsFpuRegister<FRegister>());
+ } else {
+ __ SelD(fcond_reg,
+ false_src.AsFpuRegister<FRegister>(),
+ true_src.AsFpuRegister<FRegister>());
+ }
+ __ MovD(dst_reg, fcond_reg);
+ }
+ break;
+ }
+ }
+}
+
void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
- if (Primitive::IsFloatingPointType(select->GetType())) {
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
- } else {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- }
- if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) {
- locations->SetInAt(2, Location::RequiresRegister());
- }
- locations->SetOut(Location::SameAsFirstInput());
+ CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations);
}
void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
- LocationSummary* locations = select->GetLocations();
- MipsLabel false_target;
- GenerateTestAndBranch(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
- &false_target);
- codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
- __ Bind(&false_target);
+ bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
+ if (CanMoveConditionally(select, is_r6, /* locations_to_set */ nullptr)) {
+ if (is_r6) {
+ GenConditionalMoveR6(select);
+ } else {
+ GenConditionalMoveR2(select);
+ }
+ } else {
+ LocationSummary* locations = select->GetLocations();
+ MipsLabel false_target;
+ GenerateTestAndBranch(select,
+ /* condition_input_index */ 2,
+ /* true_target */ nullptr,
+ &false_target);
+ codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
+ __ Bind(&false_target);
+ }
}
void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index b8bd96a..e132819 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -247,6 +247,12 @@
Register obj,
uint32_t offset);
void GenerateIntCompare(IfCondition cond, LocationSummary* locations);
+ // When the function returns `false` it means that the condition holds if `dst` is non-zero
+ // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
+ // `dst` are exchanged.
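+  // For example, kCondNE may emit "xor dst, lhs, rhs" and return `false` (NE
+  // holds when `dst` is non-zero), while kCondEQ emits the same instruction
+  // and returns `true` (EQ holds when `dst` is zero).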
+ bool MaterializeIntCompare(IfCondition cond,
+ LocationSummary* input_locations,
+ Register dst);
void GenerateIntCompareAndBranch(IfCondition cond,
LocationSummary* locations,
MipsLabel* label);
@@ -257,6 +263,22 @@
bool gt_bias,
Primitive::Type type,
LocationSummary* locations);
+ // When the function returns `false` it means that the condition holds if the condition
+ // code flag `cc` is non-zero and doesn't hold if `cc` is zero. If it returns `true`,
+ // the roles of zero and non-zero values of the `cc` flag are exchanged.
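+  // For example, kCondLT with gt_bias emits "c.olt.s/d cc, lhs, rhs" (false on
+  // NaN) and returns `false`; the flag is then consumed by movt/movf.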
+ bool MaterializeFpCompareR2(IfCondition cond,
+ bool gt_bias,
+ Primitive::Type type,
+ LocationSummary* input_locations,
+ int cc);
+ // When the function returns `false` it means that the condition holds if `dst` is non-zero
+ // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
+ // `dst` are exchanged.
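+  // For example, kCondEQ emits "cmp.eq.s/d dst, lhs, rhs" and returns `false`,
+  // while kCondNE emits the same compare and returns `true` (NE holds when
+  // `dst` is all zeros).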
+ bool MaterializeFpCompareR6(IfCondition cond,
+ bool gt_bias,
+ Primitive::Type type,
+ LocationSummary* input_locations,
+ FRegister dst);
void GenerateFpCompareAndBranch(IfCondition cond,
bool gt_bias,
Primitive::Type type,
@@ -283,6 +305,8 @@
uint32_t num_entries,
HBasicBlock* switch_block,
HBasicBlock* default_block);
+ void GenConditionalMoveR2(HSelect* select);
+ void GenConditionalMoveR6(HSelect* select);
MipsAssembler* const assembler_;
CodeGeneratorMIPS* const codegen_;
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 92b4c8e..9c65280 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -331,6 +331,18 @@
fmt);
}
+ std::string RepeatFFR(void (Ass::*f)(FPReg, FPReg, Reg), std::string fmt) {
+ return RepeatTemplatedRegisters<FPReg, FPReg, Reg>(
+ f,
+ GetFPRegisters(),
+ GetFPRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
std::string RepeatFFI(void (Ass::*f)(FPReg, FPReg, const Imm&),
size_t imm_bytes,
std::string fmt) {
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index b972c70..b29974c 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -192,6 +192,13 @@
DsFsmInstr(instruction, 0, 0, (1u << in1_out), (1u << in1_out) | (1u << in2) | (1u << in3), 0, 0);
}
+void MipsAssembler::DsFsmInstrFffr(uint32_t instruction,
+ FRegister in1_out,
+ FRegister in2,
+ Register in3) {
+ DsFsmInstr(instruction, 0, (1u << in3), (1u << in1_out), (1u << in1_out) | (1u << in2), 0, 0);
+}
+
void MipsAssembler::DsFsmInstrRf(uint32_t instruction, Register out, FRegister in) {
DsFsmInstr(instruction, (1u << out), 0, 0, (1u << in), 0, 0);
}
@@ -1446,6 +1453,26 @@
cc);
}
+void MipsAssembler::MovzS(FRegister fd, FRegister fs, Register rt) {
+ CHECK(!IsR6());
+ DsFsmInstrFffr(EmitFR(0x11, 0x10, static_cast<FRegister>(rt), fs, fd, 0x12), fd, fs, rt);
+}
+
+void MipsAssembler::MovzD(FRegister fd, FRegister fs, Register rt) {
+ CHECK(!IsR6());
+ DsFsmInstrFffr(EmitFR(0x11, 0x11, static_cast<FRegister>(rt), fs, fd, 0x12), fd, fs, rt);
+}
+
+void MipsAssembler::MovnS(FRegister fd, FRegister fs, Register rt) {
+ CHECK(!IsR6());
+ DsFsmInstrFffr(EmitFR(0x11, 0x10, static_cast<FRegister>(rt), fs, fd, 0x13), fd, fs, rt);
+}
+
+void MipsAssembler::MovnD(FRegister fd, FRegister fs, Register rt) {
+ CHECK(!IsR6());
+ DsFsmInstrFffr(EmitFR(0x11, 0x11, static_cast<FRegister>(rt), fs, fd, 0x13), fd, fs, rt);
+}
+
void MipsAssembler::SelS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
DsFsmInstrFfff(EmitFR(0x11, 0x10, ft, fs, fd, 0x10), fd, fs, ft);
@@ -1456,6 +1483,26 @@
DsFsmInstrFfff(EmitFR(0x11, 0x11, ft, fs, fd, 0x10), fd, fs, ft);
}
+void MipsAssembler::SeleqzS(FRegister fd, FRegister fs, FRegister ft) {
+ CHECK(IsR6());
+ DsFsmInstrFff(EmitFR(0x11, 0x10, ft, fs, fd, 0x14), fd, fs, ft);
+}
+
+void MipsAssembler::SeleqzD(FRegister fd, FRegister fs, FRegister ft) {
+ CHECK(IsR6());
+ DsFsmInstrFff(EmitFR(0x11, 0x11, ft, fs, fd, 0x14), fd, fs, ft);
+}
+
+void MipsAssembler::SelnezS(FRegister fd, FRegister fs, FRegister ft) {
+ CHECK(IsR6());
+ DsFsmInstrFff(EmitFR(0x11, 0x10, ft, fs, fd, 0x17), fd, fs, ft);
+}
+
+void MipsAssembler::SelnezD(FRegister fd, FRegister fs, FRegister ft) {
+ CHECK(IsR6());
+ DsFsmInstrFff(EmitFR(0x11, 0x11, ft, fs, fd, 0x17), fd, fs, ft);
+}
+
void MipsAssembler::ClassS(FRegister fd, FRegister fs) {
CHECK(IsR6());
DsFsmInstrFff(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x1b), fd, fs, fs);
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index b932fb8..800dc5f 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -414,8 +414,16 @@
void MovfD(FRegister fd, FRegister fs, int cc = 0); // R2
void MovtS(FRegister fd, FRegister fs, int cc = 0); // R2
void MovtD(FRegister fd, FRegister fs, int cc = 0); // R2
+ void MovzS(FRegister fd, FRegister fs, Register rt); // R2
+ void MovzD(FRegister fd, FRegister fs, Register rt); // R2
+ void MovnS(FRegister fd, FRegister fs, Register rt); // R2
+ void MovnD(FRegister fd, FRegister fs, Register rt); // R2
void SelS(FRegister fd, FRegister fs, FRegister ft); // R6
void SelD(FRegister fd, FRegister fs, FRegister ft); // R6
+ void SeleqzS(FRegister fd, FRegister fs, FRegister ft); // R6
+ void SeleqzD(FRegister fd, FRegister fs, FRegister ft); // R6
+ void SelnezS(FRegister fd, FRegister fs, FRegister ft); // R6
+ void SelnezD(FRegister fd, FRegister fs, FRegister ft); // R6
void ClassS(FRegister fd, FRegister fs); // R6
void ClassD(FRegister fd, FRegister fs); // R6
void MinS(FRegister fd, FRegister fs, FRegister ft); // R6
@@ -1257,6 +1265,7 @@
void DsFsmInstrRrrr(uint32_t instruction, Register in1_out, Register in2, Register in3);
void DsFsmInstrFff(uint32_t instruction, FRegister out, FRegister in1, FRegister in2);
void DsFsmInstrFfff(uint32_t instruction, FRegister in1_out, FRegister in2, FRegister in3);
+ void DsFsmInstrFffr(uint32_t instruction, FRegister in1_out, FRegister in2, Register in3);
void DsFsmInstrRf(uint32_t instruction, Register out, FRegister in);
void DsFsmInstrFr(uint32_t instruction, FRegister out, Register in);
void DsFsmInstrFR(uint32_t instruction, FRegister in1, Register in2);
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 750a94d..3ef2f94 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -349,6 +349,26 @@
DriverStr(RepeatFFF(&mips::MipsAssembler::SelD, "sel.d ${reg1}, ${reg2}, ${reg3}"), "sel.d");
}
+TEST_F(AssemblerMIPS32r6Test, SeleqzS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::SeleqzS, "seleqz.s ${reg1}, ${reg2}, ${reg3}"),
+ "seleqz.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SeleqzD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::SeleqzD, "seleqz.d ${reg1}, ${reg2}, ${reg3}"),
+ "seleqz.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SelnezS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::SelnezS, "selnez.s ${reg1}, ${reg2}, ${reg3}"),
+ "selnez.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SelnezD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::SelnezD, "selnez.d ${reg1}, ${reg2}, ${reg3}"),
+ "selnez.d");
+}
+
TEST_F(AssemblerMIPS32r6Test, ClassS) {
DriverStr(RepeatFF(&mips::MipsAssembler::ClassS, "class.s ${reg1}, ${reg2}"), "class.s");
}
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index a9abf2f..75149cf 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -667,6 +667,22 @@
"MovtD");
}
+TEST_F(AssemblerMIPSTest, MovzS) {
+ DriverStr(RepeatFFR(&mips::MipsAssembler::MovzS, "movz.s ${reg1}, ${reg2}, ${reg3}"), "MovzS");
+}
+
+TEST_F(AssemblerMIPSTest, MovzD) {
+ DriverStr(RepeatFFR(&mips::MipsAssembler::MovzD, "movz.d ${reg1}, ${reg2}, ${reg3}"), "MovzD");
+}
+
+TEST_F(AssemblerMIPSTest, MovnS) {
+ DriverStr(RepeatFFR(&mips::MipsAssembler::MovnS, "movn.s ${reg1}, ${reg2}, ${reg3}"), "MovnS");
+}
+
+TEST_F(AssemblerMIPSTest, MovnD) {
+ DriverStr(RepeatFFR(&mips::MipsAssembler::MovnD, "movn.d ${reg1}, ${reg2}, ${reg3}"), "MovnD");
+}
+
TEST_F(AssemblerMIPSTest, CvtSW) {
DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "CvtSW");
}
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 9a73f29..4787395 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -389,6 +389,10 @@
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 15, "floor.w", "fad" },
{ kFpMask | (0x201 << 16), kCop1 | (0x200 << 16) | 17, "movf", "fadc" },
{ kFpMask | (0x201 << 16), kCop1 | (0x201 << 16) | 17, "movt", "fadc" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 18, "movz", "fadT" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 19, "movn", "fadT" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 20, "seleqz", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 23, "selnez", "fadt" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 26, "rint", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 27, "class", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 32, "cvt.s", "fad" },
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_double.S b/runtime/interpreter/mterp/mips/op_cmpl_double.S
index 5a47fd7..db89242 100644
--- a/runtime/interpreter/mterp/mips/op_cmpl_double.S
+++ b/runtime/interpreter/mterp/mips/op_cmpl_double.S
@@ -20,10 +20,10 @@
LOAD64_F(ft0, ft0f, rOBJ)
LOAD64_F(ft1, ft1f, t0)
#ifdef MIPS32REVGE6
- cmp.ult.d ft2, ft0, ft1
+ cmp.lt.d ft2, ft0, ft1
li rTEMP, -1
bc1nez ft2, .L${opcode}_finish
- cmp.ult.d ft2, ft1, ft0
+ cmp.lt.d ft2, ft1, ft0
li rTEMP, 1
bc1nez ft2, .L${opcode}_finish
cmp.eq.d ft2, ft0, ft1
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_float.S b/runtime/interpreter/mterp/mips/op_cmpl_float.S
index cfd87ee..b8c0961 100644
--- a/runtime/interpreter/mterp/mips/op_cmpl_float.S
+++ b/runtime/interpreter/mterp/mips/op_cmpl_float.S
@@ -27,10 +27,10 @@
GET_VREG_F(ft0, a2)
GET_VREG_F(ft1, a3)
#ifdef MIPS32REVGE6
- cmp.ult.s ft2, ft0, ft1 # Is ft0 < ft1
+ cmp.lt.s ft2, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1nez ft2, .L${opcode}_finish
- cmp.ult.s ft2, ft1, ft0
+ cmp.lt.s ft2, ft1, ft0
li rTEMP, 1
bc1nez ft2, .L${opcode}_finish
cmp.eq.s ft2, ft0, ft1
diff --git a/runtime/interpreter/mterp/mips/op_double_to_int.S b/runtime/interpreter/mterp/mips/op_double_to_int.S
index 30a0a73..b1792ec 100644
--- a/runtime/interpreter/mterp/mips/op_double_to_int.S
+++ b/runtime/interpreter/mterp/mips/op_double_to_int.S
@@ -12,13 +12,13 @@
#ifdef MIPS32REVGE6
la t0, .LDOUBLE_TO_INT_max
LOAD64_F(fa1, fa1f, t0)
- cmp.ule.d ft2, fa1, fa0
+ cmp.le.d ft2, fa1, fa0
l.s fv0, .LDOUBLE_TO_INT_maxret
bc1nez ft2, .L${opcode}_set_vreg_f
la t0, .LDOUBLE_TO_INT_min
LOAD64_F(fa1, fa1f, t0)
- cmp.ule.d ft2, fa0, fa1
+ cmp.le.d ft2, fa0, fa1
l.s fv0, .LDOUBLE_TO_INT_minret
bc1nez ft2, .L${opcode}_set_vreg_f
diff --git a/runtime/interpreter/mterp/mips/op_double_to_long.S b/runtime/interpreter/mterp/mips/op_double_to_long.S
index 4f9e367..7f7a799 100644
--- a/runtime/interpreter/mterp/mips/op_double_to_long.S
+++ b/runtime/interpreter/mterp/mips/op_double_to_long.S
@@ -5,14 +5,14 @@
#ifdef MIPS32REVGE6
la t0, .LDOUBLE_TO_LONG_max
LOAD64_F(fa1, fa1f, t0)
- cmp.ule.d ft2, fa1, fa0
+ cmp.le.d ft2, fa1, fa0
la t0, .LDOUBLE_TO_LONG_ret_max
LOAD64(rRESULT0, rRESULT1, t0)
bc1nez ft2, .L${opcode}_set_vreg
la t0, .LDOUBLE_TO_LONG_min
LOAD64_F(fa1, fa1f, t0)
- cmp.ule.d ft2, fa0, fa1
+ cmp.le.d ft2, fa0, fa1
la t0, .LDOUBLE_TO_LONG_ret_min
LOAD64(rRESULT0, rRESULT1, t0)
bc1nez ft2, .L${opcode}_set_vreg
diff --git a/runtime/interpreter/mterp/mips/op_float_to_int.S b/runtime/interpreter/mterp/mips/op_float_to_int.S
index e032869..8292652 100644
--- a/runtime/interpreter/mterp/mips/op_float_to_int.S
+++ b/runtime/interpreter/mterp/mips/op_float_to_int.S
@@ -7,12 +7,12 @@
f2i_doconv:
#ifdef MIPS32REVGE6
l.s fa1, .LFLOAT_TO_INT_max
- cmp.ule.s ft2, fa1, fa0
+ cmp.le.s ft2, fa1, fa0
l.s fv0, .LFLOAT_TO_INT_ret_max
bc1nez ft2, .L${opcode}_set_vreg_f
l.s fa1, .LFLOAT_TO_INT_min
- cmp.ule.s ft2, fa0, fa1
+ cmp.le.s ft2, fa0, fa1
l.s fv0, .LFLOAT_TO_INT_ret_min
bc1nez ft2, .L${opcode}_set_vreg_f
diff --git a/runtime/interpreter/mterp/mips/op_float_to_long.S b/runtime/interpreter/mterp/mips/op_float_to_long.S
index 77b2c46..a51384f 100644
--- a/runtime/interpreter/mterp/mips/op_float_to_long.S
+++ b/runtime/interpreter/mterp/mips/op_float_to_long.S
@@ -4,13 +4,13 @@
f2l_doconv:
#ifdef MIPS32REVGE6
l.s fa1, .LLONG_TO_max
- cmp.ule.s ft2, fa1, fa0
+ cmp.le.s ft2, fa1, fa0
li rRESULT0, ~0
li rRESULT1, ~0x80000000
bc1nez ft2, .L${opcode}_set_vreg
l.s fa1, .LLONG_TO_min
- cmp.ule.s ft2, fa0, fa1
+ cmp.le.s ft2, fa0, fa1
li rRESULT0, 0
li rRESULT1, 0x80000000
bc1nez ft2, .L${opcode}_set_vreg
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index aadbf20..c1ba794 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -1420,10 +1420,10 @@
GET_VREG_F(ft0, a2)
GET_VREG_F(ft1, a3)
#ifdef MIPS32REVGE6
- cmp.ult.s ft2, ft0, ft1 # Is ft0 < ft1
+ cmp.lt.s ft2, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1nez ft2, .Lop_cmpl_float_finish
- cmp.ult.s ft2, ft1, ft0
+ cmp.lt.s ft2, ft1, ft0
li rTEMP, 1
bc1nez ft2, .Lop_cmpl_float_finish
cmp.eq.s ft2, ft0, ft1
@@ -1476,10 +1476,10 @@
GET_VREG_F(ft0, a2)
GET_VREG_F(ft1, a3)
#ifdef MIPS32REVGE6
- cmp.ult.s ft2, ft0, ft1 # Is ft0 < ft1
+ cmp.lt.s ft2, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1nez ft2, .Lop_cmpg_float_finish
- cmp.ult.s ft2, ft1, ft0
+ cmp.lt.s ft2, ft1, ft0
li rTEMP, 1
bc1nez ft2, .Lop_cmpg_float_finish
cmp.eq.s ft2, ft0, ft1
@@ -1525,10 +1525,10 @@
LOAD64_F(ft0, ft0f, rOBJ)
LOAD64_F(ft1, ft1f, t0)
#ifdef MIPS32REVGE6
- cmp.ult.d ft2, ft0, ft1
+ cmp.lt.d ft2, ft0, ft1
li rTEMP, -1
bc1nez ft2, .Lop_cmpl_double_finish
- cmp.ult.d ft2, ft1, ft0
+ cmp.lt.d ft2, ft1, ft0
li rTEMP, 1
bc1nez ft2, .Lop_cmpl_double_finish
cmp.eq.d ft2, ft0, ft1
@@ -1574,10 +1574,10 @@
LOAD64_F(ft0, ft0f, rOBJ)
LOAD64_F(ft1, ft1f, t0)
#ifdef MIPS32REVGE6
- cmp.ult.d ft2, ft0, ft1
+ cmp.lt.d ft2, ft0, ft1
li rTEMP, -1
bc1nez ft2, .Lop_cmpg_double_finish
- cmp.ult.d ft2, ft1, ft0
+ cmp.lt.d ft2, ft1, ft0
li rTEMP, 1
bc1nez ft2, .Lop_cmpg_double_finish
cmp.eq.d ft2, ft0, ft1
@@ -7746,12 +7746,12 @@
f2i_doconv:
#ifdef MIPS32REVGE6
l.s fa1, .LFLOAT_TO_INT_max
- cmp.ule.s ft2, fa1, fa0
+ cmp.le.s ft2, fa1, fa0
l.s fv0, .LFLOAT_TO_INT_ret_max
bc1nez ft2, .Lop_float_to_int_set_vreg_f
l.s fa1, .LFLOAT_TO_INT_min
- cmp.ule.s ft2, fa0, fa1
+ cmp.le.s ft2, fa0, fa1
l.s fv0, .LFLOAT_TO_INT_ret_min
bc1nez ft2, .Lop_float_to_int_set_vreg_f
@@ -7793,13 +7793,13 @@
f2l_doconv:
#ifdef MIPS32REVGE6
l.s fa1, .LLONG_TO_max
- cmp.ule.s ft2, fa1, fa0
+ cmp.le.s ft2, fa1, fa0
li rRESULT0, ~0
li rRESULT1, ~0x80000000
bc1nez ft2, .Lop_float_to_long_set_vreg
l.s fa1, .LLONG_TO_min
- cmp.ule.s ft2, fa0, fa1
+ cmp.le.s ft2, fa0, fa1
li rRESULT0, 0
li rRESULT1, 0x80000000
bc1nez ft2, .Lop_float_to_long_set_vreg
@@ -7845,13 +7845,13 @@
#ifdef MIPS32REVGE6
la t0, .LDOUBLE_TO_INT_max
LOAD64_F(fa1, fa1f, t0)
- cmp.ule.d ft2, fa1, fa0
+ cmp.le.d ft2, fa1, fa0
l.s fv0, .LDOUBLE_TO_INT_maxret
bc1nez ft2, .Lop_double_to_int_set_vreg_f
la t0, .LDOUBLE_TO_INT_min
LOAD64_F(fa1, fa1f, t0)
- cmp.ule.d ft2, fa0, fa1
+ cmp.le.d ft2, fa0, fa1
l.s fv0, .LDOUBLE_TO_INT_minret
bc1nez ft2, .Lop_double_to_int_set_vreg_f
@@ -7896,14 +7896,14 @@
#ifdef MIPS32REVGE6
la t0, .LDOUBLE_TO_LONG_max
LOAD64_F(fa1, fa1f, t0)
- cmp.ule.d ft2, fa1, fa0
+ cmp.le.d ft2, fa1, fa0
la t0, .LDOUBLE_TO_LONG_ret_max
LOAD64(rRESULT0, rRESULT1, t0)
bc1nez ft2, .Lop_double_to_long_set_vreg
la t0, .LDOUBLE_TO_LONG_min
LOAD64_F(fa1, fa1f, t0)
- cmp.ule.d ft2, fa0, fa1
+ cmp.le.d ft2, fa0, fa1
la t0, .LDOUBLE_TO_LONG_ret_min
LOAD64(rRESULT0, rRESULT1, t0)
bc1nez ft2, .Lop_double_to_long_set_vreg