Tweak Mir2Lir::GenInstanceofCallingHelper for X86

Make GenInstanceofCallingHelper virtual, and split out the X86-specific
logic.  Take advantage of the SETcc instruction on X86.
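
For illustration only (not part of the patch), here is the difference
between the load-constant-and-branch pattern in the shared code and the
flag-based form that SETcc enables, sketched as standalone C++ with
invented function names:

    #include <cstdint>

    // Branch-based shape, like the shared path: load a constant, then
    // conditionally branch over the store of the other value.
    int32_t SameClassBranching(uintptr_t ref_klass, uintptr_t klass) {
      int32_t result = 1;            // assume true
      if (ref_klass != klass) {      // conditional branch
        result = 0;
      }
      return result;
    }

    // Flag-based shape: this lowers to "cmp + sete" on a zeroed
    // register, the pattern kX86Set8R produces in the new X86 helper.
    int32_t SameClassSetcc(uintptr_t ref_klass, uintptr_t klass) {
      return static_cast<int32_t>(ref_klass == klass);
    }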

I don't think I can do much more, due to the need to preserve the
arguments for the helper calls.
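
As a rough sketch (again not part of the patch, with invented names),
the control flow the new X86 GenInstanceofCallingHelper emits once the
Class* has been resolved corresponds to the following, where
InstanceofNonTrivial stands in for the pInstanceofNonTrivial
entrypoint:

    #include <cstdint>

    // Minimal stand-ins; the real types live in runtime/mirror/.
    struct Class;
    struct Object { Class* klass_; };

    // Stand-in for the pInstanceofNonTrivial runtime helper.
    static uint32_t InstanceofNonTrivial(Class* /*klass*/, Class* /*ref_klass*/) {
      return 0;  // The real helper walks the type hierarchy.
    }

    // Shape of the code emitted once the Class* is in kArg2.
    uint32_t InstanceofFastPath(Object* ref, Class* klass,
                                bool type_known_final, bool type_known_abstract) {
      if (ref == nullptr) {
        return 0;                      // branch1: null is never an instance.
      }
      Class* ref_klass = ref->klass_;  // LoadWordDisp of Object::ClassOffset().
      if (type_known_final) {
        return ref_klass == klass;     // cmp + kX86Set8R: SETcc, no branch.
      }
      if (!type_known_abstract && ref_klass == klass) {
        return 1;                      // branchover: assume-true fast path.
      }
      return InstanceofNonTrivial(klass, ref_klass);  // slow-path helper call.
    }

Keeping the reference in kArg0 and the Class* in kArg2 live across the
possible helper calls is what limits further optimization here.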

Change-Id: I10e3eaa61b61ceac384267e3078bb6f75c37cee4
Signed-off-by: Mark Mendell <mark.p.mendell@intel.com>
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index c59f3b8..7329038 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1100,6 +1100,9 @@
                                          bool can_assume_type_is_in_dex_cache,
                                          uint32_t type_idx, RegLocation rl_dest,
                                          RegLocation rl_src) {
+  // X86 has its own implementation.
+  DCHECK_NE(cu_->instruction_set, kX86);
+
   FlushAllRegs();
   // May generate a call - use explicit registers
   LockCallTemps();
@@ -1181,15 +1184,10 @@
         LoadConstant(rl_result.low_reg, 1);     // assume true
         branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
       }
-      if (cu_->instruction_set != kX86) {
-        int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
-        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
-        OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
-        FreeTemp(r_tgt);
-      } else {
-        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
-        OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
-      }
+      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
+      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
+      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
+      FreeTemp(r_tgt);
     }
   }
   // TODO: only clobber when type isn't final?
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 3a68044..37990e8 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1046,13 +1046,13 @@
 
     void AddSlowPath(LIRSlowPath* slowpath);
 
-  private:
-    void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
-                                    bool type_known_abstract, bool use_declaring_class,
-                                    bool can_assume_type_is_in_dex_cache,
-                                    uint32_t type_idx, RegLocation rl_dest,
-                                    RegLocation rl_src);
+    virtual void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
+                                            bool type_known_abstract, bool use_declaring_class,
+                                            bool can_assume_type_is_in_dex_cache,
+                                            uint32_t type_idx, RegLocation rl_dest,
+                                            RegLocation rl_src);
 
+  private:
     void ClobberBody(RegisterInfo* p);
     void ResetDefBody(RegisterInfo* p) {
       p->def_start = NULL;
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 4c1c171..79b7710 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -189,6 +189,24 @@
      */
     void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                             RegLocation rl_dest, RegLocation rl_src);
+    /*
+     *
+     * @brief Implement the instanceof check with x86 specific code.
+     * @param needs_access_check 'true' if we must check the access.
+     * @param type_known_final 'true' if the type is known to be a final class.
+     * @param type_known_abstract 'true' if the type is known to be an abstract class.
+     * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
+     * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
+     * @param type_idx Type index to use if use_declaring_class is 'false'.
+     * @param rl_dest Result to be set to 0 or 1.
+     * @param rl_src Object to be tested.
+     */
+    void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
+                                    bool type_known_abstract, bool use_declaring_class,
+                                    bool can_assume_type_is_in_dex_cache,
+                                    uint32_t type_idx, RegLocation rl_dest,
+                                    RegLocation rl_src);
+
     // Single operation generators.
     LIR* OpUnconditionalBranch(LIR* target);
     LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index a567a8a..613e1d2 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1717,6 +1717,88 @@
   StoreValue(rl_dest, rl_result);
 }
 
+void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
+                                            bool type_known_abstract, bool use_declaring_class,
+                                            bool can_assume_type_is_in_dex_cache,
+                                            uint32_t type_idx, RegLocation rl_dest,
+                                            RegLocation rl_src) {
+  FlushAllRegs();
+  // May generate a call - use explicit registers.
+  LockCallTemps();
+  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 gets current Method*.
+  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*.
+  // Reference must end up in kArg0.
+  if (needs_access_check) {
+    // Check that we have access to type_idx; if not, throw IllegalAccessError.
+    // The helper returns the resolved Class* in kRet0.
+    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
+                         type_idx, true);
+    OpRegCopy(class_reg, TargetReg(kRet0));
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0));
+  } else if (use_declaring_class) {
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0));
+    LoadWordDisp(TargetReg(kArg1),
+                 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
+  } else {
+    // Load dex cache entry into class_reg (kArg2).
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0));
+    LoadWordDisp(TargetReg(kArg1),
+                 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+    int32_t offset_of_type =
+        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
+        * type_idx);
+    LoadWordDisp(class_reg, offset_of_type, class_reg);
+    if (!can_assume_type_is_in_dex_cache) {
+      // Need to test presence of type in dex cache at runtime.
+      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
+      // Type is not resolved. Call out to helper, which will return resolved type in kRet0/kArg0.
+      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true);
+      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path.
+      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* Reload Ref. */
+      // Rejoin code paths.
+      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
+      hop_branch->target = hop_target;
+    }
+  }
+  /* kArg0 is ref, kArg2 is class. If ref == null, kArg0 already holds 0, the bool result. */
+  RegLocation rl_result = GetReturn(false);
+
+  // SETcc needs a register with a byte form (EAX, ECX, EDX or EBX).
+  DCHECK_LT(rl_result.low_reg, 4);
+
+  // Is the class NULL?
+  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
+
+  /* Load object->klass_. */
+  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
+  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
+  LIR* branchover = nullptr;
+  if (type_known_final) {
+    // Ensure top 3 bytes of result are 0.
+    LoadConstant(rl_result.low_reg, 0);
+    OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));
+    // Set the low byte of the result to 0 or 1 from the compare condition code.
+    NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondEq);
+  } else {
+    if (!type_known_abstract) {
+      LoadConstant(rl_result.low_reg, 1);     // Assume the test succeeds.
+      branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
+    }
+    OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
+    OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
+  }
+  // TODO: only clobber when type isn't final?
+  ClobberCallerSave();
+  /* Branch targets here. */
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  StoreValue(rl_dest, rl_result);
+  branch1->target = target;
+  if (branchover != nullptr) {
+    branchover->target = target;
+  }
+}
+
 void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_lhs, RegLocation rl_rhs) {
   OpKind op = kOpBkpt;