Fix bug in StackVisitor::GetVReg.

Floats can be stored in core registers within compiled code, so use the
representation returned by the stack maps to know from which register to
read a value.

Bug: 147572335
Test: 457-regs
Change-Id: Ibe6642f2fae8206f2c230006ae85d73b47501c3b
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 8c68c5c..5f497af 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -504,7 +504,7 @@
         case DexRegisterLocation::Kind::kInFpuRegister:
         case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
           uint32_t reg = vreg_map[vreg].GetMachineRegister();
-          bool result = GetRegisterIfAccessible(reg, ToVRegKind(location), &value);
+          bool result = GetRegisterIfAccessible(reg, location, &value);
           CHECK(result);
           if (location == DexRegisterLocation::Kind::kInRegister) {
             if (((1u << reg) & register_mask) != 0) {
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 526386d..a20f40c 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -241,7 +241,7 @@
         uint32_t val2 = *val;
         // The caller already known the register location, so we can use the faster overload
         // which does not decode the stack maps.
-        result = GetVRegFromOptimizedCode(location.value(), kind, val);
+        result = GetVRegFromOptimizedCode(location.value(), val);
         // Compare to the slower overload.
         DCHECK_EQ(result, GetVRegFromOptimizedCode(m, vreg, kind, &val2));
         DCHECK_EQ(*val, val2);
@@ -269,7 +269,9 @@
   }
 }
 
-bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
+bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m,
+                                            uint16_t vreg,
+                                            VRegKind kind,
                                             uint32_t* val) const {
   DCHECK_EQ(m, GetMethod());
   // Can't be null or how would we compile its instructions?
@@ -309,7 +311,7 @@
       if (kind == kReferenceVReg && !(register_mask & (1 << reg))) {
         return false;
       }
-      return GetRegisterIfAccessible(reg, kind, val);
+      return GetRegisterIfAccessible(reg, location_kind, val);
     }
     case DexRegisterLocation::Kind::kInRegisterHigh:
     case DexRegisterLocation::Kind::kInFpuRegister:
@@ -318,7 +320,7 @@
         return false;
       }
       uint32_t reg = dex_register_map[vreg].GetMachineRegister();
-      return GetRegisterIfAccessible(reg, kind, val);
+      return GetRegisterIfAccessible(reg, location_kind, val);
     }
     case DexRegisterLocation::Kind::kConstant: {
       uint32_t result = dex_register_map[vreg].GetConstant();
@@ -336,9 +338,7 @@
   }
 }
 
-bool StackVisitor::GetVRegFromOptimizedCode(DexRegisterLocation location,
-                                            VRegKind kind,
-                                            uint32_t* val) const {
+bool StackVisitor::GetVRegFromOptimizedCode(DexRegisterLocation location, uint32_t* val) const {
   switch (location.GetKind()) {
     case DexRegisterLocation::Kind::kInvalid:
       break;
@@ -351,7 +351,7 @@
     case DexRegisterLocation::Kind::kInRegisterHigh:
     case DexRegisterLocation::Kind::kInFpuRegister:
     case DexRegisterLocation::Kind::kInFpuRegisterHigh:
-      return GetRegisterIfAccessible(location.GetMachineRegister(), kind, val);
+      return GetRegisterIfAccessible(location.GetMachineRegister(), location.GetKind(), val);
     case DexRegisterLocation::Kind::kConstant:
       *val = location.GetConstant();
       return true;
@@ -362,13 +362,18 @@
   UNREACHABLE();
 }
 
-bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
-  const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+bool StackVisitor::GetRegisterIfAccessible(uint32_t reg,
+                                           DexRegisterLocation::Kind location_kind,
+                                           uint32_t* val) const {
+  const bool is_float = (location_kind == DexRegisterLocation::Kind::kInFpuRegister) ||
+                        (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh);
 
   if (kRuntimeISA == InstructionSet::kX86 && is_float) {
     // X86 float registers are 64-bit and each XMM register is provided as two separate
     // 32-bit registers by the context.
-    reg = (kind == kDoubleHiVReg) ? (2 * reg + 1) : (2 * reg);
+    reg = (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh)
+        ? (2 * reg + 1)
+        : (2 * reg);
   }
 
   if (!IsAccessibleRegister(reg, is_float)) {
@@ -377,14 +382,10 @@
   uintptr_t ptr_val = GetRegister(reg, is_float);
   const bool target64 = Is64BitInstructionSet(kRuntimeISA);
   if (target64) {
-    const bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
-    const bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
+    const bool is_high = (location_kind == DexRegisterLocation::Kind::kInRegisterHigh) ||
+                         (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh);
     int64_t value_long = static_cast<int64_t>(ptr_val);
-    if (wide_lo) {
-      ptr_val = static_cast<uintptr_t>(Low32Bits(value_long));
-    } else if (wide_hi) {
-      ptr_val = static_cast<uintptr_t>(High32Bits(value_long));
-    }
+    ptr_val = static_cast<uintptr_t>(is_high ? High32Bits(value_long) : Low32Bits(value_long));
   }
   *val = ptr_val;
   return true;
@@ -449,25 +450,6 @@
   return success;
 }
 
-bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
-                                               VRegKind kind_lo, uint64_t* val) const {
-  const bool is_float = (kind_lo == kDoubleLoVReg);
-  if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
-    return false;
-  }
-  uintptr_t ptr_val_lo = GetRegister(reg_lo, is_float);
-  uintptr_t ptr_val_hi = GetRegister(reg_hi, is_float);
-  bool target64 = Is64BitInstructionSet(kRuntimeISA);
-  if (target64) {
-    int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
-    int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
-    ptr_val_lo = static_cast<uintptr_t>(Low32Bits(value_long_lo));
-    ptr_val_hi = static_cast<uintptr_t>(High32Bits(value_long_hi));
-  }
-  *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
-  return true;
-}
-
 ShadowFrame* StackVisitor::PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide) {
   CodeItemDataAccessor accessor(m->DexInstructionData());
   if (!accessor.HasCodeItem()) {
diff --git a/runtime/stack.h b/runtime/stack.h
index 30d7533..c746536 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -126,7 +126,7 @@
                StackWalkKind walk_kind,
                bool check_suspended = true);
 
-  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
+  bool GetRegisterIfAccessible(uint32_t reg, DexRegisterLocation::Kind kind, uint32_t* val) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  public:
@@ -326,21 +326,24 @@
 
   bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
+  bool GetVRegFromOptimizedCode(ArtMethod* m,
+                                uint16_t vreg,
+                                VRegKind kind,
                                 uint32_t* val) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
+  bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
+                                          VRegKind kind_lo,
+                                          VRegKind kind_hi,
                                           uint64_t* val) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
-                                    VRegKind kind_lo, VRegKind kind_hi,
+  bool GetVRegPairFromOptimizedCode(ArtMethod* m,
+                                    uint16_t vreg,
+                                    VRegKind kind_lo,
+                                    VRegKind kind_hi,
                                     uint64_t* val) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  bool GetVRegFromOptimizedCode(DexRegisterLocation location, VRegKind kind, uint32_t* val) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
-                                   uint64_t* val) const
+  bool GetVRegFromOptimizedCode(DexRegisterLocation location, uint32_t* val) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   ShadowFrame* PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide)
diff --git a/test/457-regs/expected.txt b/test/457-regs/expected.txt
index 6a5618e..8db7853 100644
--- a/test/457-regs/expected.txt
+++ b/test/457-regs/expected.txt
@@ -1 +1,2 @@
 JNI_OnLoad called
+JNI_OnLoad called
diff --git a/test/457-regs/run b/test/457-regs/run
new file mode 100644
index 0000000..2591855
--- /dev/null
+++ b/test/457-regs/run
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+${RUN} "$@"
+return_status1=$?
+
+# Force baseline JIT compilation as the test explicitly requests JIT
+# compilation, which by default is 'optimizing'.
+${RUN} "$@" -Xcompiler-option --baseline
+return_status2=$?
+
+# Make sure we don't silently ignore an early failure.
+(exit $return_status1) && (exit $return_status2)
diff --git a/test/457-regs/src/Main.java b/test/457-regs/src/Main.java
index 3b8df44..428c913 100644
--- a/test/457-regs/src/Main.java
+++ b/test/457-regs/src/Main.java
@@ -27,17 +27,29 @@
     Class<?> c = Class.forName("PhiLiveness");
     Method m = c.getMethod("mergeOk", boolean.class, byte.class);
     m.invoke(null, new Boolean(true), new Byte((byte)2));
+    ensureMethodJitCompiled(m);
+    m.invoke(null, new Boolean(true), new Byte((byte)2));
 
     m = c.getMethod("mergeNotOk", boolean.class, float.class);
     m.invoke(null, new Boolean(true), new Float(4.0f));
+    ensureMethodJitCompiled(m);
+    m.invoke(null, new Boolean(true), new Float(4.0f));
 
     m = c.getMethod("mergeReferences", Main.class);
     m.invoke(null, new Main());
+    ensureMethodJitCompiled(m);
+    m.invoke(null, new Main());
 
     m = c.getMethod("phiEquivalent");
     m.invoke(null);
+    ensureMethodJitCompiled(m);
+    m.invoke(null);
 
     m = c.getMethod("phiAllEquivalents", Main.class);
     m.invoke(null, new Main());
+    ensureMethodJitCompiled(m);
+    m.invoke(null, new Main());
   }
+
+  public native static void ensureMethodJitCompiled(Method method);
 }