Merge "ART: arm64 explicit stack overflow checks"
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index d544397..8218cf1 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -147,6 +147,7 @@
Instruction::RETURN_VOID,
Instruction::RETURN,
Instruction::RETURN_WIDE,
+ Instruction::RETURN_OBJECT,
Instruction::CONST_4,
Instruction::CONST_16,
Instruction::CONST,
@@ -226,6 +227,39 @@
Instruction::SHL_INT_LIT8,
Instruction::SHR_INT_LIT8,
Instruction::USHR_INT_LIT8,
+ Instruction::SGET,
+ Instruction::SGET_BOOLEAN,
+ Instruction::SGET_BYTE,
+ Instruction::SGET_CHAR,
+ Instruction::SGET_SHORT,
+ Instruction::SGET_OBJECT,
+ Instruction::SPUT,
+ Instruction::SPUT_OBJECT,
+ Instruction::SPUT_BOOLEAN,
+ Instruction::SPUT_BYTE,
+ Instruction::SPUT_CHAR,
+ Instruction::SPUT_SHORT,
+ Instruction::MOVE_WIDE,
+ Instruction::MOVE_WIDE_FROM16,
+ Instruction::MOVE_WIDE_16,
+ Instruction::MOVE_OBJECT,
+ Instruction::MOVE_OBJECT_FROM16,
+ Instruction::MOVE_OBJECT_16,
+ Instruction::CMPL_FLOAT,
+ Instruction::CMPG_FLOAT,
+ Instruction::IGET,
+ Instruction::IGET_OBJECT,
+ Instruction::IGET_BOOLEAN,
+ Instruction::IGET_BYTE,
+ Instruction::IGET_CHAR,
+ Instruction::IGET_SHORT,
+ Instruction::IPUT,
+ Instruction::IPUT_OBJECT,
+ Instruction::IPUT_BOOLEAN,
+ Instruction::IPUT_BYTE,
+ Instruction::IPUT_CHAR,
+ Instruction::IPUT_SHORT,
+
// TODO(Arm64): Enable compiler pass
// ----- ExtendedMIROpcode -----
kMirOpPhi,
@@ -244,16 +278,9 @@
kMirOpSelect,
#if ARM64_USE_EXPERIMENTAL_OPCODES
- Instruction::MOVE_WIDE,
- Instruction::MOVE_WIDE_FROM16,
- Instruction::MOVE_WIDE_16,
- Instruction::MOVE_OBJECT,
- Instruction::MOVE_OBJECT_FROM16,
- Instruction::MOVE_OBJECT_16,
// Instruction::MOVE_RESULT,
// Instruction::MOVE_RESULT_WIDE,
// Instruction::MOVE_RESULT_OBJECT,
- // Instruction::RETURN_OBJECT,
// Instruction::CONST_HIGH16,
// Instruction::CONST_WIDE_16,
// Instruction::CONST_WIDE_32,
@@ -269,8 +296,6 @@
// Instruction::FILLED_NEW_ARRAY,
// Instruction::FILLED_NEW_ARRAY_RANGE,
// Instruction::FILL_ARRAY_DATA,
- Instruction::CMPL_FLOAT,
- Instruction::CMPG_FLOAT,
Instruction::CMPL_DOUBLE,
Instruction::CMPG_DOUBLE,
Instruction::CMP_LONG,
@@ -294,34 +319,10 @@
// Instruction::APUT_BYTE,
// Instruction::APUT_CHAR,
// Instruction::APUT_SHORT,
- // Instruction::IGET,
- // Instruction::IGET_WIDE,
- // Instruction::IGET_OBJECT,
- // Instruction::IGET_BOOLEAN,
- // Instruction::IGET_BYTE,
- // Instruction::IGET_CHAR,
- // Instruction::IGET_SHORT,
- // Instruction::IPUT,
// Instruction::IPUT_WIDE,
- // Instruction::IPUT_OBJECT,
- // Instruction::IPUT_BOOLEAN,
- // Instruction::IPUT_BYTE,
- // Instruction::IPUT_CHAR,
- // Instruction::IPUT_SHORT,
- Instruction::SGET,
+ // Instruction::IGET_WIDE,
// Instruction::SGET_WIDE,
- Instruction::SGET_OBJECT,
- // Instruction::SGET_BOOLEAN,
- // Instruction::SGET_BYTE,
- // Instruction::SGET_CHAR,
- // Instruction::SGET_SHORT,
- Instruction::SPUT,
// Instruction::SPUT_WIDE,
- // Instruction::SPUT_OBJECT,
- // Instruction::SPUT_BOOLEAN,
- // Instruction::SPUT_BYTE,
- // Instruction::SPUT_CHAR,
- // Instruction::SPUT_SHORT,
Instruction::INVOKE_VIRTUAL,
Instruction::INVOKE_SUPER,
Instruction::INVOKE_DIRECT,
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 9c801a5..9921d52 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -204,6 +204,8 @@
void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
ConditionCode ccode);
LIR* LoadFPConstantValue(int r_dest, int value);
+ LIR* LoadStoreMaxDisp1020(ArmOpcode opcode, RegStorage r_base, int displacement,
+ RegStorage r_src_dest, RegStorage r_work = RegStorage::InvalidReg());
void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
void AssignDataOffsets();
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 86d32f4..6bb0d74 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -819,6 +819,30 @@
return store;
}
+// Helper function for LoadBaseDispBody()/StoreBaseDispBody().
+LIR* ArmMir2Lir::LoadStoreMaxDisp1020(ArmOpcode opcode, RegStorage r_base, int displacement,
+ RegStorage r_src_dest, RegStorage r_work) {
+ DCHECK_EQ(displacement & 3, 0);
+ int encoded_disp = (displacement & 1020) >> 2; // Within range of the instruction.
+ RegStorage r_ptr = r_base;
+ if ((displacement & ~1020) != 0) {
+ r_ptr = r_work.Valid() ? r_work : AllocTemp();
+ // Add displacement & ~1020 to base, it's a single instruction for up to +-256KiB.
+ OpRegRegImm(kOpAdd, r_ptr, r_base, displacement & ~1020);
+ }
+ LIR* lir = nullptr;
+ if (!r_src_dest.IsPair()) {
+ lir = NewLIR3(opcode, r_src_dest.GetReg(), r_ptr.GetReg(), encoded_disp);
+ } else {
+ lir = NewLIR4(opcode, r_src_dest.GetLowReg(), r_src_dest.GetHighReg(), r_ptr.GetReg(),
+ encoded_disp);
+ }
+ if ((displacement & ~1020) != 0 && !r_work.Valid()) {
+ FreeTemp(r_ptr);
+ }
+ return lir;
+}
+
/*
* Load value from base + displacement. Optionally perform null check
* on base (which must have an associated s_reg and MIR). If not
@@ -836,40 +860,26 @@
switch (size) {
case kDouble:
// Intentional fall-though.
- case k64: {
- DCHECK_EQ(displacement & 3, 0);
- encoded_disp = (displacement & 1020) >> 2; // Within range of kThumb2Vldrd/kThumb2LdrdI8.
- RegStorage r_ptr = r_base;
- if ((displacement & ~1020) != 0) {
- // For core register load, use the r_dest.GetLow() for the temporary pointer.
- r_ptr = r_dest.IsFloat() ? AllocTemp() : r_dest.GetLow();
- // Add displacement & ~1020 to base, it's a single instruction for up to +-256KiB.
- OpRegRegImm(kOpAdd, r_ptr, r_base, displacement & ~1020);
- }
+ case k64:
if (r_dest.IsFloat()) {
DCHECK(!r_dest.IsPair());
- load = NewLIR3(kThumb2Vldrd, r_dest.GetReg(), r_ptr.GetReg(), encoded_disp);
+ load = LoadStoreMaxDisp1020(kThumb2Vldrd, r_base, displacement, r_dest);
} else {
- load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_ptr.GetReg(),
- encoded_disp);
- }
- if ((displacement & ~1020) != 0 && r_dest.IsFloat()) {
- FreeTemp(r_ptr);
+ DCHECK(r_dest.IsPair());
+ // Use the r_dest.GetLow() for the temporary pointer if needed.
+ load = LoadStoreMaxDisp1020(kThumb2LdrdI8, r_base, displacement, r_dest, r_dest.GetLow());
}
already_generated = true;
break;
- }
case kSingle:
// Intentional fall-though.
case k32:
// Intentional fall-though.
case kReference:
if (r_dest.IsFloat()) {
- opcode = kThumb2Vldrs;
- if (displacement <= 1020) {
- short_form = true;
- encoded_disp >>= 2;
- }
+ DCHECK(r_dest.IsSingle());
+ load = LoadStoreMaxDisp1020(kThumb2Vldrs, r_base, displacement, r_dest);
+ already_generated = true;
break;
}
if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
@@ -934,13 +944,8 @@
} else {
RegStorage reg_offset = AllocTemp();
LoadConstant(reg_offset, encoded_disp);
- if (r_dest.IsFloat()) {
- // No index ops - must use a long sequence. Turn the offset into a direct pointer.
- OpRegReg(kOpAdd, reg_offset, r_base);
- load = LoadBaseDispBody(reg_offset, 0, r_dest, size);
- } else {
- load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size);
- }
+ DCHECK(!r_dest.IsFloat());
+ load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size);
FreeTemp(reg_offset);
}
}
@@ -992,28 +997,16 @@
switch (size) {
case kDouble:
// Intentional fall-though.
- case k64: {
- DCHECK_EQ(displacement & 3, 0);
- encoded_disp = (displacement & 1020) >> 2; // Within range of kThumb2Vstrd/kThumb2StrdI8.
- RegStorage r_ptr = r_base;
- if ((displacement & ~1020) != 0) {
- r_ptr = AllocTemp();
- // Add displacement & ~1020 to base, it's a single instruction for up to +-256KiB.
- OpRegRegImm(kOpAdd, r_ptr, r_base, displacement & ~1020);
- }
+ case k64:
if (r_src.IsFloat()) {
DCHECK(!r_src.IsPair());
- store = NewLIR3(kThumb2Vstrd, r_src.GetReg(), r_ptr.GetReg(), encoded_disp);
+ store = LoadStoreMaxDisp1020(kThumb2Vstrd, r_base, displacement, r_src);
} else {
- store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_ptr.GetReg(),
- encoded_disp);
- }
- if ((displacement & ~1020) != 0) {
- FreeTemp(r_ptr);
+ DCHECK(r_src.IsPair());
+ store = LoadStoreMaxDisp1020(kThumb2StrdI8, r_base, displacement, r_src);
}
already_generated = true;
break;
- }
case kSingle:
// Intentional fall-through.
case k32:
@@ -1021,11 +1014,8 @@
case kReference:
if (r_src.IsFloat()) {
DCHECK(r_src.IsSingle());
- opcode = kThumb2Vstrs;
- if (displacement <= 1020) {
- short_form = true;
- encoded_disp >>= 2;
- }
+ store = LoadStoreMaxDisp1020(kThumb2Vstrs, r_base, displacement, r_src);
+ already_generated = true;
break;
}
if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
@@ -1073,13 +1063,8 @@
} else {
RegStorage r_scratch = AllocTemp();
LoadConstant(r_scratch, encoded_disp);
- if (r_src.IsFloat()) {
- // No index ops - must use a long sequence. Turn the offset into a direct pointer.
- OpRegReg(kOpAdd, r_scratch, r_base);
- store = StoreBaseDispBody(r_scratch, 0, r_src, size);
- } else {
- store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
- }
+ DCHECK(!r_src.IsFloat());
+ store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
FreeTemp(r_scratch);
}
}
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index f261852..59eec3d 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -301,12 +301,14 @@
* Mark garbage collection card. Skip if the value we're storing is null.
*/
void Arm64Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
- RegStorage reg_card_base = AllocTemp();
+ RegStorage reg_card_base = AllocTempWide();
RegStorage reg_card_no = AllocTemp();
LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
LoadWordDisp(rs_rA64_SELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
- StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
+ // TODO(Arm64): generate "strb wB, [xB, wC, uxtw]" rather than "strb wB, [xB, xC]"?
+ StoreBaseIndexed(reg_card_base, As64BitReg(reg_card_no), As32BitReg(reg_card_base),
+ 0, kUnsignedByte);
LIR* target = NewLIR0(kPseudoTargetLabel);
branch_over->target = target;
FreeTemp(reg_card_base);
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 11c96d1..a59bfde 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -561,7 +561,6 @@
ArmOpcode alt_opcode = kA64Brk1d;
int32_t log_imm = -1;
bool is_wide = r_dest.Is64Bit();
- CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
switch (op) {
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 7e50c31..3b891f2 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_DEX_REG_STORAGE_H_
#define ART_COMPILER_DEX_REG_STORAGE_H_
+#include "base/logging.h"
namespace art {
@@ -102,17 +103,21 @@
static const uint16_t kHighRegMask = (kHighRegNumMask << kHighRegShift);
// Reg is [F][LLLLL], will override any existing shape and use rs_kind.
- RegStorage(RegStorageKind rs_kind, int reg) {
- DCHECK_NE(rs_kind, k64BitPair);
- DCHECK_EQ(rs_kind & ~kShapeMask, 0);
- reg_ = kValid | rs_kind | (reg & kRegTypeMask);
+ constexpr RegStorage(RegStorageKind rs_kind, int reg)
+ : reg_(
+ DCHECK_CONSTEXPR(rs_kind != k64BitPair, , 0u)
+ DCHECK_CONSTEXPR((rs_kind & ~kShapeMask) == 0, , 0u)
+ kValid | rs_kind | (reg & kRegTypeMask)) {
}
- RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg) {
- DCHECK_EQ(rs_kind, k64BitPair);
- DCHECK_EQ(low_reg & kFloatingPoint, high_reg & kFloatingPoint);
- DCHECK_LE(high_reg & kRegNumMask, kHighRegNumMask) << "High reg must be in 0..31";
- reg_ = kValid | rs_kind | ((high_reg & kHighRegNumMask) << kHighRegShift) |
- (low_reg & kRegTypeMask);
+ constexpr RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg)
+ : reg_(
+ DCHECK_CONSTEXPR(rs_kind == k64BitPair, << rs_kind, 0u)
+ DCHECK_CONSTEXPR((low_reg & kFloatingPoint) == (high_reg & kFloatingPoint),
+ << low_reg << ", " << high_reg, 0u)
+ DCHECK_CONSTEXPR((high_reg & kRegNumMask) <= kHighRegNumMask,
+ << "High reg must be in 0..31: " << high_reg, false)
+ kValid | rs_kind | ((high_reg & kHighRegNumMask) << kHighRegShift) |
+ (low_reg & kRegTypeMask)) {
}
constexpr explicit RegStorage(uint16_t val) : reg_(val) {}
RegStorage() : reg_(kInvalid) {}
@@ -125,50 +130,53 @@
return (reg_ != rhs.GetRawBits());
}
- bool Valid() const {
+ constexpr bool Valid() const {
return ((reg_ & kValidMask) == kValid);
}
- bool Is32Bit() const {
+ constexpr bool Is32Bit() const {
return ((reg_ & kShapeMask) == k32BitSolo);
}
- bool Is64Bit() const {
+ constexpr bool Is64Bit() const {
return ((reg_ & k64BitMask) == k64Bits);
}
- bool Is64BitSolo() const {
+ constexpr bool Is64BitSolo() const {
return ((reg_ & kShapeMask) == k64BitSolo);
}
- bool IsPair() const {
+ constexpr bool IsPair() const {
return ((reg_ & kShapeMask) == k64BitPair);
}
- bool IsFloat() const {
- DCHECK(Valid());
- return ((reg_ & kFloatingPoint) == kFloatingPoint);
+ constexpr bool IsFloat() const {
+ return
+ DCHECK_CONSTEXPR(Valid(), , false)
+ ((reg_ & kFloatingPoint) == kFloatingPoint);
}
- bool IsDouble() const {
- DCHECK(Valid());
- return (reg_ & (kFloatingPoint | k64BitMask)) == (kFloatingPoint | k64Bits);
+ constexpr bool IsDouble() const {
+ return
+ DCHECK_CONSTEXPR(Valid(), , false)
+ (reg_ & (kFloatingPoint | k64BitMask)) == (kFloatingPoint | k64Bits);
}
- bool IsSingle() const {
- DCHECK(Valid());
- return (reg_ & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
+ constexpr bool IsSingle() const {
+ return
+ DCHECK_CONSTEXPR(Valid(), , false)
+ (reg_ & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
}
- static bool IsFloat(uint16_t reg) {
+ static constexpr bool IsFloat(uint16_t reg) {
return ((reg & kFloatingPoint) == kFloatingPoint);
}
- static bool IsDouble(uint16_t reg) {
+ static constexpr bool IsDouble(uint16_t reg) {
return (reg & (kFloatingPoint | k64BitMask)) == (kFloatingPoint | k64Bits);
}
- static bool IsSingle(uint16_t reg) {
+ static constexpr bool IsSingle(uint16_t reg) {
return (reg & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
}
@@ -221,17 +229,17 @@
}
// Return the register number of low or solo.
- int GetRegNum() const {
+ constexpr int GetRegNum() const {
return reg_ & kRegNumMask;
}
// Is register number in 0..7?
- bool Low8() const {
+ constexpr bool Low8() const {
return GetRegNum() < 8;
}
// Is register number in 0..3?
- bool Low4() const {
+ constexpr bool Low4() const {
return GetRegNum() < 4;
}
@@ -244,11 +252,11 @@
return RegStorage(k64BitPair, low.GetReg(), high.GetReg());
}
- static bool SameRegType(RegStorage reg1, RegStorage reg2) {
+ static constexpr bool SameRegType(RegStorage reg1, RegStorage reg2) {
return (reg1.IsDouble() == reg2.IsDouble()) && (reg1.IsSingle() == reg2.IsSingle());
}
- static bool SameRegType(int reg1, int reg2) {
+ static constexpr bool SameRegType(int reg1, int reg2) {
return (IsDouble(reg1) == IsDouble(reg2)) && (IsSingle(reg1) == IsSingle(reg2));
}
@@ -258,17 +266,17 @@
}
// Create a floating point 32-bit solo.
- static RegStorage FloatSolo32(int reg_num) {
+ static constexpr RegStorage FloatSolo32(int reg_num) {
return RegStorage(k32BitSolo, (reg_num & kRegNumMask) | kFloatingPoint);
}
// Create a 128-bit solo.
- static RegStorage Solo128(int reg_num) {
+ static constexpr RegStorage Solo128(int reg_num) {
return RegStorage(k128BitSolo, reg_num & kRegTypeMask);
}
// Create a 64-bit solo.
- static RegStorage Solo64(int reg_num) {
+ static constexpr RegStorage Solo64(int reg_num) {
return RegStorage(k64BitSolo, reg_num & kRegTypeMask);
}
@@ -277,19 +285,19 @@
return RegStorage(k64BitSolo, (reg_num & kRegNumMask) | kFloatingPoint);
}
- static RegStorage InvalidReg() {
+ static constexpr RegStorage InvalidReg() {
return RegStorage(kInvalid);
}
- static uint16_t RegNum(int raw_reg_bits) {
+ static constexpr uint16_t RegNum(int raw_reg_bits) {
return raw_reg_bits & kRegNumMask;
}
- int GetRawBits() const {
+ constexpr int GetRawBits() const {
return reg_;
}
- size_t StorageSize() {
+ size_t StorageSize() const {
switch (reg_ & kShapeMask) {
case kInvalid: return 0;
case k32BitSolo: return 4;
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 814195c..caeb946 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -66,6 +66,16 @@
} \
} while (false)
+// CHECK that can be used in a constexpr function. For example,
+// constexpr int half(int n) {
+// return
+// DCHECK_CONSTEXPR(n >= 0, , 0)
+// CHECK_CONSTEXPR((n & 1) == 0, << "Extra debugging output: n = " << n, 0)
+// n / 2;
+// }
+#define CHECK_CONSTEXPR(x, out, dummy) \
+ (UNLIKELY(!(x))) ? (LOG(FATAL) << "Check failed: " << #x out, dummy) :
+
#ifndef NDEBUG
#define DCHECK(x) CHECK(x)
@@ -77,6 +87,7 @@
#define DCHECK_GT(x, y) CHECK_GT(x, y)
#define DCHECK_STREQ(s1, s2) CHECK_STREQ(s1, s2)
#define DCHECK_STRNE(s1, s2) CHECK_STRNE(s1, s2)
+#define DCHECK_CONSTEXPR(x, out, dummy) CHECK_CONSTEXPR(x, out, dummy)
#else // NDEBUG
@@ -116,6 +127,9 @@
while (false) \
CHECK_STRNE(str1, str2)
+#define DCHECK_CONSTEXPR(x, out, dummy) \
+ (false && (x)) ? (dummy) :
+
#endif
#define LOG(severity) ::art::LogMessage(__FILE__, __LINE__, severity, -1).stream()
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 89cdb4d..fe5e104 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -815,6 +815,10 @@
bool VerifyAccess(mirror::Object* obj, mirror::Class* declaring_class, uint32_t access_flags) {
NthCallerVisitor visitor(Thread::Current(), 2);
visitor.WalkStack();
+ if (UNLIKELY(visitor.caller == nullptr)) {
+ // The caller is an attached native thread.
+ return (access_flags & kAccPublic) != 0;
+ }
mirror::Class* caller_class = visitor.caller->GetDeclaringClass();
if (((access_flags & kAccPublic) != 0) || (caller_class == declaring_class)) {
diff --git a/runtime/utils.h b/runtime/utils.h
index 6a4198f..6d52459 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -140,9 +140,8 @@
template<typename T>
static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) {
return
- // DCHECK(IsPowerOfTwo(n)) in a form acceptable in a constexpr function:
- (kIsDebugBuild && !IsPowerOfTwo(n)) ? (LOG(FATAL) << n << " isn't a power of 2", T(0))
- : (x & -n);
+ DCHECK_CONSTEXPR(IsPowerOfTwo(n), , T(0))
+ (x & -n);
}
template<typename T>
diff --git a/test/JniTest/JniTest.java b/test/JniTest/JniTest.java
index 3c4ed35..33418a9 100644
--- a/test/JniTest/JniTest.java
+++ b/test/JniTest/JniTest.java
@@ -21,6 +21,7 @@
System.loadLibrary("arttest");
testFindClassOnAttachedNativeThread();
testFindFieldOnAttachedNativeThread();
+ testReflectFieldGetFromAttachedNativeThreadNative();
testCallStaticVoidMethodOnSubClass();
testGetMirandaMethod();
testZeroLengthByteBuffers();
@@ -34,6 +35,10 @@
private static boolean testFindFieldOnAttachedNativeThreadField;
+ private static native void testReflectFieldGetFromAttachedNativeThreadNative();
+
+ public static boolean testReflectFieldGetFromAttachedNativeThreadField;
+
private static void testFindFieldOnAttachedNativeThread() {
testFindFieldOnAttachedNativeThreadNative();
if (!testFindFieldOnAttachedNativeThreadField) {
diff --git a/test/JniTest/jni_test.cc b/test/JniTest/jni_test.cc
index 024ba53..36cad72 100644
--- a/test/JniTest/jni_test.cc
+++ b/test/JniTest/jni_test.cc
@@ -103,6 +103,66 @@
assert(pthread_join_result == 0);
}
+static void* testReflectFieldGetFromAttachedNativeThread(void*) {
+ assert(jvm != NULL);
+
+ JNIEnv* env = NULL;
+ JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
+ int attach_result = jvm->AttachCurrentThread(&env, &args);
+ assert(attach_result == 0);
+
+ jclass clazz = env->FindClass("JniTest");
+ assert(clazz != NULL);
+ assert(!env->ExceptionCheck());
+
+ jclass class_clazz = env->FindClass("java/lang/Class");
+ assert(class_clazz != NULL);
+ assert(!env->ExceptionCheck());
+
+ jmethodID getFieldMethodId = env->GetMethodID(class_clazz, "getField",
+ "(Ljava/lang/String;)Ljava/lang/reflect/Field;");
+ assert(getFieldMethodId != NULL);
+ assert(!env->ExceptionCheck());
+
+ jstring field_name = env->NewStringUTF("testReflectFieldGetFromAttachedNativeThreadField");
+ assert(field_name != NULL);
+ assert(!env->ExceptionCheck());
+
+ jobject field = env->CallObjectMethod(clazz, getFieldMethodId, field_name);
+ assert(field != NULL);
+ assert(!env->ExceptionCheck());
+
+ jclass field_clazz = env->FindClass("java/lang/reflect/Field");
+ assert(field_clazz != NULL);
+ assert(!env->ExceptionCheck());
+
+ jmethodID getBooleanMethodId = env->GetMethodID(field_clazz, "getBoolean",
+ "(Ljava/lang/Object;)Z");
+ assert(getBooleanMethodId != NULL);
+ assert(!env->ExceptionCheck());
+
+ jboolean value = env->CallBooleanMethod(field, getBooleanMethodId, /* ignored */ clazz);
+ assert(value == false);
+ assert(!env->ExceptionCheck());
+
+ int detach_result = jvm->DetachCurrentThread();
+ assert(detach_result == 0);
+ return NULL;
+}
+
+// http://b/15539150
+extern "C" JNIEXPORT void JNICALL Java_JniTest_testReflectFieldGetFromAttachedNativeThreadNative(
+ JNIEnv*, jclass) {
+ pthread_t pthread;
+ int pthread_create_result = pthread_create(&pthread,
+ NULL,
+ testReflectFieldGetFromAttachedNativeThread,
+ NULL);
+ assert(pthread_create_result == 0);
+ int pthread_join_result = pthread_join(pthread, NULL);
+ assert(pthread_join_result == 0);
+}
+
// http://b/11243757
extern "C" JNIEXPORT void JNICALL Java_JniTest_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,