Support system server ART-profiling
We don't have to map the code cache as executable if we only want to save
profiles. This enables system server profiling without disabling SELinux
to bypass the jit code cache exec-mapping.
Test: m test-art-host
boot a device with system server profiling enabled.
Bug: 73313191
Change-Id: I7f25a905e0b23456183e39e58ad8f4b829ddf0c5
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index d4fe977..0684b46 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -185,10 +185,12 @@
if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
return nullptr;
}
+ bool code_cache_only_for_profile_data = !options->UseJitCompilation();
jit->code_cache_.reset(JitCodeCache::Create(
options->GetCodeCacheInitialCapacity(),
options->GetCodeCacheMaxCapacity(),
jit->generate_debug_info_,
+ code_cache_only_for_profile_data,
error_msg));
if (jit->GetCodeCache() == nullptr) {
return nullptr;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 1c8c26c..249a8b0 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -50,7 +50,6 @@
namespace art {
namespace jit {
-static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;
@@ -161,6 +160,7 @@
JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
size_t max_capacity,
bool generate_debug_info,
+ bool used_only_for_profile_data,
std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
CHECK_GE(max_capacity, initial_capacity);
@@ -184,6 +184,15 @@
return nullptr;
}
+ // Decide how we should map the code and data sections.
+ // If we use the code cache just for profiling we do not need to map the code section as
+ // executable.
+ // NOTE 1: this is yet another workaround to bypass strict SELinux policies in order to be able
to profile system server.
+ // NOTE 2: We could just not create the code section at all but we would then need to
special-case too many places.
+ int memmap_flags_prot_code = used_only_for_profile_data ? (kProtCode & ~PROT_EXEC) : kProtCode;
+
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
// Map in low 4gb to simplify accessing root tables for x86_64.
@@ -216,8 +225,11 @@
DCHECK_EQ(code_size + data_size, max_capacity);
uint8_t* divider = data_map->Begin() + data_size;
- MemMap* code_map =
- data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str, use_ashmem);
+ MemMap* code_map = data_map->RemapAtEnd(
+ divider,
+ "jit-code-cache",
+ memmap_flags_prot_code | PROT_WRITE,
+ &error_str, use_ashmem);
if (code_map == nullptr) {
std::ostringstream oss;
oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
@@ -229,7 +241,13 @@
code_size = initial_capacity - data_size;
DCHECK_EQ(code_size + data_size, initial_capacity);
return new JitCodeCache(
- code_map, data_map.release(), code_size, data_size, max_capacity, garbage_collect_code);
+ code_map,
+ data_map.release(),
+ code_size,
+ data_size,
+ max_capacity,
+ garbage_collect_code,
+ memmap_flags_prot_code);
}
JitCodeCache::JitCodeCache(MemMap* code_map,
@@ -237,7 +255,8 @@
size_t initial_code_capacity,
size_t initial_data_capacity,
size_t max_capacity,
- bool garbage_collect_code)
+ bool garbage_collect_code,
+ int memmap_flags_prot_code)
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache condition variable", lock_),
collection_in_progress_(false),
@@ -258,7 +277,8 @@
histogram_code_memory_use_("Memory used for compiled code", 16),
histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
is_weak_access_enabled_(true),
- inline_cache_cond_("Jit inline cache condition variable", lock_) {
+ inline_cache_cond_("Jit inline cache condition variable", lock_),
+ memmap_flags_prot_code_(memmap_flags_prot_code) {
DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
@@ -274,7 +294,7 @@
"mprotect jit code cache",
code_map_->Begin(),
code_map_->Size(),
- kProtCode);
+ memmap_flags_prot_code_);
CheckedCall(mprotect,
"mprotect jit data cache",
data_map_->Begin(),
@@ -327,19 +347,30 @@
class ScopedCodeCacheWrite : ScopedTrace {
public:
- explicit ScopedCodeCacheWrite(MemMap* code_map)
+ explicit ScopedCodeCacheWrite(const JitCodeCache* const code_cache)
: ScopedTrace("ScopedCodeCacheWrite"),
- code_map_(code_map) {
+ code_cache_(code_cache) {
ScopedTrace trace("mprotect all");
- CheckedCall(mprotect, "make code writable", code_map_->Begin(), code_map_->Size(), kProtAll);
+ CheckedCall(
+ mprotect,
+ "make code writable",
+ code_cache_->code_map_->Begin(),
+ code_cache_->code_map_->Size(),
+ code_cache_->memmap_flags_prot_code_ | PROT_WRITE);
}
+
~ScopedCodeCacheWrite() {
ScopedTrace trace("mprotect code");
- CheckedCall(mprotect, "make code protected", code_map_->Begin(), code_map_->Size(), kProtCode);
+ CheckedCall(
+ mprotect,
+ "make code protected",
+ code_cache_->code_map_->Begin(),
+ code_cache_->code_map_->Size(),
+ code_cache_->memmap_flags_prot_code_);
}
private:
- MemMap* const code_map_;
+ const JitCodeCache* const code_cache_;
DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};
@@ -557,7 +588,7 @@
// so it's possible for the same method_header to start representing
// different compile code.
MutexLock mu(Thread::Current(), lock_);
- ScopedCodeCacheWrite scc(code_map_.get());
+ ScopedCodeCacheWrite scc(this);
for (const OatQuickMethodHeader* method_header : method_headers) {
FreeCode(method_header->GetCode());
}
@@ -576,7 +607,7 @@
// with the classlinker_classes_lock_ held, and suspending ourselves could
// lead to a deadlock.
{
- ScopedCodeCacheWrite scc(code_map_.get());
+ ScopedCodeCacheWrite scc(this);
for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
it->second.RemoveMethodsIn(alloc);
if (it->second.GetMethods().empty()) {
@@ -715,7 +746,7 @@
MutexLock mu(self, lock_);
WaitForPotentialCollectionToComplete(self);
{
- ScopedCodeCacheWrite scc(code_map_.get());
+ ScopedCodeCacheWrite scc(this);
memory = AllocateCode(total_size);
if (memory == nullptr) {
return nullptr;
@@ -878,7 +909,7 @@
}
bool in_cache = false;
- ScopedCodeCacheWrite ccw(code_map_.get());
+ ScopedCodeCacheWrite ccw(this);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
@@ -1105,7 +1136,7 @@
DCHECK_EQ(per_space_footprint * 2, new_footprint);
mspace_set_footprint_limit(data_mspace_, per_space_footprint);
{
- ScopedCodeCacheWrite scc(code_map_.get());
+ ScopedCodeCacheWrite scc(this);
mspace_set_footprint_limit(code_mspace_, per_space_footprint);
}
}
@@ -1273,7 +1304,7 @@
std::unordered_set<OatQuickMethodHeader*> method_headers;
{
MutexLock mu(self, lock_);
- ScopedCodeCacheWrite scc(code_map_.get());
+ ScopedCodeCacheWrite scc(this);
// Iterate over all compiled code and remove entries that are not marked.
for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
JniStubData* data = &it->second;
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index f1c99fb..b10f57e 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -68,6 +68,7 @@
namespace jit {
class JitInstrumentationCache;
+class ScopedCodeCacheWrite;
// Alignment in bits that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
@@ -88,6 +89,7 @@
static JitCodeCache* Create(size_t initial_capacity,
size_t max_capacity,
bool generate_debug_info,
+ bool used_only_for_profile_data,
std::string* error_msg);
~JitCodeCache();
@@ -270,7 +272,8 @@
size_t initial_code_capacity,
size_t initial_data_capacity,
size_t max_capacity,
- bool garbage_collect_code);
+ bool garbage_collect_code,
+ int memmap_flags_prot_code);
// Internal version of 'CommitCode' that will not retry if the
// allocation fails. Return null if the allocation fails.
@@ -442,7 +445,12 @@
// Condition to wait on for accessing inline caches.
ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+ // Mapping flags for the code section.
+ const int memmap_flags_prot_code_;
+
friend class art::JitJniStubTestHelper;
+ friend class ScopedCodeCacheWrite;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index e105bab..38c65f5 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -177,6 +177,7 @@
DEBUG_GENERATE_MINI_DEBUG_INFO = 1 << 11,
HIDDEN_API_ENFORCEMENT_POLICY_MASK = (1 << 12)
| (1 << 13),
+ PROFILE_SYSTEM_SERVER = 1 << 14,
// bits to shift (flags & HIDDEN_API_ENFORCEMENT_POLICY_MASK) by to get a value
// corresponding to hiddenapi::EnforcementPolicy
@@ -308,6 +309,9 @@
(runtime_flags & HIDDEN_API_ENFORCEMENT_POLICY_MASK) >> API_ENFORCEMENT_POLICY_SHIFT);
runtime_flags &= ~HIDDEN_API_ENFORCEMENT_POLICY_MASK;
+ bool profile_system_server = (runtime_flags & PROFILE_SYSTEM_SERVER) == PROFILE_SYSTEM_SERVER;
+ runtime_flags &= ~PROFILE_SYSTEM_SERVER;
+
if (runtime_flags != 0) {
LOG(ERROR) << StringPrintf("Unknown bits set in runtime_flags: %#x", runtime_flags);
}
@@ -392,7 +396,11 @@
env, is_system_server, action, isa_string.c_str());
} else {
Runtime::Current()->InitNonZygoteOrPostFork(
- env, is_system_server, Runtime::NativeBridgeAction::kUnload, nullptr);
+ env,
+ is_system_server,
+ Runtime::NativeBridgeAction::kUnload,
+ /*isa*/ nullptr,
+ profile_system_server);
}
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f86b7a0..b8775b8 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -861,7 +861,11 @@
}
void Runtime::InitNonZygoteOrPostFork(
- JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa) {
+ JNIEnv* env,
+ bool is_system_server,
+ NativeBridgeAction action,
+ const char* isa,
+ bool profile_system_server) {
is_zygote_ = false;
if (is_native_bridge_loaded_) {
@@ -884,8 +888,15 @@
heap_->ResetGcPerformanceInfo();
// We may want to collect profiling samples for system server, but we never want to JIT there.
- if ((!is_system_server || !jit_options_->UseJitCompilation()) &&
- !safe_mode_ &&
+ if (is_system_server) {
+ jit_options_->SetUseJitCompilation(false);
+ jit_options_->SetSaveProfilingInfo(profile_system_server);
+ if (profile_system_server) {
+ jit_options_->SetWaitForJitNotificationsToSaveProfile(false);
+ VLOG(profiler) << "Enabling system server profiles";
+ }
+ }
+ if (!safe_mode_ &&
(jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
jit_ == nullptr) {
// Note that when running ART standalone (not zygote, nor zygote fork),
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 87f5b51..953acbb 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -451,7 +451,11 @@
void PreZygoteFork();
void InitNonZygoteOrPostFork(
- JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);
+ JNIEnv* env,
+ bool is_system_server,
+ NativeBridgeAction action,
+ const char* isa,
+ bool profile_system_server = false);
const instrumentation::Instrumentation* GetInstrumentation() const {
return &instrumentation_;