/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "perfetto_hprof"

#include "perfetto_hprof.h"

#include <android-base/logging.h>
#include <base/fast_exit.h>
#include <fcntl.h>
#include <inttypes.h>
#include <sched.h>
#include <signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <thread>
#include <time.h>

#include <limits>
#include <optional>
#include <type_traits>

#include "gc/heap-visit-objects-inl.h"
#include "gc/heap.h"
#include "gc/scoped_gc_critical_section.h"
#include "mirror/object-refvisitor-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "perfetto/profiling/parse_smaps.h"
#include "perfetto/trace/interned_data/interned_data.pbzero.h"
#include "perfetto/trace/profiling/heap_graph.pbzero.h"
#include "perfetto/trace/profiling/profile_common.pbzero.h"
#include "perfetto/trace/profiling/smaps.pbzero.h"
#include "perfetto/config/profiling/java_hprof_config.pbzero.h"
#include "perfetto/protozero/packed_repeated_fields.h"
#include "perfetto/tracing.h"
#include "runtime-inl.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"
#include "dex/descriptors_names.h"

// There are three threads involved in this:
// * listener thread: this is idle in the background when this plugin gets loaded, and waits
//   for data on g_signal_pipe_fds.
// * signal thread: an arbitrary thread that handles the signal and writes data to
//   g_signal_pipe_fds.
// * perfetto producer thread: once the signal is received, the app forks. In the newly forked
//   child, the Perfetto Client API spawns a thread to communicate with traced.

namespace perfetto_hprof {

constexpr int kJavaHeapprofdSignal = __SIGRTMIN + 6;
constexpr time_t kWatchdogTimeoutSec = 120;
// This needs to be lower than the maximum acceptable chunk size, because this
// is checked *before* writing another submessage. We conservatively assume
// submessages can be up to 100k here for a 500k chunk size.
// DropBox has a 500k chunk limit, and each chunk needs to parse as a proto.
constexpr uint32_t kPacketSizeThreshold = 400000;
constexpr char kByte[1] = {'x'};
static art::Mutex& GetStateMutex() {
  static art::Mutex state_mutex("perfetto_hprof_state_mutex", art::LockLevel::kGenericBottomLock);
  return state_mutex;
}

static art::ConditionVariable& GetStateCV() {
  static art::ConditionVariable state_cv("perfetto_hprof_state_cv", GetStateMutex());
  return state_cv;
}

static int requested_tracing_session_id = 0;
static State g_state = State::kUninitialized;

// Pipe to signal from the signal handler into a worker thread that handles the
// dump requests.
int g_signal_pipe_fds[2];
static struct sigaction g_orig_act = {};

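// Returns the interned ID for `s` in `*m`, assigning the next sequential ID on
// first use. This backs the interning tables written into the HeapGraph proto.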
template <typename T>
uint64_t FindOrAppend(std::map<T, uint64_t>* m, const T& s) {
  auto it = m->find(s);
  if (it == m->end()) {
    std::tie(it, std::ignore) = m->emplace(s, m->size());
  }
  return it->second;
}

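// Arms a one-shot CLOCK_MONOTONIC timer that delivers SIGKILL to this process
// after kWatchdogTimeoutSec, so a stuck dump in the daemonized grand-child
// cannot linger forever.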
void ArmWatchdogOrDie() {
  timer_t timerid{};
  struct sigevent sev {};
  sev.sigev_notify = SIGEV_SIGNAL;
  sev.sigev_signo = SIGKILL;

  if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
    // This only gets called in the child, so we can fatal without impacting
    // the app.
    PLOG(FATAL) << "failed to create watchdog timer";
  }

  struct itimerspec its {};
  its.it_value.tv_sec = kWatchdogTimeoutSec;

  if (timer_settime(timerid, 0, &its, nullptr) == -1) {
    // This only gets called in the child, so we can fatal without impacting
    // the app.
    PLOG(FATAL) << "failed to arm watchdog timer";
  }
}

bool StartsWith(const std::string& str, const std::string& prefix) {
  return str.compare(0, prefix.length(), prefix) == 0;
}

// Sample entries that match one of the following:
// * start with /system/
// * start with /vendor/
// * start with /data/app/
// * contain "extracted in memory from Y", where Y matches any of the above
bool ShouldSampleSmapsEntry(const perfetto::profiling::SmapsEntry& e) {
  if (StartsWith(e.pathname, "/system/") || StartsWith(e.pathname, "/vendor/") ||
      StartsWith(e.pathname, "/data/app/")) {
    return true;
  }
  if (StartsWith(e.pathname, "[anon:")) {
    if (e.pathname.find("extracted in memory from /system/") != std::string::npos) {
      return true;
    }
    if (e.pathname.find("extracted in memory from /vendor/") != std::string::npos) {
      return true;
    }
    if (e.pathname.find("extracted in memory from /data/app/") != std::string::npos) {
      return true;
    }
  }
  return false;
}

class JavaHprofDataSource : public perfetto::DataSource<JavaHprofDataSource> {
 public:
  constexpr static perfetto::BufferExhaustedPolicy kBufferExhaustedPolicy =
      perfetto::BufferExhaustedPolicy::kStall;
  void OnSetup(const SetupArgs& args) override {
    uint64_t normalized_cfg_tracing_session_id =
        args.config->tracing_session_id() % std::numeric_limits<int32_t>::max();
    if (requested_tracing_session_id < 0) {
      LOG(ERROR) << "invalid requested tracing session id " << requested_tracing_session_id;
      return;
    }
    if (static_cast<uint64_t>(requested_tracing_session_id) != normalized_cfg_tracing_session_id) {
      return;
    }

    // This is on the heap as it triggers -Wframe-larger-than.
    std::unique_ptr<perfetto::protos::pbzero::JavaHprofConfig::Decoder> cfg(
        new perfetto::protos::pbzero::JavaHprofConfig::Decoder(
            args.config->java_hprof_config_raw()));

    dump_smaps_ = cfg->dump_smaps();
    for (auto it = cfg->ignored_types(); it; ++it) {
      std::string name = (*it).ToStdString();
      ignored_types_.emplace_back(std::move(name));
    }
    // This tracing session ID matches the requesting tracing session ID, so we know heapprofd
    // has verified it targets this process.
    enabled_ = true;
  }

  bool dump_smaps() { return dump_smaps_; }
  bool enabled() { return enabled_; }

  void OnStart(const StartArgs&) override {
    if (!enabled()) {
      return;
    }
    art::MutexLock lk(art_thread(), GetStateMutex());
    if (g_state == State::kWaitForStart) {
      g_state = State::kStart;
      GetStateCV().Broadcast(art_thread());
    }
  }

  // This datasource can be used with a trace config with a short duration_ms
  // but a long datasource_stop_timeout_ms. In that case, OnStop is called (in
  // general) before the dump is done. If so, we handle the stop
  // asynchronously, and notify the tracing service once we are done.
  // In case OnStop is called after the dump is done (but before the process
  // has exited), we just acknowledge the request.
  void OnStop(const StopArgs& a) override {
    art::MutexLock lk(art_thread(), finish_mutex_);
    if (is_finished_) {
      return;
    }
    is_stopped_ = true;
    async_stop_ = std::move(a.HandleStopAsynchronously());
  }

  static art::Thread* art_thread() {
    // TODO(fmayer): Attach the Perfetto producer thread to ART and give it a name. This is
    // not trivial, we cannot just attach the first time this method is called, because
    // AttachCurrentThread deadlocks with the ConditionVariable::Wait in WaitForDataSource.
    //
    // We should attach the thread as soon as the Client API spawns it, but that needs more
    // complicated plumbing.
    return nullptr;
  }

  std::vector<std::string> ignored_types() { return ignored_types_; }

  void Finish() {
    art::MutexLock lk(art_thread(), finish_mutex_);
    if (is_stopped_) {
      async_stop_();
    } else {
      is_finished_ = true;
    }
  }

 private:
  bool enabled_ = false;
  bool dump_smaps_ = false;
  std::vector<std::string> ignored_types_;
  static art::Thread* self_;

  art::Mutex finish_mutex_{"perfetto_hprof_ds_mutex", art::LockLevel::kGenericBottomLock};
  bool is_finished_ = false;
  bool is_stopped_ = false;
  std::function<void()> async_stop_;
};

art::Thread* JavaHprofDataSource::self_ = nullptr;

void WaitForDataSource(art::Thread* self) {
  perfetto::TracingInitArgs args;
  args.backends = perfetto::BackendType::kSystemBackend;
  perfetto::Tracing::Initialize(args);

  perfetto::DataSourceDescriptor dsd;
  dsd.set_name("android.java_hprof");
  dsd.set_will_notify_on_stop(true);
  JavaHprofDataSource::Register(dsd);

  LOG(INFO) << "waiting for data source";

  art::MutexLock lk(self, GetStateMutex());
  while (g_state != State::kStart) {
    GetStateCV().Wait(self);
  }
}

// Helper class to write Java heap dumps to `ctx`. The whole heap dump can be
// split across multiple perfetto.protos.HeapGraph messages, to avoid making
// each message too big.
class Writer {
 public:
  Writer(pid_t pid, JavaHprofDataSource::TraceContext* ctx, uint64_t timestamp)
      : pid_(pid), ctx_(ctx), timestamp_(timestamp),
        last_written_(ctx_->written()) {}

  // Return whether the next call to GetHeapGraph will create a new TracePacket.
  bool will_create_new_packet() const {
    return !heap_graph_ || ctx_->written() - last_written_ > kPacketSizeThreshold;
  }

  perfetto::protos::pbzero::HeapGraph* GetHeapGraph() {
    if (will_create_new_packet()) {
      CreateNewHeapGraph();
    }
    return heap_graph_;
  }

  void Finalize() {
    if (trace_packet_) {
      trace_packet_->Finalize();
    }
    heap_graph_ = nullptr;
  }

  ~Writer() { Finalize(); }

 private:
  Writer(const Writer&) = delete;
  Writer& operator=(const Writer&) = delete;
  Writer(Writer&&) = delete;
  Writer& operator=(Writer&&) = delete;

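  // Rotates to a fresh TracePacket: marks the current HeapGraph (if any) as
  // continued, finalizes the packet, and starts a new HeapGraph with the same
  // pid/timestamp and a monotonically increasing index for reassembly.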
  void CreateNewHeapGraph() {
    if (heap_graph_) {
      heap_graph_->set_continued(true);
    }
    Finalize();

    uint64_t written = ctx_->written();

    trace_packet_ = ctx_->NewTracePacket();
    trace_packet_->set_timestamp(timestamp_);
    heap_graph_ = trace_packet_->set_heap_graph();
    heap_graph_->set_pid(pid_);
    heap_graph_->set_index(index_++);

    last_written_ = written;
  }

  const pid_t pid_;
  JavaHprofDataSource::TraceContext* const ctx_;
  const uint64_t timestamp_;

  uint64_t last_written_ = 0;

  perfetto::DataSource<JavaHprofDataSource>::TraceContext::TracePacketHandle
      trace_packet_;
  perfetto::protos::pbzero::HeapGraph* heap_graph_ = nullptr;

  uint64_t index_ = 0;
};

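// Visitor that, for a single object, records a (field name, referee) pair for
// every reference field. Designed to be passed to
// art::mirror::Object::VisitReferences.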
class ReferredObjectsFinder {
 public:
  explicit ReferredObjectsFinder(
      std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects)
      : referred_objects_(referred_objects) {}

  // For art::mirror::Object::VisitReferences.
  void operator()(art::ObjPtr<art::mirror::Object> obj, art::MemberOffset offset,
                  bool is_static) const
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (offset.Uint32Value() == art::mirror::Object::ClassOffset().Uint32Value()) {
      // Skip shadow$klass pointer.
      return;
    }
    art::mirror::Object* ref = obj->GetFieldObject<art::mirror::Object>(offset);
    art::ArtField* field;
    if (is_static) {
      field = art::ArtField::FindStaticFieldWithOffset(obj->AsClass(), offset.Uint32Value());
    } else {
      field = art::ArtField::FindInstanceFieldWithOffset(obj->GetClass(), offset.Uint32Value());
    }
    std::string field_name = "";
    if (field != nullptr) {
      field_name = field->PrettyField(/*with_type=*/true);
    }
    referred_objects_->emplace_back(std::move(field_name), ref);
  }

  void VisitRootIfNonNull(art::mirror::CompressedReference<art::mirror::Object>* root
                              ATTRIBUTE_UNUSED) const {}
  void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root
                     ATTRIBUTE_UNUSED) const {}

 private:
  // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
  // fork.
  std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects_;
};

class RootFinder : public art::SingleRootVisitor {
 public:
  explicit RootFinder(
      std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects)
      : root_objects_(root_objects) {}

  void VisitRoot(art::mirror::Object* root, const art::RootInfo& info) override {
    (*root_objects_)[info.GetType()].emplace_back(root);
  }

 private:
  // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
  // fork.
  std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects_;
};

perfetto::protos::pbzero::HeapGraphRoot::Type ToProtoType(art::RootType art_type) {
  using perfetto::protos::pbzero::HeapGraphRoot;
  switch (art_type) {
    case art::kRootUnknown:
      return HeapGraphRoot::ROOT_UNKNOWN;
    case art::kRootJNIGlobal:
      return HeapGraphRoot::ROOT_JNI_GLOBAL;
    case art::kRootJNILocal:
      return HeapGraphRoot::ROOT_JNI_LOCAL;
    case art::kRootJavaFrame:
      return HeapGraphRoot::ROOT_JAVA_FRAME;
    case art::kRootNativeStack:
      return HeapGraphRoot::ROOT_NATIVE_STACK;
    case art::kRootStickyClass:
      return HeapGraphRoot::ROOT_STICKY_CLASS;
    case art::kRootThreadBlock:
      return HeapGraphRoot::ROOT_THREAD_BLOCK;
    case art::kRootMonitorUsed:
      return HeapGraphRoot::ROOT_MONITOR_USED;
    case art::kRootThreadObject:
      return HeapGraphRoot::ROOT_THREAD_OBJECT;
    case art::kRootInternedString:
      return HeapGraphRoot::ROOT_INTERNED_STRING;
    case art::kRootFinalizing:
      return HeapGraphRoot::ROOT_FINALIZING;
    case art::kRootDebugger:
      return HeapGraphRoot::ROOT_DEBUGGER;
    case art::kRootReferenceCleanup:
      return HeapGraphRoot::ROOT_REFERENCE_CLEANUP;
    case art::kRootVMInternal:
      return HeapGraphRoot::ROOT_VM_INTERNAL;
    case art::kRootJNIMonitor:
      return HeapGraphRoot::ROOT_JNI_MONITOR;
  }
}

perfetto::protos::pbzero::HeapGraphType::Kind ProtoClassKind(uint32_t class_flags) {
  using perfetto::protos::pbzero::HeapGraphType;
  switch (class_flags) {
    case art::mirror::kClassFlagNormal:
      return HeapGraphType::KIND_NORMAL;
    case art::mirror::kClassFlagNoReferenceFields:
      return HeapGraphType::KIND_NOREFERENCES;
    case art::mirror::kClassFlagString | art::mirror::kClassFlagNoReferenceFields:
      return HeapGraphType::KIND_STRING;
    case art::mirror::kClassFlagObjectArray:
      return HeapGraphType::KIND_ARRAY;
    case art::mirror::kClassFlagClass:
      return HeapGraphType::KIND_CLASS;
    case art::mirror::kClassFlagClassLoader:
      return HeapGraphType::KIND_CLASSLOADER;
    case art::mirror::kClassFlagDexCache:
      return HeapGraphType::KIND_DEXCACHE;
    case art::mirror::kClassFlagSoftReference:
      return HeapGraphType::KIND_SOFT_REFERENCE;
    case art::mirror::kClassFlagWeakReference:
      return HeapGraphType::KIND_WEAK_REFERENCE;
    case art::mirror::kClassFlagFinalizerReference:
      return HeapGraphType::KIND_FINALIZER_REFERENCE;
    case art::mirror::kClassFlagPhantomReference:
      return HeapGraphType::KIND_PHANTOM_REFERENCE;
    default:
      return HeapGraphType::KIND_UNKNOWN;
  }
}

std::string PrettyType(art::mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS {
  if (klass == nullptr) {
    return "(raw)";
  }
  std::string temp;
  std::string result(art::PrettyDescriptor(klass->GetDescriptor(&temp)));
  return result;
}

void DumpSmaps(JavaHprofDataSource::TraceContext* ctx) {
  FILE* smaps = fopen("/proc/self/smaps", "r");
  if (smaps != nullptr) {
    auto trace_packet = ctx->NewTracePacket();
    auto* smaps_packet = trace_packet->set_smaps_packet();
    smaps_packet->set_pid(getpid());
    perfetto::profiling::ParseSmaps(smaps,
        [&smaps_packet](const perfetto::profiling::SmapsEntry& e) {
          if (ShouldSampleSmapsEntry(e)) {
            auto* smaps_entry = smaps_packet->add_entries();
            smaps_entry->set_path(e.pathname);
            smaps_entry->set_size_kb(e.size_kb);
            smaps_entry->set_private_dirty_kb(e.private_dirty_kb);
            smaps_entry->set_swap_kb(e.swap_kb);
          }
        });
    fclose(smaps);
  } else {
    PLOG(ERROR) << "failed to open smaps";
  }
}

uint64_t GetObjectId(const art::mirror::Object* obj) {
  return reinterpret_cast<uint64_t>(obj) / std::alignment_of<art::mirror::Object>::value;
}

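// Invokes `fn` with the offset of every non-primitive instance field declared
// directly on `klass`; the shadow$klass slot is skipped.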
template <typename F>
void ForInstanceReferenceField(art::mirror::Class* klass, F fn) NO_THREAD_SAFETY_ANALYSIS {
  for (art::ArtField& af : klass->GetIFields()) {
    if (af.IsPrimitiveType() ||
        af.GetOffset().Uint32Value() == art::mirror::Object::ClassOffset().Uint32Value()) {
      continue;
    }
    fn(af.GetOffset());
  }
}

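// Size in bytes of `n` when encoded as a protobuf varint (7 payload bits per
// byte): e.g. values up to 127 take one byte, 128..16383 take two.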
size_t EncodedSize(uint64_t n) {
  if (n == 0) return 1;
  return 1 + static_cast<size_t>(art::MostSignificantBit(n)) / 7;
}

// Returns all the references that `*obj` (an object of type `*klass`) is holding.
std::vector<std::pair<std::string, art::mirror::Object*>> GetReferences(art::mirror::Object* obj,
                                                                        art::mirror::Class* klass)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  std::vector<std::pair<std::string, art::mirror::Object*>> referred_objects;
  ReferredObjectsFinder objf(&referred_objects);

  if (klass->GetClassFlags() != art::mirror::kClassFlagNormal &&
      klass->GetClassFlags() != art::mirror::kClassFlagPhantomReference) {
    obj->VisitReferences(objf, art::VoidFunctor());
  } else {
    for (art::mirror::Class* cls = klass; cls != nullptr; cls = cls->GetSuperClass().Ptr()) {
      ForInstanceReferenceField(cls,
                                [obj, objf](art::MemberOffset offset) NO_THREAD_SAFETY_ANALYSIS {
                                  objf(art::ObjPtr<art::mirror::Object>(obj),
                                       offset,
                                       /*is_static=*/false);
                                });
    }
  }
  return referred_objects;
}

// Returns the base for delta encoding all the `referred_objects`. If delta
// encoding would waste space, returns 0.
uint64_t EncodeBaseObjId(
    const std::vector<std::pair<std::string, art::mirror::Object*>>& referred_objects,
    const art::mirror::Object* min_nonnull_ptr) REQUIRES_SHARED(art::Locks::mutator_lock_) {
  uint64_t base_obj_id = GetObjectId(min_nonnull_ptr);
  if (base_obj_id <= 1) {
    return 0;
  }

  // We need to decrement the base for object ids so that we can tell apart
  // null references.
  base_obj_id--;
  uint64_t bytes_saved = 0;
  for (const auto& p : referred_objects) {
    art::mirror::Object* referred_obj = p.second;
    if (!referred_obj) {
      continue;
    }
    uint64_t referred_obj_id = GetObjectId(referred_obj);
    bytes_saved += EncodedSize(referred_obj_id) - EncodedSize(referred_obj_id - base_obj_id);
  }

  // +1 for storing the field id.
  if (bytes_saved <= EncodedSize(base_obj_id) + 1) {
    // Subtracting the base ptr gains fewer bytes than it takes to store it.
    return 0;
  }
  return base_obj_id;
}

// Helper to keep intermediate state while dumping objects and classes from ART into
// perfetto.protos.HeapGraph.
class HeapGraphDumper {
 public:
  // Instances of classes whose name is in `ignored_types` will be ignored.
  explicit HeapGraphDumper(const std::vector<std::string>& ignored_types)
      : ignored_types_(ignored_types),
        reference_field_ids_(std::make_unique<protozero::PackedVarInt>()),
        reference_object_ids_(std::make_unique<protozero::PackedVarInt>()) {}

  // Dumps a heap graph from `*runtime` and writes it to `writer`.
  void Dump(art::Runtime* runtime, Writer& writer) REQUIRES(art::Locks::mutator_lock_) {
    DumpRootObjects(runtime, writer);

    DumpObjects(runtime, writer);

    WriteInternedData(writer);
  }

 private:
  // Dumps the root objects from `*runtime` to `writer`.
  void DumpRootObjects(art::Runtime* runtime, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    std::map<art::RootType, std::vector<art::mirror::Object*>> root_objects;
    RootFinder rcf(&root_objects);
    runtime->VisitRoots(&rcf);
    std::unique_ptr<protozero::PackedVarInt> object_ids(new protozero::PackedVarInt);
    for (const auto& p : root_objects) {
      const art::RootType root_type = p.first;
      const std::vector<art::mirror::Object*>& children = p.second;
      perfetto::protos::pbzero::HeapGraphRoot* root_proto = writer.GetHeapGraph()->add_roots();
      root_proto->set_root_type(ToProtoType(root_type));
      for (art::mirror::Object* obj : children) {
        if (writer.will_create_new_packet()) {
          root_proto->set_object_ids(*object_ids);
          object_ids->Reset();
          root_proto = writer.GetHeapGraph()->add_roots();
          root_proto->set_root_type(ToProtoType(root_type));
        }
        object_ids->Append(GetObjectId(obj));
      }
      root_proto->set_object_ids(*object_ids);
      object_ids->Reset();
    }
  }

  // Dumps all the objects from `*runtime` to `writer`.
  void DumpObjects(art::Runtime* runtime, Writer& writer) REQUIRES(art::Locks::mutator_lock_) {
    runtime->GetHeap()->VisitObjectsPaused(
        [this, &writer](art::mirror::Object* obj)
            REQUIRES_SHARED(art::Locks::mutator_lock_) { WriteOneObject(obj, writer); });
  }

  // Writes all the previously accumulated (while dumping objects and roots) interned data to
  // `writer`.
  void WriteInternedData(Writer& writer) {
    for (const auto& p : interned_locations_) {
      const std::string& str = p.first;
      uint64_t id = p.second;

      perfetto::protos::pbzero::InternedString* location_proto =
          writer.GetHeapGraph()->add_location_names();
      location_proto->set_iid(id);
      location_proto->set_str(reinterpret_cast<const uint8_t*>(str.c_str()), str.size());
    }
    for (const auto& p : interned_fields_) {
      const std::string& str = p.first;
      uint64_t id = p.second;

      perfetto::protos::pbzero::InternedString* field_proto =
          writer.GetHeapGraph()->add_field_names();
      field_proto->set_iid(id);
      field_proto->set_str(reinterpret_cast<const uint8_t*>(str.c_str()), str.size());
    }
  }

  // Writes `*obj` into `writer`.
  void WriteOneObject(art::mirror::Object* obj, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (obj->IsClass()) {
      WriteClass(obj->AsClass().Ptr(), writer);
    }

    art::mirror::Class* klass = obj->GetClass();
    uintptr_t class_ptr = reinterpret_cast<uintptr_t>(klass);
    // We need to synthesize a new type for Class<Foo>, which does not exist
    // in the runtime. Otherwise, all the static members of all classes would be
    // attributed to java.lang.Class.
    if (klass->IsClassClass()) {
      class_ptr = WriteSyntheticClassFromObj(obj, writer);
    }

    if (IsIgnored(obj)) {
      return;
    }

    auto class_id = FindOrAppend(&interned_classes_, class_ptr);

    uint64_t object_id = GetObjectId(obj);
    perfetto::protos::pbzero::HeapGraphObject* object_proto = writer.GetHeapGraph()->add_objects();
    if (prev_object_id_ && prev_object_id_ < object_id) {
      object_proto->set_id_delta(object_id - prev_object_id_);
    } else {
      object_proto->set_id(object_id);
    }
    prev_object_id_ = object_id;
    object_proto->set_type_id(class_id);

    // Arrays / strings are magic and have an instance-dependent size.
    if (obj->SizeOf() != klass->GetObjectSize()) {
      object_proto->set_self_size(obj->SizeOf());
    }

    FillReferences(obj, klass, object_proto);

    FillFieldValues(obj, klass, object_proto);
  }

  // Writes `*klass` into `writer`.
  void WriteClass(art::mirror::Class* klass, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    perfetto::protos::pbzero::HeapGraphType* type_proto = writer.GetHeapGraph()->add_types();
    type_proto->set_id(FindOrAppend(&interned_classes_, reinterpret_cast<uintptr_t>(klass)));
    type_proto->set_class_name(PrettyType(klass));
    type_proto->set_location_id(FindOrAppend(&interned_locations_, klass->GetLocation()));
    type_proto->set_object_size(klass->GetObjectSize());
    type_proto->set_kind(ProtoClassKind(klass->GetClassFlags()));
    type_proto->set_classloader_id(GetObjectId(klass->GetClassLoader().Ptr()));
    if (klass->GetSuperClass().Ptr()) {
      type_proto->set_superclass_id(FindOrAppend(
          &interned_classes_, reinterpret_cast<uintptr_t>(klass->GetSuperClass().Ptr())));
    }
    ForInstanceReferenceField(
        klass, [klass, this](art::MemberOffset offset) NO_THREAD_SAFETY_ANALYSIS {
          auto art_field = art::ArtField::FindInstanceFieldWithOffset(klass, offset.Uint32Value());
          reference_field_ids_->Append(
              FindOrAppend(&interned_fields_, art_field->PrettyField(true)));
        });
    type_proto->set_reference_field_id(*reference_field_ids_);
    reference_field_ids_->Reset();
  }

  // Writes into `writer` a synthetic class that represents a type used only by `*obj`.
  uintptr_t WriteSyntheticClassFromObj(art::mirror::Object* obj, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    CHECK(obj->IsClass());
    perfetto::protos::pbzero::HeapGraphType* type_proto = writer.GetHeapGraph()->add_types();
    // All pointers are at least multiples of two, so this way we can make sure
    // we are not colliding with a real class.
    uintptr_t class_ptr = reinterpret_cast<uintptr_t>(obj) | 1;
    auto class_id = FindOrAppend(&interned_classes_, class_ptr);
    type_proto->set_id(class_id);
    type_proto->set_class_name(obj->PrettyTypeOf());
    type_proto->set_location_id(FindOrAppend(&interned_locations_, obj->AsClass()->GetLocation()));
    return class_ptr;
  }

  // Fills `*object_proto` with all the references held by `*obj` (an object of type `*klass`).
  void FillReferences(art::mirror::Object* obj,
                      art::mirror::Class* klass,
                      perfetto::protos::pbzero::HeapGraphObject* object_proto)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    std::vector<std::pair<std::string, art::mirror::Object*>> referred_objects =
        GetReferences(obj, klass);

    art::mirror::Object* min_nonnull_ptr = FilterIgnoredReferencesAndFindMin(referred_objects);

    uint64_t base_obj_id = EncodeBaseObjId(referred_objects, min_nonnull_ptr);

    const bool emit_field_ids = klass->GetClassFlags() != art::mirror::kClassFlagObjectArray &&
                                klass->GetClassFlags() != art::mirror::kClassFlagNormal &&
                                klass->GetClassFlags() != art::mirror::kClassFlagPhantomReference;

    for (const auto& p : referred_objects) {
      const std::string& field_name = p.first;
      art::mirror::Object* referred_obj = p.second;
      if (emit_field_ids) {
        reference_field_ids_->Append(FindOrAppend(&interned_fields_, field_name));
      }
      uint64_t referred_obj_id = GetObjectId(referred_obj);
      if (referred_obj_id) {
        referred_obj_id -= base_obj_id;
      }
      reference_object_ids_->Append(referred_obj_id);
    }
    if (emit_field_ids) {
      object_proto->set_reference_field_id(*reference_field_ids_);
      reference_field_ids_->Reset();
    }
    if (base_obj_id) {
      // The field is called `reference_field_id_base`, but it has always been used as a base for
      // `reference_object_id`. It should be called `reference_object_id_base`.
      object_proto->set_reference_field_id_base(base_obj_id);
    }
    object_proto->set_reference_object_id(*reference_object_ids_);
    reference_object_ids_->Reset();
  }

  // Iterates all the `referred_objects` and sets all the objects that are supposed to be ignored
  // to nullptr. Returns the object with the smallest address (ignoring nullptr).
  art::mirror::Object* FilterIgnoredReferencesAndFindMin(
      std::vector<std::pair<std::string, art::mirror::Object*>>& referred_objects) const
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::mirror::Object* min_nonnull_ptr = nullptr;
    for (auto& p : referred_objects) {
      art::mirror::Object*& referred_obj = p.second;
      if (referred_obj == nullptr)
        continue;
      if (IsIgnored(referred_obj)) {
        referred_obj = nullptr;
        continue;
      }
      if (min_nonnull_ptr == nullptr || min_nonnull_ptr > referred_obj) {
        min_nonnull_ptr = referred_obj;
      }
    }
    return min_nonnull_ptr;
  }

  // Fills `*object_proto` with the value of a subset of potentially interesting fields of `*obj`
  // (an object of type `*klass`).
  void FillFieldValues(art::mirror::Object* obj,
                       art::mirror::Class* klass,
                       perfetto::protos::pbzero::HeapGraphObject* object_proto) const
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (obj->IsClass() || klass->IsClassClass()) {
      return;
    }

    for (art::mirror::Class* cls = klass; cls != nullptr; cls = cls->GetSuperClass().Ptr()) {
      if (cls->IsArrayClass()) {
        continue;
      }

      if (cls->DescriptorEquals("Llibcore/util/NativeAllocationRegistry;")) {
        art::ArtField* af = cls->FindDeclaredInstanceField(
            "size", art::Primitive::Descriptor(art::Primitive::kPrimLong));
        if (af) {
          object_proto->set_native_allocation_registry_size_field(af->GetLong(obj));
        }
      }
    }
  }

  // Returns true if `*obj` has a type that's supposed to be ignored.
  bool IsIgnored(art::mirror::Object* obj) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (obj->IsClass()) {
      return false;
    }
    art::mirror::Class* klass = obj->GetClass();
    return std::find(ignored_types_.begin(), ignored_types_.end(), PrettyType(klass)) !=
           ignored_types_.end();
  }

  // Name of classes whose instances should be ignored.
  const std::vector<std::string> ignored_types_;

  // Make sure that intern ID 0 (default proto value for a uint64_t) always maps to ""
  // (default proto value for a string) or to 0 (default proto value for a uint64).

  // Map from string (the field name) to its index in perfetto.protos.HeapGraph.field_names
  std::map<std::string, uint64_t> interned_fields_{{"", 0}};
  // Map from string (the location name) to its index in perfetto.protos.HeapGraph.location_names
  std::map<std::string, uint64_t> interned_locations_{{"", 0}};
  // Map from addr (the class pointer) to its id in perfetto.protos.HeapGraph.types
  std::map<uintptr_t, uint64_t> interned_classes_{{0, 0}};

  // Temporary buffers: used locally in some methods and then cleared.
  std::unique_ptr<protozero::PackedVarInt> reference_field_ids_;
  std::unique_ptr<protozero::PackedVarInt> reference_object_ids_;

  // Id of the previous object that was dumped. Used for delta encoding.
  uint64_t prev_object_id_ = 0;
};

void DumpPerfetto(art::Thread* self) {
  pid_t parent_pid = getpid();
  LOG(INFO) << "preparing to dump heap for " << parent_pid;

  // Need to take a heap dump while GC isn't running. See the comment in
  // Heap::VisitObjects(). Also we need the critical section to avoid visiting
  // the same object twice. See b/34967844.
  //
  // We need to do this before the fork, because otherwise it can deadlock
  // waiting for the GC, as all other threads get terminated by the clone, but
  // their locks are not released.
  // This does not perfectly solve all fork-related issues, as there could still be threads that
  // are unaffected by ScopedSuspendAll and in a non-fork-friendly situation
  // (e.g. inside a malloc holding a lock). This situation is quite rare, and in that case we will
  // hit the watchdog in the grand-child process if it gets stuck.
  std::optional<art::gc::ScopedGCCriticalSection> gcs(std::in_place, self, art::gc::kGcCauseHprof,
                                                      art::gc::kCollectorTypeHprof);

  std::optional<art::ScopedSuspendAll> ssa(std::in_place, __FUNCTION__, /* long_suspend=*/ true);

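  // Fork so the dump runs against a copy-on-write snapshot of the suspended
  // heap while the original process resumes; the child daemonizes and the
  // actual dump happens in the resulting grand-child.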
  pid_t pid = fork();
  if (pid == -1) {
    // Fork error.
    PLOG(ERROR) << "fork";
    return;
  }
  if (pid != 0) {
    // Parent
    // Stop the thread suspension as soon as possible to allow the rest of the application to
    // continue while we waitpid here.
    ssa.reset();
    gcs.reset();
    for (size_t i = 0;; ++i) {
      if (i == 1000) {
        // The child hasn't exited for 1 second (and all it was supposed to do was fork itself).
        // Give up and SIGKILL it. The next waitpid should succeed.
        LOG(ERROR) << "perfetto_hprof child timed out. Sending SIGKILL.";
        kill(pid, SIGKILL);
      }
      // Busy waiting here will introduce some extra latency, but that is okay because we have
      // already unsuspended all other threads. This runs on the perfetto_hprof_listener, which
      // is not needed for progress of the app itself.
      int stat_loc;
      pid_t wait_result = waitpid(pid, &stat_loc, WNOHANG);
      if (wait_result == -1 && errno != EINTR) {
        if (errno != ECHILD) {
          // This hopefully never happens (should only be EINVAL).
          PLOG(FATAL_WITHOUT_ABORT) << "waitpid";
        }
        // If we get ECHILD, the parent process was handling SIGCHLD, or did a wildcard wait.
        // The child is no longer here either way, so that's good enough for us.
        break;
      } else if (wait_result > 0) {
        break;
      } else {  // wait_result == 0 || errno == EINTR.
        usleep(1000);
      }
    }
    return;
  }

  // The following code is only executed by the child of the original process.

  // Uninstall signal handler, so we don't trigger a profile on it.
  if (sigaction(kJavaHeapprofdSignal, &g_orig_act, nullptr) != 0) {
    close(g_signal_pipe_fds[0]);
    close(g_signal_pipe_fds[1]);
    PLOG(FATAL) << "Failed to sigaction";
    return;
  }

  // daemon() creates a new process that is the grand-child of the original process, and exits.
  if (daemon(0, 0) == -1) {
    PLOG(FATAL) << "daemon";
  }

  // The following code is only executed by the grand-child of the original process.

  // Make sure that this is the first thing we do after forking, so if anything
  // below hangs, the fork will go away from the watchdog.
  ArmWatchdogOrDie();

  struct timespec ts = {};
  if (clock_gettime(CLOCK_BOOTTIME, &ts) != 0) {
    LOG(FATAL) << "Failed to get boottime.";
  }
  uint64_t timestamp = ts.tv_sec * 1000000000LL + ts.tv_nsec;

  WaitForDataSource(self);

  JavaHprofDataSource::Trace(
      [parent_pid, timestamp](JavaHprofDataSource::TraceContext ctx)
          NO_THREAD_SAFETY_ANALYSIS {
            bool dump_smaps;
            std::vector<std::string> ignored_types;
            {
              auto ds = ctx.GetDataSourceLocked();
              if (!ds || !ds->enabled()) {
                if (ds) ds->Finish();
                LOG(INFO) << "skipping irrelevant data source.";
                return;
              }
              dump_smaps = ds->dump_smaps();
              ignored_types = ds->ignored_types();
            }
            LOG(INFO) << "dumping heap for " << parent_pid;
            if (dump_smaps) {
              DumpSmaps(&ctx);
            }
            Writer writer(parent_pid, &ctx, timestamp);
            HeapGraphDumper dumper(ignored_types);

            dumper.Dump(art::Runtime::Current(), writer);

            writer.Finalize();
            ctx.Flush([] {
              art::MutexLock lk(JavaHprofDataSource::art_thread(), GetStateMutex());
              g_state = State::kEnd;
              GetStateCV().Broadcast(JavaHprofDataSource::art_thread());
            });
            // Wait for the Flush that will happen on the Perfetto thread.
            {
              art::MutexLock lk(JavaHprofDataSource::art_thread(), GetStateMutex());
              while (g_state != State::kEnd) {
                GetStateCV().Wait(JavaHprofDataSource::art_thread());
              }
            }
            {
              auto ds = ctx.GetDataSourceLocked();
              if (ds) {
                ds->Finish();
              } else {
                LOG(ERROR) << "datasource timed out (duration_ms + datasource_stop_timeout_ms) "
                              "before dump finished";
              }
            }
          });

  LOG(INFO) << "finished dumping heap for " << parent_pid;
  // Prevent the `atexit` handlers from running. We do not want to call cleanup
  // functions the parent process has registered.
  art::FastExit(0);
}

// The plugin initialization function.
extern "C" bool ArtPlugin_Initialize() {
  if (art::Runtime::Current() == nullptr) {
    return false;
  }
  art::Thread* self = art::Thread::Current();
  {
    art::MutexLock lk(self, GetStateMutex());
    if (g_state != State::kUninitialized) {
      LOG(ERROR) << "perfetto_hprof already initialized. state: " << g_state;
      return false;
    }
    g_state = State::kWaitForListener;
  }

  if (pipe2(g_signal_pipe_fds, O_CLOEXEC) == -1) {
    PLOG(ERROR) << "Failed to pipe";
    return false;
  }

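  // The signal handler does only async-signal-safe work: it stashes the
  // requesting tracing session ID from the signal payload and writes a single
  // byte to wake the listener thread.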
  struct sigaction act = {};
  act.sa_flags = SA_SIGINFO | SA_RESTART;
  act.sa_sigaction = [](int, siginfo_t* si, void*) {
    requested_tracing_session_id = si->si_value.sival_int;
    if (write(g_signal_pipe_fds[1], kByte, sizeof(kByte)) == -1) {
      PLOG(ERROR) << "Failed to trigger heap dump";
    }
  };

  // TODO(fmayer): We can probably use the SignalCatcher thread here to not
  // have an idle thread.
  if (sigaction(kJavaHeapprofdSignal, &act, &g_orig_act) != 0) {
    close(g_signal_pipe_fds[0]);
    close(g_signal_pipe_fds[1]);
    PLOG(ERROR) << "Failed to sigaction";
    return false;
  }

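  // Detached listener thread: blocks reading the pipe and triggers a dump for
  // every byte written by the signal handler.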
  std::thread th([] {
    art::Runtime* runtime = art::Runtime::Current();
    if (!runtime) {
      LOG(FATAL_WITHOUT_ABORT) << "no runtime in perfetto_hprof_listener";
      return;
    }
    if (!runtime->AttachCurrentThread("perfetto_hprof_listener", /*as_daemon=*/ true,
                                      runtime->GetSystemThreadGroup(), /*create_peer=*/ false)) {
      LOG(ERROR) << "failed to attach thread.";
      {
        art::MutexLock lk(nullptr, GetStateMutex());
        g_state = State::kUninitialized;
        GetStateCV().Broadcast(nullptr);
      }

      return;
    }
    art::Thread* self = art::Thread::Current();
    if (!self) {
      LOG(FATAL_WITHOUT_ABORT) << "no thread in perfetto_hprof_listener";
      return;
    }
    {
      art::MutexLock lk(self, GetStateMutex());
      if (g_state == State::kWaitForListener) {
        g_state = State::kWaitForStart;
        GetStateCV().Broadcast(self);
      }
    }
    char buf[1];
    for (;;) {
      int res;
      do {
        res = read(g_signal_pipe_fds[0], buf, sizeof(buf));
      } while (res == -1 && errno == EINTR);

      if (res <= 0) {
        if (res == -1) {
          PLOG(ERROR) << "failed to read";
        }
        close(g_signal_pipe_fds[0]);
        return;
      }

      perfetto_hprof::DumpPerfetto(self);
    }
  });
  th.detach();

  return true;
}

1068extern "C" bool ArtPlugin_Deinitialize() {
1069 if (sigaction(kJavaHeapprofdSignal, &g_orig_act, nullptr) != 0) {
1070 PLOG(ERROR) << "failed to reset signal handler";
1071 // We cannot close the pipe if the signal handler wasn't unregistered,
1072 // to avoid receiving SIGPIPE.
1073 return false;
1074 }
1075 close(g_signal_pipe_fds[1]);
1076
1077 art::Thread* self = art::Thread::Current();
1078 art::MutexLock lk(self, GetStateMutex());
Florian Mayerfa082fb2020-05-15 14:07:53 +02001079 // Wait until after the thread was registered to the runtime. This is so
1080 // we do not attempt to register it with the runtime after it had been torn
1081 // down (ArtPlugin_Deinitialize gets called in the Runtime dtor).
1082 while (g_state == State::kWaitForListener) {
1083 GetStateCV().Wait(art::Thread::Current());
Florian Mayer07710c52019-09-16 15:53:38 +00001084 }
Florian Mayerfa082fb2020-05-15 14:07:53 +02001085 g_state = State::kUninitialized;
1086 GetStateCV().Broadcast(self);
Florian Mayer07710c52019-09-16 15:53:38 +00001087 return true;
1088}
1089
}  // namespace perfetto_hprof

namespace perfetto {

PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(perfetto_hprof::JavaHprofDataSource);

}  // namespace perfetto