blob: efd105b0a67c0f421117a899e595b2eb265bf327 [file] [log] [blame]
Florian Mayer07710c52019-09-16 15:53:38 +00001/*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "perfetto_hprof"
18
19#include "perfetto_hprof.h"
20
21#include <android-base/logging.h>
22#include <fcntl.h>
23#include <inttypes.h>
24#include <sched.h>
25#include <signal.h>
26#include <sys/stat.h>
27#include <sys/types.h>
Florian Mayer6d41e572020-01-24 15:13:59 +000028#include <sys/wait.h>
Florian Mayer07710c52019-09-16 15:53:38 +000029#include <thread>
Florian Mayerc99a2312019-12-17 11:07:34 +000030#include <time.h>
Florian Mayer07710c52019-09-16 15:53:38 +000031
32#include "gc/heap-visit-objects-inl.h"
33#include "gc/heap.h"
34#include "gc/scoped_gc_critical_section.h"
35#include "mirror/object-refvisitor-inl.h"
36#include "nativehelper/scoped_local_ref.h"
Florian Mayer4bbc62b2019-09-25 12:13:35 +010037#include "perfetto/profiling/normalize.h"
Florian Mayer07710c52019-09-16 15:53:38 +000038#include "perfetto/trace/interned_data/interned_data.pbzero.h"
39#include "perfetto/trace/profiling/heap_graph.pbzero.h"
40#include "perfetto/trace/profiling/profile_common.pbzero.h"
Florian Mayer4bbc62b2019-09-25 12:13:35 +010041#include "perfetto/config/profiling/java_hprof_config.pbzero.h"
Florian Mayer2c5dfe12019-11-14 11:22:25 +000042#include "perfetto/protozero/packed_repeated_fields.h"
Florian Mayer07710c52019-09-16 15:53:38 +000043#include "perfetto/tracing.h"
44#include "runtime-inl.h"
45#include "runtime_callbacks.h"
46#include "scoped_thread_state_change-inl.h"
47#include "thread_list.h"
48#include "well_known_classes.h"
49
50// There are three threads involved in this:
51// * listener thread: this is idle in the background when this plugin gets loaded, and waits
52// for data on on g_signal_pipe_fds.
53// * signal thread: an arbitrary thread that handles the signal and writes data to
54// g_signal_pipe_fds.
55// * perfetto producer thread: once the signal is received, the app forks. In the newly forked
56// child, the Perfetto Client API spawns a thread to communicate with traced.
57
58namespace perfetto_hprof {
59
60constexpr int kJavaHeapprofdSignal = __SIGRTMIN + 6;
61constexpr time_t kWatchdogTimeoutSec = 120;
Florian Mayer2246a4e2020-02-24 16:16:41 +000062// This needs to be lower than the maximum acceptable chunk size, because this
63// is checked *before* writing another submessage. We conservatively assume
64// submessages can be up to 100k here for a 500k chunk size.
65// DropBox has a 500k chunk limit, and each chunk needs to parse as a proto.
66constexpr uint32_t kPacketSizeThreshold = 400000;
Florian Mayer07710c52019-09-16 15:53:38 +000067constexpr char kByte[1] = {'x'};
68static art::Mutex& GetStateMutex() {
69 static art::Mutex state_mutex("perfetto_hprof_state_mutex", art::LockLevel::kGenericBottomLock);
70 return state_mutex;
71}
72
73static art::ConditionVariable& GetStateCV() {
74 static art::ConditionVariable state_cv("perfetto_hprof_state_cv", GetStateMutex());
75 return state_cv;
76}
77
78static State g_state = State::kUninitialized;
79
80// Pipe to signal from the signal handler into a worker thread that handles the
81// dump requests.
82int g_signal_pipe_fds[2];
83static struct sigaction g_orig_act = {};
84
// Interns |s| into |m|: returns the existing id when the string was seen
// before, otherwise assigns the next sequential id (the pre-insertion map
// size) and returns that. Callers seed the map with {"", 0} so that intern
// id 0 (the proto default for a uint64) always decodes to the empty string.
uint64_t FindOrAppend(std::map<std::string, uint64_t>* m,
                      const std::string& s) {
  // try_emplace does a single lookup and only constructs the mapped value
  // (and node) when the key is absent. m->size() is evaluated before the
  // insertion happens, so a new entry gets the pre-insertion size as its id —
  // identical numbering to the previous find-then-emplace implementation.
  auto it = m->try_emplace(s, m->size()).first;
  return it->second;
}
93
94void ArmWatchdogOrDie() {
95 timer_t timerid{};
96 struct sigevent sev {};
97 sev.sigev_notify = SIGEV_SIGNAL;
98 sev.sigev_signo = SIGKILL;
99
100 if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
101 // This only gets called in the child, so we can fatal without impacting
102 // the app.
103 PLOG(FATAL) << "failed to create watchdog timer";
104 }
105
106 struct itimerspec its {};
107 its.it_value.tv_sec = kWatchdogTimeoutSec;
108
109 if (timer_settime(timerid, 0, &its, nullptr) == -1) {
110 // This only gets called in the child, so we can fatal without impacting
111 // the app.
112 PLOG(FATAL) << "failed to arm watchdog timer";
113 }
114}
115
Florian Mayer4bbc62b2019-09-25 12:13:35 +0100116constexpr size_t kMaxCmdlineSize = 512;
117
// Perfetto data source for the Java heap graph. A session targets this
// process either by pid or by (normalized) process cmdline; OnSetup decides
// whether this instance is one of the targets and records that in enabled_.
class JavaHprofDataSource : public perfetto::DataSource<JavaHprofDataSource> {
 public:
  // Stall (rather than drop) when the shared memory buffer is exhausted: a
  // heap graph with missing chunks would not parse into a usable graph.
  constexpr static perfetto::BufferExhaustedPolicy kBufferExhaustedPolicy =
      perfetto::BufferExhaustedPolicy::kStall;
  void OnSetup(const SetupArgs& args) override {
    // This is on the heap as it triggers -Wframe-larger-than.
    std::unique_ptr<perfetto::protos::pbzero::JavaHprofConfig::Decoder> cfg(
        new perfetto::protos::pbzero::JavaHprofConfig::Decoder(
            args.config->java_hprof_config_raw()));

    // First matching criterion: an explicit pid list in the config.
    uint64_t self_pid = static_cast<uint64_t>(getpid());
    for (auto pid_it = cfg->pid(); pid_it; ++pid_it) {
      if (*pid_it == self_pid) {
        enabled_ = true;
        return;
      }
    }

    // Second criterion: match our /proc/self/cmdline against the configured
    // process names, after normalizing both sides the same way.
    if (cfg->has_process_cmdline()) {
      int fd = open("/proc/self/cmdline", O_RDONLY | O_CLOEXEC);
      if (fd == -1) {
        PLOG(ERROR) << "failed to open /proc/self/cmdline";
        return;
      }
      char cmdline[kMaxCmdlineSize];
      // Leave room for the manually appended '\0' terminator below.
      ssize_t rd = read(fd, cmdline, sizeof(cmdline) - 1);
      if (rd == -1) {
        PLOG(ERROR) << "failed to read /proc/self/cmdline";
      }
      // Close before the early return so the fd never leaks.
      close(fd);
      if (rd == -1) {
        return;
      }
      cmdline[rd] = '\0';
      char* cmdline_ptr = cmdline;
      ssize_t sz = perfetto::profiling::NormalizeCmdLine(&cmdline_ptr, static_cast<size_t>(rd + 1));
      if (sz == -1) {
        PLOG(ERROR) << "failed to normalize cmdline";
      }
      for (auto it = cfg->process_cmdline(); it; ++it) {
        std::string other = (*it).ToStdString();
        // Append \0 to make this a C string.
        other.resize(other.size() + 1);
        char* other_ptr = &(other[0]);
        ssize_t other_sz = perfetto::profiling::NormalizeCmdLine(&other_ptr, other.size());
        if (other_sz == -1) {
          PLOG(ERROR) << "failed to normalize other cmdline";
          continue;
        }
        // Note: if our own normalization failed (sz == -1), other_sz != -1
        // here, so this comparison can never spuriously match.
        if (sz == other_sz && strncmp(cmdline_ptr, other_ptr, static_cast<size_t>(sz)) == 0) {
          enabled_ = true;
          return;
        }
      }
    }
  }

  // Whether this tracing session targets this process (decided in OnSetup).
  bool enabled() { return enabled_; }

  void OnStart(const StartArgs&) override {
    if (!enabled()) {
      return;
    }
    // Wake DumpPerfetto's WaitForDataSource(), which blocks on kStart.
    art::MutexLock lk(art_thread(), GetStateMutex());
    if (g_state == State::kWaitForStart) {
      g_state = State::kStart;
      GetStateCV().Broadcast(art_thread());
    }
  }

  void OnStop(const StopArgs&) override {}

  // Returns the art::Thread to use for lock annotations on the Perfetto
  // producer thread. Currently always nullptr; see TODO below.
  static art::Thread* art_thread() {
    // TODO(fmayer): Attach the Perfetto producer thread to ART and give it a name. This is
    // not trivial, we cannot just attach the first time this method is called, because
    // AttachCurrentThread deadlocks with the ConditionVariable::Wait in WaitForDataSource.
    //
    // We should attach the thread as soon as the Client API spawns it, but that needs more
    // complicated plumbing.
    return nullptr;
  }

 private:
  bool enabled_ = false;
  static art::Thread* self_;
};
204
205art::Thread* JavaHprofDataSource::self_ = nullptr;
206
207
208void WaitForDataSource(art::Thread* self) {
209 perfetto::TracingInitArgs args;
210 args.backends = perfetto::BackendType::kSystemBackend;
211 perfetto::Tracing::Initialize(args);
212
213 perfetto::DataSourceDescriptor dsd;
214 dsd.set_name("android.java_hprof");
215 JavaHprofDataSource::Register(dsd);
216
217 LOG(INFO) << "waiting for data source";
218
219 art::MutexLock lk(self, GetStateMutex());
220 while (g_state != State::kStart) {
221 GetStateCV().Wait(self);
222 }
223}
224
225class Writer {
226 public:
Florian Mayerc99a2312019-12-17 11:07:34 +0000227 Writer(pid_t parent_pid, JavaHprofDataSource::TraceContext* ctx, uint64_t timestamp)
Florian Mayer2246a4e2020-02-24 16:16:41 +0000228 : parent_pid_(parent_pid), ctx_(ctx), timestamp_(timestamp),
229 last_written_(ctx_->written()) {}
230
231 // Return whether the next call to GetHeapGraph will create a new TracePacket.
232 bool will_create_new_packet() {
233 return !heap_graph_ || ctx_->written() - last_written_ > kPacketSizeThreshold;
234 }
Florian Mayer07710c52019-09-16 15:53:38 +0000235
236 perfetto::protos::pbzero::HeapGraph* GetHeapGraph() {
Florian Mayer2246a4e2020-02-24 16:16:41 +0000237 if (will_create_new_packet()) {
238 CreateNewHeapGraph();
Florian Mayer07710c52019-09-16 15:53:38 +0000239 }
240 return heap_graph_;
241 }
242
Florian Mayer2246a4e2020-02-24 16:16:41 +0000243 void CreateNewHeapGraph() {
244 if (heap_graph_) {
245 heap_graph_->set_continued(true);
246 }
247 Finalize();
248
249 uint64_t written = ctx_->written();
250
251 trace_packet_ = ctx_->NewTracePacket();
252 trace_packet_->set_timestamp(timestamp_);
253 heap_graph_ = trace_packet_->set_heap_graph();
254 heap_graph_->set_pid(parent_pid_);
255 heap_graph_->set_index(index_++);
256
257 last_written_ = written;
258 }
259
Florian Mayer07710c52019-09-16 15:53:38 +0000260 void Finalize() {
261 if (trace_packet_) {
262 trace_packet_->Finalize();
263 }
264 heap_graph_ = nullptr;
265 }
266
267 ~Writer() { Finalize(); }
268
269 private:
270 const pid_t parent_pid_;
271 JavaHprofDataSource::TraceContext* const ctx_;
Florian Mayerc99a2312019-12-17 11:07:34 +0000272 const uint64_t timestamp_;
Florian Mayer07710c52019-09-16 15:53:38 +0000273
Florian Mayer2246a4e2020-02-24 16:16:41 +0000274 uint64_t last_written_ = 0;
275
Florian Mayer07710c52019-09-16 15:53:38 +0000276 perfetto::DataSource<JavaHprofDataSource>::TraceContext::TracePacketHandle
277 trace_packet_;
278 perfetto::protos::pbzero::HeapGraph* heap_graph_ = nullptr;
279
280 uint64_t index_ = 0;
Florian Mayer07710c52019-09-16 15:53:38 +0000281};
282
283class ReferredObjectsFinder {
284 public:
285 explicit ReferredObjectsFinder(
286 std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects)
Florian Mayer07710c52019-09-16 15:53:38 +0000287 : referred_objects_(referred_objects) {}
288
289 // For art::mirror::Object::VisitReferences.
290 void operator()(art::ObjPtr<art::mirror::Object> obj, art::MemberOffset offset,
291 bool is_static) const
292 REQUIRES_SHARED(art::Locks::mutator_lock_) {
293 art::mirror::Object* ref = obj->GetFieldObject<art::mirror::Object>(offset);
294 art::ArtField* field;
295 if (is_static) {
296 field = art::ArtField::FindStaticFieldWithOffset(obj->AsClass(), offset.Uint32Value());
297 } else {
298 field = art::ArtField::FindInstanceFieldWithOffset(obj->GetClass(), offset.Uint32Value());
299 }
300 std::string field_name = "";
301 if (field != nullptr) {
Florian Mayer22be0652020-02-06 17:51:46 +0000302 field_name = field->PrettyField(/*with_type=*/true);
Florian Mayer07710c52019-09-16 15:53:38 +0000303 }
304 referred_objects_->emplace_back(std::move(field_name), ref);
305 }
306
307 void VisitRootIfNonNull(art::mirror::CompressedReference<art::mirror::Object>* root
308 ATTRIBUTE_UNUSED) const {}
309 void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root
310 ATTRIBUTE_UNUSED) const {}
311
312 private:
313 // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
314 // fork.
315 std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects_;
316};
317
Florian Mayer46392352019-10-11 14:25:49 +0100318class RootFinder : public art::SingleRootVisitor {
319 public:
320 explicit RootFinder(
321 std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects)
322 : root_objects_(root_objects) {}
323
324 void VisitRoot(art::mirror::Object* root, const art::RootInfo& info) override {
325 (*root_objects_)[info.GetType()].emplace_back(root);
326 }
327
328 private:
329 // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
330 // fork.
331 std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects_;
332};
333
334perfetto::protos::pbzero::HeapGraphRoot::Type ToProtoType(art::RootType art_type) {
335 switch (art_type) {
336 case art::kRootUnknown:
337 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_UNKNOWN;
338 case art::kRootJNIGlobal:
339 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JNI_GLOBAL;
340 case art::kRootJNILocal:
341 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JNI_LOCAL;
342 case art::kRootJavaFrame:
343 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JAVA_FRAME;
344 case art::kRootNativeStack:
345 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_NATIVE_STACK;
346 case art::kRootStickyClass:
347 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_STICKY_CLASS;
348 case art::kRootThreadBlock:
349 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_THREAD_BLOCK;
350 case art::kRootMonitorUsed:
351 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_MONITOR_USED;
352 case art::kRootThreadObject:
353 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_THREAD_OBJECT;
354 case art::kRootInternedString:
355 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_INTERNED_STRING;
356 case art::kRootFinalizing:
357 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_FINALIZING;
358 case art::kRootDebugger:
359 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_DEBUGGER;
360 case art::kRootReferenceCleanup:
361 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_REFERENCE_CLEANUP;
362 case art::kRootVMInternal:
363 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_VM_INTERNAL;
364 case art::kRootJNIMonitor:
365 return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JNI_MONITOR;
366 }
367}
368
// Entry point for a dump request (invoked from the listener thread after the
// signal fires). Suspends the runtime, forks, and lets a daemonized
// grand-child serialize the heap graph into a Perfetto tracing session while
// the parent resumes as soon as the fork completes.
void DumpPerfetto(art::Thread* self) {
  pid_t parent_pid = getpid();
  LOG(INFO) << "preparing to dump heap for " << parent_pid;

  // Need to take a heap dump while GC isn't running. See the comment in
  // Heap::VisitObjects(). Also we need the critical section to avoid visiting
  // the same object twice. See b/34967844.
  //
  // We need to do this before the fork, because otherwise it can deadlock
  // waiting for the GC, as all other threads get terminated by the clone, but
  // their locks are not released.
  art::gc::ScopedGCCriticalSection gcs(self, art::gc::kGcCauseHprof,
                                       art::gc::kCollectorTypeHprof);

  // Suspend all threads so the forked child inherits a self-consistent heap
  // snapshot with no mutator running.
  art::ScopedSuspendAll ssa(__FUNCTION__, /* long_suspend=*/ true);

  pid_t pid = fork();
  if (pid == -1) {
    // Fork error.
    PLOG(ERROR) << "fork";
    return;
  }
  if (pid != 0) {
    // Parent
    // Reap the intermediate child, which exits quickly after daemon().
    // Retry on EINTR so a stray signal does not leave a zombie.
    int stat_loc;
    for (;;) {
      if (waitpid(pid, &stat_loc, 0) != -1 || errno != EINTR) {
        break;
      }
    }
    return;
  }

  // The following code is only executed by the child of the original process.
  //
  // Daemon creates a new process that is the grand-child of the original process, and exits.
  if (daemon(0, 0) == -1) {
    PLOG(FATAL) << "daemon";
  }

  // The following code is only executed by the grand-child of the original process.

  // Make sure that this is the first thing we do after forking, so if anything
  // below hangs, the fork will go away from the watchdog.
  ArmWatchdogOrDie();

  // Timestamp the dump with BOOTTIME so it lines up with the rest of the
  // trace's clock domain.
  struct timespec ts = {};
  if (clock_gettime(CLOCK_BOOTTIME, &ts) != 0) {
    LOG(FATAL) << "Failed to get boottime.";
  }
  uint64_t timestamp = ts.tv_sec * 1000000000LL + ts.tv_nsec;

  // Blocks until a tracing session targeting this process starts.
  WaitForDataSource(self);

  JavaHprofDataSource::Trace(
      [parent_pid, timestamp](JavaHprofDataSource::TraceContext ctx)
          NO_THREAD_SAFETY_ANALYSIS {
            {
              // Scoped so the data source lock is dropped before writing.
              auto ds = ctx.GetDataSourceLocked();
              if (!ds || !ds->enabled()) {
                LOG(INFO) << "skipping irrelevant data source.";
                return;
              }
            }
            LOG(INFO) << "dumping heap for " << parent_pid;
            Writer writer(parent_pid, &ctx, timestamp);
            // Make sure that intern ID 0 (default proto value for a uint64_t) always maps to ""
            // (default proto value for a string).
            std::map<std::string, uint64_t> interned_fields{{"", 0}};
            std::map<std::string, uint64_t> interned_types{{"", 0}};

            // Phase 1: emit all GC roots, grouped by root type.
            std::map<art::RootType, std::vector<art::mirror::Object*>> root_objects;
            RootFinder rcf(&root_objects);
            art::Runtime::Current()->VisitRoots(&rcf);
            // Heap-allocated to avoid -Wframe-larger-than in this lambda.
            std::unique_ptr<protozero::PackedVarInt> object_ids(
                new protozero::PackedVarInt);
            for (const auto& p : root_objects) {
              const art::RootType root_type = p.first;
              const std::vector<art::mirror::Object*>& children = p.second;
              perfetto::protos::pbzero::HeapGraphRoot* root_proto =
                  writer.GetHeapGraph()->add_roots();
              root_proto->set_root_type(ToProtoType(root_type));
              for (art::mirror::Object* obj : children) {
                // Flush the packed ids into the current packet before the
                // writer rolls over to a new one, then start a fresh root
                // message of the same type in the new packet.
                if (writer.will_create_new_packet()) {
                  root_proto->set_object_ids(*object_ids);
                  object_ids->Reset();
                  root_proto = writer.GetHeapGraph()->add_roots();
                  root_proto->set_root_type(ToProtoType(root_type));
                }
                // Object ids are simply the (post-fork, stable) addresses.
                object_ids->Append(reinterpret_cast<uintptr_t>(obj));
              }
              root_proto->set_object_ids(*object_ids);
              object_ids->Reset();
            }

            // Phase 2: emit every heap object with its type, size and
            // outgoing references. Buffers are hoisted and Reset() per
            // object to avoid reallocation.
            std::unique_ptr<protozero::PackedVarInt> reference_field_ids(
                new protozero::PackedVarInt);
            std::unique_ptr<protozero::PackedVarInt> reference_object_ids(
                new protozero::PackedVarInt);

            art::Runtime::Current()->GetHeap()->VisitObjectsPaused(
                [&writer, &interned_types, &interned_fields,
                 &reference_field_ids, &reference_object_ids](
                    art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
                  perfetto::protos::pbzero::HeapGraphObject* object_proto =
                      writer.GetHeapGraph()->add_objects();
                  object_proto->set_id(reinterpret_cast<uintptr_t>(obj));
                  object_proto->set_type_id(
                      FindOrAppend(&interned_types, obj->PrettyTypeOf()));
                  object_proto->set_self_size(obj->SizeOf());

                  std::vector<std::pair<std::string, art::mirror::Object*>>
                      referred_objects;
                  ReferredObjectsFinder objf(&referred_objects);
                  obj->VisitReferences(objf, art::VoidFunctor());
                  // Parallel packed arrays: entry i of field ids corresponds
                  // to entry i of object ids.
                  for (const auto& p : referred_objects) {
                    reference_field_ids->Append(FindOrAppend(&interned_fields, p.first));
                    reference_object_ids->Append(reinterpret_cast<uintptr_t>(p.second));
                  }
                  object_proto->set_reference_field_id(*reference_field_ids);
                  object_proto->set_reference_object_id(*reference_object_ids);
                  reference_field_ids->Reset();
                  reference_object_ids->Reset();
                });

            // Phase 3: emit the interning tables built up above.
            for (const auto& p : interned_fields) {
              const std::string& str = p.first;
              uint64_t id = p.second;

              perfetto::protos::pbzero::InternedString* field_proto =
                  writer.GetHeapGraph()->add_field_names();
              field_proto->set_iid(id);
              field_proto->set_str(
                  reinterpret_cast<const uint8_t*>(str.c_str()), str.size());
            }
            for (const auto& p : interned_types) {
              const std::string& str = p.first;
              uint64_t id = p.second;

              perfetto::protos::pbzero::InternedString* type_proto =
                  writer.GetHeapGraph()->add_type_names();
              type_proto->set_iid(id);
              type_proto->set_str(reinterpret_cast<const uint8_t*>(str.c_str()),
                                  str.size());
            }

            writer.Finalize();

            // Signal the waiting loop below once the flush has completed.
            ctx.Flush([] {
              {
                art::MutexLock lk(JavaHprofDataSource::art_thread(), GetStateMutex());
                g_state = State::kEnd;
                GetStateCV().Broadcast(JavaHprofDataSource::art_thread());
              }
            });
          });

  // Block until the producer thread's flush callback reports completion.
  art::MutexLock lk(self, GetStateMutex());
  while (g_state != State::kEnd) {
    GetStateCV().Wait(self);
  }
  LOG(INFO) << "finished dumping heap for " << parent_pid;
  // Prevent the atexit handlers to run. We do not want to call cleanup
  // functions the parent process has registered.
  _exit(0);
}
535
536// The plugin initialization function.
// The plugin initialization function. Installs the dump-trigger signal
// handler, spawns the idle listener thread, and blocks until the listener is
// attached and ready. Returns false (leaving state consistent) on any setup
// failure.
extern "C" bool ArtPlugin_Initialize() {
  if (art::Runtime::Current() == nullptr) {
    return false;
  }
  art::Thread* self = art::Thread::Current();
  {
    art::MutexLock lk(self, GetStateMutex());
    if (g_state != State::kUninitialized) {
      LOG(ERROR) << "perfetto_hprof already initialized. state: " << g_state;
      return false;
    }
    g_state = State::kWaitForListener;
  }

  // O_CLOEXEC so forked/exec'd children do not inherit the trigger pipe.
  if (pipe2(g_signal_pipe_fds, O_CLOEXEC) == -1) {
    PLOG(ERROR) << "Failed to pipe";
    return false;
  }

  struct sigaction act = {};
  // SA_RESTART so the handler does not surface EINTR to unrelated syscalls.
  act.sa_flags = SA_SIGINFO | SA_RESTART;
  // Handler body is async-signal-safe: a single write() to the pipe; the
  // actual dump work happens on the listener thread.
  act.sa_sigaction = [](int, siginfo_t*, void*) {
    if (write(g_signal_pipe_fds[1], kByte, sizeof(kByte)) == -1) {
      PLOG(ERROR) << "Failed to trigger heap dump";
    }
  };

  // TODO(fmayer): We can probably use the SignalCatcher thread here to not
  // have an idle thread.
  if (sigaction(kJavaHeapprofdSignal, &act, &g_orig_act) != 0) {
    // Roll back the pipe; the handler was never installed.
    close(g_signal_pipe_fds[0]);
    close(g_signal_pipe_fds[1]);
    PLOG(ERROR) << "Failed to sigaction";
    return false;
  }

  // Listener thread: sleeps on the pipe's read end and runs DumpPerfetto for
  // every byte the signal handler writes.
  std::thread th([] {
    art::Runtime* runtime = art::Runtime::Current();
    if (!runtime) {
      LOG(FATAL_WITHOUT_ABORT) << "no runtime in perfetto_hprof_listener";
      return;
    }
    if (!runtime->AttachCurrentThread("perfetto_hprof_listener", /*as_daemon=*/ true,
                                      runtime->GetSystemThreadGroup(), /*create_peer=*/ false)) {
      LOG(ERROR) << "failed to attach thread.";
      return;
    }
    art::Thread* self = art::Thread::Current();
    if (!self) {
      LOG(FATAL_WITHOUT_ABORT) << "no thread in perfetto_hprof_listener";
      return;
    }
    {
      // Unblock ArtPlugin_Initialize, which waits for the listener below.
      art::MutexLock lk(self, GetStateMutex());
      if (g_state == State::kWaitForListener) {
        g_state = State::kWaitForStart;
        GetStateCV().Broadcast(self);
      }
    }
    char buf[1];
    for (;;) {
      int res;
      // Retry reads interrupted by signals.
      do {
        res = read(g_signal_pipe_fds[0], buf, sizeof(buf));
      } while (res == -1 && errno == EINTR);

      // EOF (write end closed by Deinitialize) or a hard error: shut down.
      if (res <= 0) {
        if (res == -1) {
          PLOG(ERROR) << "failed to read";
        }
        close(g_signal_pipe_fds[0]);
        return;
      }

      perfetto_hprof::DumpPerfetto(self);
    }
  });
  th.detach();

  // Do not return until the listener thread is up, so a signal arriving right
  // after initialization cannot be lost.
  art::MutexLock lk(art::Thread::Current(), GetStateMutex());
  while (g_state == State::kWaitForListener) {
    GetStateCV().Wait(art::Thread::Current());
  }
  return true;
}
622
623extern "C" bool ArtPlugin_Deinitialize() {
624 if (sigaction(kJavaHeapprofdSignal, &g_orig_act, nullptr) != 0) {
625 PLOG(ERROR) << "failed to reset signal handler";
626 // We cannot close the pipe if the signal handler wasn't unregistered,
627 // to avoid receiving SIGPIPE.
628 return false;
629 }
630 close(g_signal_pipe_fds[1]);
631
632 art::Thread* self = art::Thread::Current();
633 art::MutexLock lk(self, GetStateMutex());
634 if (g_state != State::kWaitForListener) {
635 g_state = State::kUninitialized;
636 GetStateCV().Broadcast(self);
637 }
638 return true;
639}
640
641} // namespace perfetto_hprof
642
namespace perfetto {

// Instantiates the out-of-line static members the perfetto::DataSource CRTP
// base requires for JavaHprofDataSource (must live in namespace perfetto).
PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(perfetto_hprof::JavaHprofDataSource);

}  // namespace perfetto