/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "perfetto_hprof"

#include "perfetto_hprof.h"

#include <android-base/logging.h>
#include <base/fast_exit.h>
#include <fcntl.h>
#include <inttypes.h>
#include <sched.h>
#include <signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <thread>
#include <time.h>

#include <limits>
#include <optional>
#include <type_traits>

#include "gc/heap-visit-objects-inl.h"
#include "gc/heap.h"
#include "gc/scoped_gc_critical_section.h"
#include "mirror/object-refvisitor-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "perfetto/profiling/parse_smaps.h"
#include "perfetto/trace/interned_data/interned_data.pbzero.h"
#include "perfetto/trace/profiling/heap_graph.pbzero.h"
#include "perfetto/trace/profiling/profile_common.pbzero.h"
#include "perfetto/trace/profiling/smaps.pbzero.h"
#include "perfetto/config/profiling/java_hprof_config.pbzero.h"
#include "perfetto/protozero/packed_repeated_fields.h"
#include "perfetto/tracing.h"
#include "runtime-inl.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"
#include "dex/descriptors_names.h"

// There are three threads involved in this:
// * listener thread: this is idle in the background when this plugin gets loaded, and waits
//   for data on g_signal_pipe_fds.
// * signal thread: an arbitrary thread that handles the signal and writes data to
//   g_signal_pipe_fds.
// * perfetto producer thread: once the signal is received, the app forks. In the newly forked
//   child, the Perfetto Client API spawns a thread to communicate with traced.
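//
// A rough sketch of the flow (illustrative; the sending side lives in
// heapprofd, not in this file): the daemon is assumed to signal the target app
// with sigqueue(), carrying the tracing session id in si_value. The handler
// installed in ArtPlugin_Initialize() below records that id and writes one
// byte into g_signal_pipe_fds[1]; the listener thread wakes up and calls
// DumpPerfetto(), which forks so that the (grand-)child can dump the heap and
// talk to traced while the app itself keeps running.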

namespace perfetto_hprof {

constexpr int kJavaHeapprofdSignal = __SIGRTMIN + 6;
constexpr time_t kWatchdogTimeoutSec = 120;
// DropBox has a 500k chunk limit, and each chunk needs to parse as a proto.
// This threshold needs to be lower than the maximum acceptable chunk size,
// because it is checked *before* writing another submessage. We conservatively
// assume submessages can be up to 100k here for a 500k chunk size.
constexpr uint32_t kPacketSizeThreshold = 400000;
constexpr char kByte[1] = {'x'};
static art::Mutex& GetStateMutex() {
  static art::Mutex state_mutex("perfetto_hprof_state_mutex", art::LockLevel::kGenericBottomLock);
  return state_mutex;
}

static art::ConditionVariable& GetStateCV() {
  static art::ConditionVariable state_cv("perfetto_hprof_state_cv", GetStateMutex());
  return state_cv;
}

static int requested_tracing_session_id = 0;
static State g_state = State::kUninitialized;

// Pipe to signal from the signal handler into a worker thread that handles the
// dump requests.
int g_signal_pipe_fds[2];
static struct sigaction g_orig_act = {};

template <typename T>
uint64_t FindOrAppend(std::map<T, uint64_t>* m, const T& s) {
  auto it = m->find(s);
  if (it == m->end()) {
    std::tie(it, std::ignore) = m->emplace(s, m->size());
  }
  return it->second;
}
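
// Illustrative usage (not part of the original file): the interning maps used
// by HeapGraphDumper below are seeded with {"", 0} / {0, 0}, so id 0 is
// reserved for the default value.
//
//   std::map<std::string, uint64_t> m{{"", 0}};
//   FindOrAppend(&m, std::string("a"));  // == 1 (newly appended)
//   FindOrAppend(&m, std::string("a"));  // == 1 (already interned)
//   FindOrAppend(&m, std::string("b"));  // == 2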

void ArmWatchdogOrDie() {
  timer_t timerid{};
  struct sigevent sev {};
  sev.sigev_notify = SIGEV_SIGNAL;
  sev.sigev_signo = SIGKILL;

  if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
    // This only gets called in the child, so we can fatal without impacting
    // the app.
    PLOG(FATAL) << "failed to create watchdog timer";
  }

  struct itimerspec its {};
  its.it_value.tv_sec = kWatchdogTimeoutSec;

  if (timer_settime(timerid, 0, &its, nullptr) == -1) {
    // This only gets called in the child, so we can fatal without impacting
    // the app.
    PLOG(FATAL) << "failed to arm watchdog timer";
  }
}

bool StartsWith(const std::string& str, const std::string& prefix) {
  return str.compare(0, prefix.length(), prefix) == 0;
}

// Sample entries that match one of the following:
// * start with /system/
// * start with /vendor/
// * start with /data/app/
// * contain "extracted in memory from Y", where Y matches any of the above
bool ShouldSampleSmapsEntry(const perfetto::profiling::SmapsEntry& e) {
  if (StartsWith(e.pathname, "/system/") || StartsWith(e.pathname, "/vendor/") ||
      StartsWith(e.pathname, "/data/app/")) {
    return true;
  }
  if (StartsWith(e.pathname, "[anon:")) {
    if (e.pathname.find("extracted in memory from /system/") != std::string::npos) {
      return true;
    }
    if (e.pathname.find("extracted in memory from /vendor/") != std::string::npos) {
      return true;
    }
    if (e.pathname.find("extracted in memory from /data/app/") != std::string::npos) {
      return true;
    }
  }
  return false;
}
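
// For example (illustrative paths, not taken from the original source):
//   "/system/framework/framework.jar"                             -> sampled
//   "[anon:dalvik-classes.dex extracted in memory from
//    /data/app/com.example/base.apk]"                             -> sampled
//   "[anon:libc_malloc]" or "/dev/ashmem/..."                     -> skipped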

class JavaHprofDataSource : public perfetto::DataSource<JavaHprofDataSource> {
 public:
  constexpr static perfetto::BufferExhaustedPolicy kBufferExhaustedPolicy =
      perfetto::BufferExhaustedPolicy::kStall;
  void OnSetup(const SetupArgs& args) override {
    uint64_t normalized_cfg_tracing_session_id =
        args.config->tracing_session_id() % std::numeric_limits<int32_t>::max();
    if (requested_tracing_session_id < 0) {
      LOG(ERROR) << "invalid requested tracing session id " << requested_tracing_session_id;
      return;
    }
    if (static_cast<uint64_t>(requested_tracing_session_id) != normalized_cfg_tracing_session_id) {
      return;
    }

    // This is on the heap as it triggers -Wframe-larger-than.
    std::unique_ptr<perfetto::protos::pbzero::JavaHprofConfig::Decoder> cfg(
        new perfetto::protos::pbzero::JavaHprofConfig::Decoder(
            args.config->java_hprof_config_raw()));

    dump_smaps_ = cfg->dump_smaps();
    for (auto it = cfg->ignored_types(); it; ++it) {
      std::string name = (*it).ToStdString();
      ignored_types_.emplace_back(std::move(name));
    }
    // This tracing session ID matches the requesting tracing session ID, so we know heapprofd
    // has verified it targets this process.
    enabled_ = true;
  }

  bool dump_smaps() { return dump_smaps_; }
  bool enabled() { return enabled_; }

  void OnStart(const StartArgs&) override {
    if (!enabled()) {
      return;
    }
    art::MutexLock lk(art_thread(), GetStateMutex());
    if (g_state == State::kWaitForStart) {
      g_state = State::kStart;
      GetStateCV().Broadcast(art_thread());
    }
  }

  // This data source can be used with a trace config that has a short
  // duration_ms but a long datasource_stop_timeout_ms. In that case, OnStop
  // is (in general) called before the dump is done; we then handle the stop
  // asynchronously, and notify the tracing service once we are done. If
  // OnStop is instead called after the dump is done (but before the process
  // has exited), we just acknowledge the request.
  void OnStop(const StopArgs& a) override {
    art::MutexLock lk(art_thread(), finish_mutex_);
    if (is_finished_) {
      return;
    }
    is_stopped_ = true;
    async_stop_ = std::move(a.HandleStopAsynchronously());
  }

  static art::Thread* art_thread() {
    // TODO(fmayer): Attach the Perfetto producer thread to ART and give it a name. This is
    // not trivial, we cannot just attach the first time this method is called, because
    // AttachCurrentThread deadlocks with the ConditionVariable::Wait in WaitForDataSource.
    //
    // We should attach the thread as soon as the Client API spawns it, but that needs more
    // complicated plumbing.
    return nullptr;
  }

  std::vector<std::string> ignored_types() { return ignored_types_; }

  void Finish() {
    art::MutexLock lk(art_thread(), finish_mutex_);
    if (is_stopped_) {
      async_stop_();
    } else {
      is_finished_ = true;
    }
  }

 private:
  bool enabled_ = false;
  bool dump_smaps_ = false;
  std::vector<std::string> ignored_types_;
  static art::Thread* self_;

  art::Mutex finish_mutex_{"perfetto_hprof_ds_mutex", art::LockLevel::kGenericBottomLock};
  bool is_finished_ = false;
  bool is_stopped_ = false;
  std::function<void()> async_stop_;
};

art::Thread* JavaHprofDataSource::self_ = nullptr;


void WaitForDataSource(art::Thread* self) {
  perfetto::TracingInitArgs args;
  args.backends = perfetto::BackendType::kSystemBackend;
  perfetto::Tracing::Initialize(args);

  perfetto::DataSourceDescriptor dsd;
  dsd.set_name("android.java_hprof");
  dsd.set_will_notify_on_stop(true);
  JavaHprofDataSource::Register(dsd);

  LOG(INFO) << "waiting for data source";

  art::MutexLock lk(self, GetStateMutex());
  while (g_state != State::kStart) {
    GetStateCV().Wait(self);
  }
}

// Helper class to write Java heap dumps to `ctx`. The whole heap dump can be
// split into multiple perfetto.protos.HeapGraph messages, to avoid making
// each message too big.
class Writer {
 public:
  Writer(pid_t pid, JavaHprofDataSource::TraceContext* ctx, uint64_t timestamp)
      : pid_(pid), ctx_(ctx), timestamp_(timestamp),
        last_written_(ctx_->written()) {}

  // Return whether the next call to GetHeapGraph will create a new TracePacket.
  bool will_create_new_packet() const {
    return !heap_graph_ || ctx_->written() - last_written_ > kPacketSizeThreshold;
  }

  perfetto::protos::pbzero::HeapGraph* GetHeapGraph() {
    if (will_create_new_packet()) {
      CreateNewHeapGraph();
    }
    return heap_graph_;
  }

  void Finalize() {
    if (trace_packet_) {
      trace_packet_->Finalize();
    }
    heap_graph_ = nullptr;
  }

  ~Writer() { Finalize(); }

 private:
  Writer(const Writer&) = delete;
  Writer& operator=(const Writer&) = delete;
  Writer(Writer&&) = delete;
  Writer& operator=(Writer&&) = delete;

  void CreateNewHeapGraph() {
    if (heap_graph_) {
      heap_graph_->set_continued(true);
    }
    Finalize();

    uint64_t written = ctx_->written();

    trace_packet_ = ctx_->NewTracePacket();
    trace_packet_->set_timestamp(timestamp_);
    heap_graph_ = trace_packet_->set_heap_graph();
    heap_graph_->set_pid(pid_);
    heap_graph_->set_index(index_++);

    last_written_ = written;
  }

  const pid_t pid_;
  JavaHprofDataSource::TraceContext* const ctx_;
  const uint64_t timestamp_;

  uint64_t last_written_ = 0;

  perfetto::DataSource<JavaHprofDataSource>::TraceContext::TracePacketHandle
      trace_packet_;
  perfetto::protos::pbzero::HeapGraph* heap_graph_ = nullptr;

  uint64_t index_ = 0;
};

class ReferredObjectsFinder {
 public:
  explicit ReferredObjectsFinder(
      std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects)
      : referred_objects_(referred_objects) {}

  // For art::mirror::Object::VisitReferences.
  void operator()(art::ObjPtr<art::mirror::Object> obj, art::MemberOffset offset,
                  bool is_static) const
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (offset.Uint32Value() == art::mirror::Object::ClassOffset().Uint32Value()) {
      // Skip shadow$klass pointer.
      return;
    }
    art::mirror::Object* ref = obj->GetFieldObject<art::mirror::Object>(offset);
    art::ArtField* field;
    if (is_static) {
      field = art::ArtField::FindStaticFieldWithOffset(obj->AsClass(), offset.Uint32Value());
    } else {
      field = art::ArtField::FindInstanceFieldWithOffset(obj->GetClass(), offset.Uint32Value());
    }
    std::string field_name = "";
    if (field != nullptr) {
      field_name = field->PrettyField(/*with_type=*/true);
    }
    referred_objects_->emplace_back(std::move(field_name), ref);
  }

  void VisitRootIfNonNull(art::mirror::CompressedReference<art::mirror::Object>* root
                              ATTRIBUTE_UNUSED) const {}
  void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root
                     ATTRIBUTE_UNUSED) const {}

 private:
  // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
  // fork.
  std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects_;
};

class RootFinder : public art::SingleRootVisitor {
 public:
  explicit RootFinder(
      std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects)
      : root_objects_(root_objects) {}

  void VisitRoot(art::mirror::Object* root, const art::RootInfo& info) override {
    (*root_objects_)[info.GetType()].emplace_back(root);
  }

 private:
  // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
  // fork.
  std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects_;
};

perfetto::protos::pbzero::HeapGraphRoot::Type ToProtoType(art::RootType art_type) {
  using perfetto::protos::pbzero::HeapGraphRoot;
  switch (art_type) {
    case art::kRootUnknown:
      return HeapGraphRoot::ROOT_UNKNOWN;
    case art::kRootJNIGlobal:
      return HeapGraphRoot::ROOT_JNI_GLOBAL;
    case art::kRootJNILocal:
      return HeapGraphRoot::ROOT_JNI_LOCAL;
    case art::kRootJavaFrame:
      return HeapGraphRoot::ROOT_JAVA_FRAME;
    case art::kRootNativeStack:
      return HeapGraphRoot::ROOT_NATIVE_STACK;
    case art::kRootStickyClass:
      return HeapGraphRoot::ROOT_STICKY_CLASS;
    case art::kRootThreadBlock:
      return HeapGraphRoot::ROOT_THREAD_BLOCK;
    case art::kRootMonitorUsed:
      return HeapGraphRoot::ROOT_MONITOR_USED;
    case art::kRootThreadObject:
      return HeapGraphRoot::ROOT_THREAD_OBJECT;
    case art::kRootInternedString:
      return HeapGraphRoot::ROOT_INTERNED_STRING;
    case art::kRootFinalizing:
      return HeapGraphRoot::ROOT_FINALIZING;
    case art::kRootDebugger:
      return HeapGraphRoot::ROOT_DEBUGGER;
    case art::kRootReferenceCleanup:
      return HeapGraphRoot::ROOT_REFERENCE_CLEANUP;
    case art::kRootVMInternal:
      return HeapGraphRoot::ROOT_VM_INTERNAL;
    case art::kRootJNIMonitor:
      return HeapGraphRoot::ROOT_JNI_MONITOR;
  }
}

perfetto::protos::pbzero::HeapGraphType::Kind ProtoClassKind(uint32_t class_flags) {
  using perfetto::protos::pbzero::HeapGraphType;
  switch (class_flags) {
    case art::mirror::kClassFlagNormal:
      return HeapGraphType::KIND_NORMAL;
    case art::mirror::kClassFlagNoReferenceFields:
      return HeapGraphType::KIND_NOREFERENCES;
    case art::mirror::kClassFlagString | art::mirror::kClassFlagNoReferenceFields:
      return HeapGraphType::KIND_STRING;
    case art::mirror::kClassFlagObjectArray:
      return HeapGraphType::KIND_ARRAY;
    case art::mirror::kClassFlagClass:
      return HeapGraphType::KIND_CLASS;
    case art::mirror::kClassFlagClassLoader:
      return HeapGraphType::KIND_CLASSLOADER;
    case art::mirror::kClassFlagDexCache:
      return HeapGraphType::KIND_DEXCACHE;
    case art::mirror::kClassFlagSoftReference:
      return HeapGraphType::KIND_SOFT_REFERENCE;
    case art::mirror::kClassFlagWeakReference:
      return HeapGraphType::KIND_WEAK_REFERENCE;
    case art::mirror::kClassFlagFinalizerReference:
      return HeapGraphType::KIND_FINALIZER_REFERENCE;
    case art::mirror::kClassFlagPhantomReference:
      return HeapGraphType::KIND_PHANTOM_REFERENCE;
    default:
      return HeapGraphType::KIND_UNKNOWN;
  }
}

std::string PrettyType(art::mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS {
  if (klass == nullptr) {
    return "(raw)";
  }
  std::string temp;
  std::string result(art::PrettyDescriptor(klass->GetDescriptor(&temp)));
  return result;
}

void DumpSmaps(JavaHprofDataSource::TraceContext* ctx) {
  FILE* smaps = fopen("/proc/self/smaps", "r");
  if (smaps != nullptr) {
    auto trace_packet = ctx->NewTracePacket();
    auto* smaps_packet = trace_packet->set_smaps_packet();
    smaps_packet->set_pid(getpid());
    perfetto::profiling::ParseSmaps(smaps,
        [&smaps_packet](const perfetto::profiling::SmapsEntry& e) {
          if (ShouldSampleSmapsEntry(e)) {
            auto* smaps_entry = smaps_packet->add_entries();
            smaps_entry->set_path(e.pathname);
            smaps_entry->set_size_kb(e.size_kb);
            smaps_entry->set_private_dirty_kb(e.private_dirty_kb);
            smaps_entry->set_swap_kb(e.swap_kb);
          }
        });
    fclose(smaps);
  } else {
    PLOG(ERROR) << "failed to open smaps";
  }
}

uint64_t GetObjectId(const art::mirror::Object* obj) {
  return reinterpret_cast<uint64_t>(obj) / std::alignment_of<art::mirror::Object>::value;
}
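
// Illustrative arithmetic (assuming, for the sake of the example, an 8-byte
// alignment of art::mirror::Object): a pointer 0x7f001000 maps to object id
// 0xfe00200. Dividing by the alignment keeps ids dense, which makes the
// varint delta encoding below cheaper.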

template <typename F>
void ForInstanceReferenceField(art::mirror::Class* klass, F fn) NO_THREAD_SAFETY_ANALYSIS {
  for (art::ArtField& af : klass->GetIFields()) {
    if (af.IsPrimitiveType() ||
        af.GetOffset().Uint32Value() == art::mirror::Object::ClassOffset().Uint32Value()) {
      continue;
    }
    fn(af.GetOffset());
  }
}

size_t EncodedSize(uint64_t n) {
  if (n == 0) return 1;
  return 1 + static_cast<size_t>(art::MostSignificantBit(n)) / 7;
}
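
// Worked examples (proto varints carry 7 payload bits per byte):
//   EncodedSize(0)   == 1
//   EncodedSize(127) == 1  (MostSignificantBit == 6, 6 / 7 == 0)
//   EncodedSize(128) == 2  (MostSignificantBit == 7, 7 / 7 == 1)
//   EncodedSize(300) == 2  (MostSignificantBit == 8, 8 / 7 == 1)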

// Returns all the references that `*obj` (an object of type `*klass`) is holding.
std::vector<std::pair<std::string, art::mirror::Object*>> GetReferences(art::mirror::Object* obj,
                                                                        art::mirror::Class* klass)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  std::vector<std::pair<std::string, art::mirror::Object*>> referred_objects;
  ReferredObjectsFinder objf(&referred_objects);

  if (klass->GetClassFlags() != art::mirror::kClassFlagNormal) {
    obj->VisitReferences(objf, art::VoidFunctor());
  } else {
    for (art::mirror::Class* cls = klass; cls != nullptr; cls = cls->GetSuperClass().Ptr()) {
      ForInstanceReferenceField(cls,
                                [obj, objf](art::MemberOffset offset) NO_THREAD_SAFETY_ANALYSIS {
                                  objf(art::ObjPtr<art::mirror::Object>(obj),
                                       offset,
                                       /*is_static=*/false);
                                });
    }
  }
  return referred_objects;
}

// Returns the base for delta encoding all the `referred_objects`. If delta
// encoding would waste space, returns 0.
uint64_t EncodeBaseObjId(
    const std::vector<std::pair<std::string, art::mirror::Object*>>& referred_objects,
    const art::mirror::Object* min_nonnull_ptr) REQUIRES_SHARED(art::Locks::mutator_lock_) {
  uint64_t base_obj_id = GetObjectId(min_nonnull_ptr);
  if (base_obj_id <= 1) {
    return 0;
  }

  // We need to decrement the base for object ids so that we can tell apart
  // null references.
  base_obj_id--;
  uint64_t bytes_saved = 0;
  for (const auto& p : referred_objects) {
    art::mirror::Object* referred_obj = p.second;
    if (!referred_obj) {
      continue;
    }
    uint64_t referred_obj_id = GetObjectId(referred_obj);
    bytes_saved += EncodedSize(referred_obj_id) - EncodedSize(referred_obj_id - base_obj_id);
  }

  // +1 for storing the field id.
  if (bytes_saved <= EncodedSize(base_obj_id) + 1) {
    // Subtracting the base ptr gains fewer bytes than it takes to store it.
    return 0;
  }
  return base_obj_id;
}
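
// Worked example (illustrative ids, not from the original source): with
// referred object ids {5000, 5002, 5004, 5010}, the minimum is 5000, so
// base_obj_id == 4999 and the ids are stored as {1, 3, 5, 11}. Each shrinks
// from two varint bytes to one, saving 4 bytes; that beats the 2 bytes for
// storing the base plus 1 for its field id, so the base is kept. A null
// reference still encodes as 0 because the base was decremented first.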

// Helper to keep intermediate state while dumping objects and classes from ART into
// perfetto.protos.HeapGraph.
class HeapGraphDumper {
 public:
  // Instances of classes whose name is in `ignored_types` will be ignored.
  explicit HeapGraphDumper(const std::vector<std::string>& ignored_types)
      : ignored_types_(ignored_types),
        reference_field_ids_(std::make_unique<protozero::PackedVarInt>()),
        reference_object_ids_(std::make_unique<protozero::PackedVarInt>()) {}

  // Dumps a heap graph from `*runtime` and writes it to `writer`.
  void Dump(art::Runtime* runtime, Writer& writer) REQUIRES(art::Locks::mutator_lock_) {
    DumpRootObjects(runtime, writer);

    DumpObjects(runtime, writer);

    WriteInternedData(writer);
  }

 private:
  // Dumps the root objects from `*runtime` to `writer`.
  void DumpRootObjects(art::Runtime* runtime, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    std::map<art::RootType, std::vector<art::mirror::Object*>> root_objects;
    RootFinder rcf(&root_objects);
    runtime->VisitRoots(&rcf);
    std::unique_ptr<protozero::PackedVarInt> object_ids(new protozero::PackedVarInt);
    for (const auto& p : root_objects) {
      const art::RootType root_type = p.first;
      const std::vector<art::mirror::Object*>& children = p.second;
      perfetto::protos::pbzero::HeapGraphRoot* root_proto = writer.GetHeapGraph()->add_roots();
      root_proto->set_root_type(ToProtoType(root_type));
      for (art::mirror::Object* obj : children) {
        if (writer.will_create_new_packet()) {
          root_proto->set_object_ids(*object_ids);
          object_ids->Reset();
          root_proto = writer.GetHeapGraph()->add_roots();
          root_proto->set_root_type(ToProtoType(root_type));
        }
        object_ids->Append(GetObjectId(obj));
      }
      root_proto->set_object_ids(*object_ids);
      object_ids->Reset();
    }
  }

  // Dumps all the objects from `*runtime` to `writer`.
  void DumpObjects(art::Runtime* runtime, Writer& writer) REQUIRES(art::Locks::mutator_lock_) {
    runtime->GetHeap()->VisitObjectsPaused(
        [this, &writer](art::mirror::Object* obj)
            REQUIRES_SHARED(art::Locks::mutator_lock_) { WriteOneObject(obj, writer); });
  }

  // Writes all the previously accumulated (while dumping objects and roots) interned data to
  // `writer`.
  void WriteInternedData(Writer& writer) {
    for (const auto& p : interned_locations_) {
      const std::string& str = p.first;
      uint64_t id = p.second;

      perfetto::protos::pbzero::InternedString* location_proto =
          writer.GetHeapGraph()->add_location_names();
      location_proto->set_iid(id);
      location_proto->set_str(reinterpret_cast<const uint8_t*>(str.c_str()), str.size());
    }
    for (const auto& p : interned_fields_) {
      const std::string& str = p.first;
      uint64_t id = p.second;

      perfetto::protos::pbzero::InternedString* field_proto =
          writer.GetHeapGraph()->add_field_names();
      field_proto->set_iid(id);
      field_proto->set_str(reinterpret_cast<const uint8_t*>(str.c_str()), str.size());
    }
  }

  // Writes `*obj` into `writer`.
  void WriteOneObject(art::mirror::Object* obj, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (obj->IsClass()) {
      WriteClass(obj->AsClass().Ptr(), writer);
    }

    art::mirror::Class* klass = obj->GetClass();
    uintptr_t class_ptr = reinterpret_cast<uintptr_t>(klass);
    // We need to synthesize a new type for Class<Foo>, which does not exist
    // in the runtime. Otherwise, all the static members of all classes would be
    // attributed to java.lang.Class.
    if (klass->IsClassClass()) {
      class_ptr = WriteSyntheticClassFromObj(obj, writer);
    }

    if (IsIgnored(obj)) {
      return;
    }

    auto class_id = FindOrAppend(&interned_classes_, class_ptr);

    uint64_t object_id = GetObjectId(obj);
    perfetto::protos::pbzero::HeapGraphObject* object_proto = writer.GetHeapGraph()->add_objects();
    if (prev_object_id_ && prev_object_id_ < object_id) {
      object_proto->set_id_delta(object_id - prev_object_id_);
    } else {
      object_proto->set_id(object_id);
    }
    prev_object_id_ = object_id;
    object_proto->set_type_id(class_id);

    // Arrays / strings are magic and have an instance dependent size.
    if (obj->SizeOf() != klass->GetObjectSize()) {
      object_proto->set_self_size(obj->SizeOf());
    }

    FillReferences(obj, klass, object_proto);
  }

  // Writes `*klass` into `writer`.
  void WriteClass(art::mirror::Class* klass, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    perfetto::protos::pbzero::HeapGraphType* type_proto = writer.GetHeapGraph()->add_types();
    type_proto->set_id(FindOrAppend(&interned_classes_, reinterpret_cast<uintptr_t>(klass)));
    type_proto->set_class_name(PrettyType(klass));
    type_proto->set_location_id(FindOrAppend(&interned_locations_, klass->GetLocation()));
    type_proto->set_object_size(klass->GetObjectSize());
    type_proto->set_kind(ProtoClassKind(klass->GetClassFlags()));
    type_proto->set_classloader_id(GetObjectId(klass->GetClassLoader().Ptr()));
    if (klass->GetSuperClass().Ptr()) {
      type_proto->set_superclass_id(FindOrAppend(
          &interned_classes_, reinterpret_cast<uintptr_t>(klass->GetSuperClass().Ptr())));
    }
    ForInstanceReferenceField(
        klass, [klass, this](art::MemberOffset offset) NO_THREAD_SAFETY_ANALYSIS {
          auto art_field = art::ArtField::FindInstanceFieldWithOffset(klass, offset.Uint32Value());
          reference_field_ids_->Append(
              FindOrAppend(&interned_fields_, art_field->PrettyField(true)));
        });
    type_proto->set_reference_field_id(*reference_field_ids_);
    reference_field_ids_->Reset();
  }

  // Creates a fake class that represents a type only used by `*obj` into `writer`.
  uintptr_t WriteSyntheticClassFromObj(art::mirror::Object* obj, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    CHECK(obj->IsClass());
    perfetto::protos::pbzero::HeapGraphType* type_proto = writer.GetHeapGraph()->add_types();
    // All pointers are at least multiples of two, so this way we can make sure
    // we are not colliding with a real class.
    uintptr_t class_ptr = reinterpret_cast<uintptr_t>(obj) | 1;
    auto class_id = FindOrAppend(&interned_classes_, class_ptr);
    type_proto->set_id(class_id);
    type_proto->set_class_name(obj->PrettyTypeOf());
    type_proto->set_location_id(FindOrAppend(&interned_locations_, obj->AsClass()->GetLocation()));
    return class_ptr;
  }
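
  // For example (illustrative): a java.lang.Class instance at address 0x7000
  // is interned under key 0x7001. Real art::mirror::Class pointers are at
  // least 2-byte aligned (so always even), which guarantees the synthetic key
  // cannot collide with one of them.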

  // Fills `*object_proto` with all the references held by `*obj` (an object of type `*klass`).
  void FillReferences(art::mirror::Object* obj,
                      art::mirror::Class* klass,
                      perfetto::protos::pbzero::HeapGraphObject* object_proto)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    std::vector<std::pair<std::string, art::mirror::Object*>> referred_objects =
        GetReferences(obj, klass);

    art::mirror::Object* min_nonnull_ptr = FilterIgnoredReferencesAndFindMin(referred_objects);

    uint64_t base_obj_id = EncodeBaseObjId(referred_objects, min_nonnull_ptr);

    const bool emit_field_ids = klass->GetClassFlags() != art::mirror::kClassFlagObjectArray &&
                                klass->GetClassFlags() != art::mirror::kClassFlagNormal;

    for (const auto& p : referred_objects) {
      const std::string& field_name = p.first;
      art::mirror::Object* referred_obj = p.second;
      if (emit_field_ids) {
        reference_field_ids_->Append(FindOrAppend(&interned_fields_, field_name));
      }
      uint64_t referred_obj_id = GetObjectId(referred_obj);
      if (referred_obj_id) {
        referred_obj_id -= base_obj_id;
      }
      reference_object_ids_->Append(referred_obj_id);
    }
    if (emit_field_ids) {
      object_proto->set_reference_field_id(*reference_field_ids_);
      reference_field_ids_->Reset();
    }
    if (base_obj_id) {
      // The field is called `reference_field_id_base`, but it has always been used as a base for
      // `reference_object_id`. It should be called `reference_object_id_base`.
      object_proto->set_reference_field_id_base(base_obj_id);
    }
    object_proto->set_reference_object_id(*reference_object_ids_);
    reference_object_ids_->Reset();
  }

  // Iterates all the `referred_objects` and sets all the objects that are supposed to be ignored
  // to nullptr. Returns the object with the smallest address (ignoring nullptr).
  art::mirror::Object* FilterIgnoredReferencesAndFindMin(
      std::vector<std::pair<std::string, art::mirror::Object*>>& referred_objects) const
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::mirror::Object* min_nonnull_ptr = nullptr;
    for (auto& p : referred_objects) {
      art::mirror::Object*& referred_obj = p.second;
      if (referred_obj == nullptr)
        continue;
      if (IsIgnored(referred_obj)) {
        referred_obj = nullptr;
        continue;
      }
      if (min_nonnull_ptr == nullptr || min_nonnull_ptr > referred_obj) {
        min_nonnull_ptr = referred_obj;
      }
    }
    return min_nonnull_ptr;
  }

  // Returns true if `*obj` has a type that's supposed to be ignored.
  bool IsIgnored(art::mirror::Object* obj) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (obj->IsClass()) {
      return false;
    }
    art::mirror::Class* klass = obj->GetClass();
    return std::find(ignored_types_.begin(), ignored_types_.end(), PrettyType(klass)) !=
           ignored_types_.end();
  }

  // Name of classes whose instances should be ignored.
  const std::vector<std::string> ignored_types_;

  // Make sure that intern ID 0 (default proto value for a uint64_t) always maps to ""
  // (default proto value for a string) or to 0 (default proto value for a uint64).

  // Map from string (the field name) to its index in perfetto.protos.HeapGraph.field_names
  std::map<std::string, uint64_t> interned_fields_{{"", 0}};
  // Map from string (the location name) to its index in perfetto.protos.HeapGraph.location_names
  std::map<std::string, uint64_t> interned_locations_{{"", 0}};
  // Map from addr (the class pointer) to its id in perfetto.protos.HeapGraph.types
  std::map<uintptr_t, uint64_t> interned_classes_{{0, 0}};

  // Temporary buffers: used locally in some methods and then cleared.
  std::unique_ptr<protozero::PackedVarInt> reference_field_ids_;
  std::unique_ptr<protozero::PackedVarInt> reference_object_ids_;

  // Id of the previous object that was dumped. Used for delta encoding.
  uint64_t prev_object_id_ = 0;
};

void DumpPerfetto(art::Thread* self) {
  pid_t parent_pid = getpid();
  LOG(INFO) << "preparing to dump heap for " << parent_pid;

  // Need to take a heap dump while GC isn't running. See the comment in
  // Heap::VisitObjects(). Also we need the critical section to avoid visiting
  // the same object twice. See b/34967844.
  //
  // We need to do this before the fork, because otherwise it can deadlock
  // waiting for the GC, as all other threads get terminated by the clone, but
  // their locks are not released.
  // This does not perfectly solve all fork-related issues, as there could still be threads that
  // are unaffected by ScopedSuspendAll and in a non-fork-friendly situation
  // (e.g. inside a malloc holding a lock). This situation is quite rare, and in that case we will
  // hit the watchdog in the grand-child process if it gets stuck.
  std::optional<art::gc::ScopedGCCriticalSection> gcs(std::in_place, self, art::gc::kGcCauseHprof,
                                                      art::gc::kCollectorTypeHprof);

  std::optional<art::ScopedSuspendAll> ssa(std::in_place, __FUNCTION__, /* long_suspend=*/ true);

  pid_t pid = fork();
  if (pid == -1) {
    // Fork error.
    PLOG(ERROR) << "fork";
    return;
  }
  if (pid != 0) {
    // Parent
    // Stop the thread suspension as soon as possible to allow the rest of the application to
    // continue while we waitpid here.
    ssa.reset();
    gcs.reset();
    for (size_t i = 0;; ++i) {
      if (i == 1000) {
        // The child hasn't exited for 1 second (and all it was supposed to do was fork itself).
        // Give up and SIGKILL it. The next waitpid should succeed.
        LOG(ERROR) << "perfetto_hprof child timed out. Sending SIGKILL.";
        kill(pid, SIGKILL);
      }
      // Busy waiting here will introduce some extra latency, but that is okay because we have
      // already unsuspended all other threads. This runs on the perfetto_hprof_listener, which
      // is not needed for progress of the app itself.
      int stat_loc;
      pid_t wait_result = waitpid(pid, &stat_loc, WNOHANG);
      if (wait_result == -1 && errno != EINTR) {
        if (errno != ECHILD) {
          // This hopefully never happens (should only be EINVAL).
          PLOG(FATAL_WITHOUT_ABORT) << "waitpid";
        }
        // If we get ECHILD, the parent process was handling SIGCHLD, or did a wildcard wait.
        // The child is no longer here either way, so that's good enough for us.
        break;
      } else if (wait_result > 0) {
        break;
      } else {  // wait_result == 0 || errno == EINTR.
        usleep(1000);
      }
    }
    return;
  }

  // The following code is only executed by the child of the original process.

  // Uninstall signal handler, so we don't trigger a profile on it.
  if (sigaction(kJavaHeapprofdSignal, &g_orig_act, nullptr) != 0) {
    close(g_signal_pipe_fds[0]);
    close(g_signal_pipe_fds[1]);
    PLOG(FATAL) << "Failed to sigaction";
    return;
  }

  // daemon() creates a new process that is the grand-child of the original process, and exits.
  if (daemon(0, 0) == -1) {
    PLOG(FATAL) << "daemon";
  }

  // The following code is only executed by the grand-child of the original process.

  // Make sure that this is the first thing we do after forking, so if anything
  // below hangs, the watchdog kills this forked process.
  ArmWatchdogOrDie();

  struct timespec ts = {};
  if (clock_gettime(CLOCK_BOOTTIME, &ts) != 0) {
    LOG(FATAL) << "Failed to get boottime.";
  }
  uint64_t timestamp = ts.tv_sec * 1000000000LL + ts.tv_nsec;

  WaitForDataSource(self);

  JavaHprofDataSource::Trace(
      [parent_pid, timestamp](JavaHprofDataSource::TraceContext ctx)
          NO_THREAD_SAFETY_ANALYSIS {
            bool dump_smaps;
            std::vector<std::string> ignored_types;
            {
              auto ds = ctx.GetDataSourceLocked();
              if (!ds || !ds->enabled()) {
                if (ds) ds->Finish();
                LOG(INFO) << "skipping irrelevant data source.";
                return;
              }
              dump_smaps = ds->dump_smaps();
              ignored_types = ds->ignored_types();
            }
            LOG(INFO) << "dumping heap for " << parent_pid;
            if (dump_smaps) {
              DumpSmaps(&ctx);
            }
            Writer writer(parent_pid, &ctx, timestamp);
            HeapGraphDumper dumper(ignored_types);

            dumper.Dump(art::Runtime::Current(), writer);

            writer.Finalize();
            ctx.Flush([] {
              art::MutexLock lk(JavaHprofDataSource::art_thread(), GetStateMutex());
              g_state = State::kEnd;
              GetStateCV().Broadcast(JavaHprofDataSource::art_thread());
            });
            // Wait for the Flush that will happen on the Perfetto thread.
            {
              art::MutexLock lk(JavaHprofDataSource::art_thread(), GetStateMutex());
              while (g_state != State::kEnd) {
                GetStateCV().Wait(JavaHprofDataSource::art_thread());
              }
            }
            {
              auto ds = ctx.GetDataSourceLocked();
              if (ds) {
                ds->Finish();
              } else {
                LOG(ERROR) << "datasource timed out (duration_ms + datasource_stop_timeout_ms) "
                              "before dump finished";
              }
            }
          });

  LOG(INFO) << "finished dumping heap for " << parent_pid;
  // Prevent the `atexit` handlers from running. We do not want to call cleanup
  // functions the parent process has registered.
  art::FastExit(0);
}

// The plugin initialization function.
extern "C" bool ArtPlugin_Initialize() {
  if (art::Runtime::Current() == nullptr) {
    return false;
  }
  art::Thread* self = art::Thread::Current();
  {
    art::MutexLock lk(self, GetStateMutex());
    if (g_state != State::kUninitialized) {
      LOG(ERROR) << "perfetto_hprof already initialized. state: " << g_state;
      return false;
    }
    g_state = State::kWaitForListener;
  }

  if (pipe2(g_signal_pipe_fds, O_CLOEXEC) == -1) {
    PLOG(ERROR) << "Failed to pipe";
    return false;
  }

  struct sigaction act = {};
  act.sa_flags = SA_SIGINFO | SA_RESTART;
  act.sa_sigaction = [](int, siginfo_t* si, void*) {
    requested_tracing_session_id = si->si_value.sival_int;
    if (write(g_signal_pipe_fds[1], kByte, sizeof(kByte)) == -1) {
      PLOG(ERROR) << "Failed to trigger heap dump";
    }
  };

  // TODO(fmayer): We can probably use the SignalCatcher thread here to not
  // have an idle thread.
  if (sigaction(kJavaHeapprofdSignal, &act, &g_orig_act) != 0) {
    close(g_signal_pipe_fds[0]);
    close(g_signal_pipe_fds[1]);
    PLOG(ERROR) << "Failed to sigaction";
    return false;
  }

  std::thread th([] {
    art::Runtime* runtime = art::Runtime::Current();
    if (!runtime) {
      LOG(FATAL_WITHOUT_ABORT) << "no runtime in perfetto_hprof_listener";
      return;
    }
    if (!runtime->AttachCurrentThread("perfetto_hprof_listener", /*as_daemon=*/ true,
                                      runtime->GetSystemThreadGroup(), /*create_peer=*/ false)) {
      LOG(ERROR) << "failed to attach thread.";
      {
        art::MutexLock lk(nullptr, GetStateMutex());
        g_state = State::kUninitialized;
        GetStateCV().Broadcast(nullptr);
      }

      return;
    }
    art::Thread* self = art::Thread::Current();
    if (!self) {
      LOG(FATAL_WITHOUT_ABORT) << "no thread in perfetto_hprof_listener";
      return;
    }
    {
      art::MutexLock lk(self, GetStateMutex());
      if (g_state == State::kWaitForListener) {
        g_state = State::kWaitForStart;
        GetStateCV().Broadcast(self);
      }
    }
    char buf[1];
    for (;;) {
      int res;
      do {
        res = read(g_signal_pipe_fds[0], buf, sizeof(buf));
      } while (res == -1 && errno == EINTR);

      if (res <= 0) {
        if (res == -1) {
          PLOG(ERROR) << "failed to read";
        }
        close(g_signal_pipe_fds[0]);
        return;
      }

      perfetto_hprof::DumpPerfetto(self);
    }
  });
  th.detach();

  return true;
}

extern "C" bool ArtPlugin_Deinitialize() {
  if (sigaction(kJavaHeapprofdSignal, &g_orig_act, nullptr) != 0) {
    PLOG(ERROR) << "failed to reset signal handler";
    // We cannot close the pipe if the signal handler wasn't unregistered,
    // to avoid receiving SIGPIPE.
    return false;
  }
  close(g_signal_pipe_fds[1]);

  art::Thread* self = art::Thread::Current();
  art::MutexLock lk(self, GetStateMutex());
  // Wait until after the thread has registered with the runtime. This is so
  // that we do not attempt to register it with the runtime after it has been
  // torn down (ArtPlugin_Deinitialize gets called in the Runtime dtor).
  while (g_state == State::kWaitForListener) {
    GetStateCV().Wait(art::Thread::Current());
  }
  g_state = State::kUninitialized;
  GetStateCV().Broadcast(self);
  return true;
}

}  // namespace perfetto_hprof

namespace perfetto {

PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(perfetto_hprof::JavaHprofDataSource);

}  // namespace perfetto