/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <functional>
#include <memory>
#include <set>
#include <vector>

#include "android-base/macros.h"
#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/endian_utils.h"
#include "base/enums.h"
#include "base/logging.h"
#include "base/memory_tool.h"
#include "base/safe_map.h"
#include "base/strlcpy.h"
#include "base/time_utils.h"
#include "class_linker-inl.h"
#include "class_linker.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction.h"
#include "dex/utf.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/gc_cause.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space-walk-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope-inl.h"
#include "instrumentation.h"
#include "jni/jni_internal.h"
#include "jvalue-inl.h"
#include "mirror/array-alloc-inl.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-alloc-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "nativehelper/scoped_local_ref.h"
#include "nativehelper/scoped_primitive_array.h"
#include "oat_file.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "reflective_handle.h"
#include "reflective_handle_scope-inl.h"
#include "runtime-inl.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace art {

using android::base::StringPrintf;

// Limit alloc_record_count to the two-byte big-endian (2BE) value (64k-1) that is the limit of
// the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  const size_t cap = 0xffff;
  if (alloc_record_count > cap) {
    return cap;
  }
  return alloc_record_count;
}

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

Dbg::DbgThreadLifecycleCallback Dbg::thread_lifecycle_callback_;

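// Called after a GC finishes: pushes whichever DDMS heap updates were requested to fire on GC
// (heap info and/or managed/native heap segments).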
void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

bool Dbg::IsJdwpAllowed() {
  return gJdwpAllowed;
}

// Do we need to deoptimize the stack to handle an exception?
bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
  // Deoptimization is required if at least one method in the stack needs it. However, we
  // skip frames that will be unwound (thus not executed).
  bool needs_deoptimization = false;
  StackVisitor::WalkStack(
      [&](art::StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
        // The visitor is meant to be used only when handling exceptions from compiled code.
        CHECK(!visitor->IsShadowFrame()) << "We only expect to visit compiled frame: "
                                         << ArtMethod::PrettyMethod(visitor->GetMethod());
        ArtMethod* method = visitor->GetMethod();
        if (method == nullptr) {
          // We reached an upcall and don't need to deoptimize this part of the stack
          // (ManagedFragment), so we can stop the visit.
          DCHECK(!needs_deoptimization);
          return false;
        }
        if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
          // We found a compiled frame in the stack but instrumentation is set to interpret
          // everything: we need to deoptimize.
          needs_deoptimization = true;
          return false;
        }
        if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
          // We found a deoptimized method in the stack.
          needs_deoptimization = true;
          return false;
        }
        ShadowFrame* frame = visitor->GetThread()->FindDebuggerShadowFrame(visitor->GetFrameId());
        if (frame != nullptr) {
          // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
          // deoptimize the stack to execute (and deallocate) this frame.
          needs_deoptimization = true;
          return false;
        }
        return true;
      },
      thread,
      /* context= */ nullptr,
      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
      /* check_suspended= */ true,
      /* include_transitions= */ true);
  return needs_deoptimization;
}

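// Forwards a DDM chunk to the Java-side DdmServer dispatcher and copies the reply chunk (if any)
// into |out_type| and |out_data|. Returns false if the dispatcher threw or produced no reply.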
bool Dbg::DdmHandleChunk(JNIEnv* env,
                         uint32_t type,
                         const ArrayRef<const jbyte>& data,
                         /*out*/uint32_t* out_type,
                         /*out*/std::vector<uint8_t>* out_data) {
  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(data.size()));
  if (dataArray.get() == nullptr) {
    LOG(WARNING) << "byte[] allocation failed: " << data.size();
    env->ExceptionClear();
    return false;
  }
  env->SetByteArrayRegion(dataArray.get(),
                          0,
                          data.size(),
                          reinterpret_cast<const jbyte*>(data.data()));
  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
  ScopedLocalRef<jobject> chunk(
      env,
      env->CallStaticObjectMethod(
          WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
          WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
          type, dataArray.get(), 0, data.size()));
  if (env->ExceptionCheck()) {
    Thread* self = Thread::Current();
    ScopedObjectAccess soa(self);
    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type) << std::endl
              << self->GetException()->Dump();
    self->ClearException();
    return false;
  }

  if (chunk.get() == nullptr) {
    return false;
  }

  /*
   * Pull the pieces out of the chunk. We copy the results into a
   * newly-allocated buffer that the caller can free. We don't want to
   * continue using the Chunk object because nothing has a reference to it.
   *
   * We could avoid this by returning type/data/offset/length and having
   * the caller be aware of the object lifetime issues, but that
   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
   * if we have responses for multiple chunks.
   *
   * So we're pretty much stuck with copying data around multiple times.
   */
  ScopedLocalRef<jbyteArray> replyData(
      env,
      reinterpret_cast<jbyteArray>(
          env->GetObjectField(
              chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
  jint offset = env->GetIntField(chunk.get(),
                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
  jint length = env->GetIntField(chunk.get(),
                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
  *out_type = env->GetIntField(chunk.get(),
                               WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);

  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d",
                             type,
                             replyData.get(),
                             offset,
                             length);
  out_data->resize(length);
  env->GetByteArrayRegion(replyData.get(),
                          offset,
                          length,
                          reinterpret_cast<jbyte*>(out_data->data()));

  if (env->ExceptionCheck()) {
    Thread* self = Thread::Current();
    ScopedObjectAccess soa(self);
    LOG(INFO) << StringPrintf("Exception thrown when reading response data from dispatcher 0x%08x",
                              type) << std::endl << self->GetException()->Dump();
    self->ClearException();
    return false;
  }

  return true;
}

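// Tells the Java-side DdmServer that a DDM client has connected or disconnected.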
void Dbg::DdmBroadcast(bool connect) {
  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";

  Thread* self = Thread::Current();
  if (self->GetState() != kRunnable) {
    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
    /* try anyway? */
  }

  JNIEnv* env = self->GetJniEnv();
  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
                            event);
  if (env->ExceptionCheck()) {
    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
    env->ExceptionDescribe();
    env->ExceptionClear();
  }
}

void Dbg::DdmConnected() {
  Dbg::DdmBroadcast(true);
}

void Dbg::DdmDisconnected() {
  Dbg::DdmBroadcast(false);
  gDdmThreadNotification = false;
}

/*
 * Send a notification when a thread starts, stops, or changes its name.
 *
 * Because we broadcast the full set of threads when the notifications are
 * first enabled, it's possible for "thread" to be actively executing.
 */
void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
  Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current());
  if (!gDdmThreadNotification) {
    return;
  }

  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  if (type == CHUNK_TYPE("THDE")) {
    uint8_t buf[4];
    Set4BE(&buf[0], t->GetThreadId());
    cb->DdmPublishChunk(CHUNK_TYPE("THDE"), ArrayRef<const uint8_t>(buf));
  } else {
    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
    StackHandleScope<1> hs(Thread::Current());
    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName()));
    size_t char_count = (name != nullptr) ? name->GetLength() : 0;
    const jchar* chars = (name != nullptr) ? name->GetValue() : nullptr;
    bool is_compressed = (name != nullptr) ? name->IsCompressed() : false;

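    // THCR/THNM payload: [u4] thread id, then the thread name in big-endian UTF-16. Judging by
    // the CHECK below, the Append helpers also write a [u4] character count before the chars.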
    std::vector<uint8_t> bytes;
    Append4BE(bytes, t->GetThreadId());
    if (is_compressed) {
      const uint8_t* chars_compressed = name->GetValueCompressed();
      AppendUtf16CompressedBE(bytes, chars_compressed, char_count);
    } else {
      AppendUtf16BE(bytes, chars, char_count);
    }
    CHECK_EQ(bytes.size(), char_count * 2 + sizeof(uint32_t) * 2);
    cb->DdmPublishChunk(type, ArrayRef<const uint8_t>(bytes));
  }
}

void Dbg::DdmSetThreadNotification(bool enable) {
  // Enable/disable thread notifications.
  gDdmThreadNotification = enable;
  if (enable) {
    // Use a Checkpoint to cause every currently running thread to send its own notification when
    // able. We then wait for every thread active at the time to post the creation notification.
    // Threads created later will send this themselves.
    Thread* self = Thread::Current();
    ScopedObjectAccess soa(self);
    Barrier finish_barrier(0);
    FunctionClosure fc([&](Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
      Thread* cls_self = Thread::Current();
      Locks::mutator_lock_->AssertSharedHeld(cls_self);
      Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
      finish_barrier.Pass(cls_self);
    });
    size_t checkpoints = Runtime::Current()->GetThreadList()->RunCheckpoint(&fc);
    ScopedThreadSuspension sts(self, ThreadState::kWaitingForCheckPointsToRun);
    finish_barrier.Increment(self, checkpoints);
  }
}

void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
  Dbg::DdmSendThreadNotification(t, type);
}

void Dbg::PostThreadStart(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
}

void Dbg::PostThreadDeath(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
}

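// Handles a HPIF chunk: sends heap info now for HPIF_WHEN_NOW, otherwise records when heap info
// should next be sent. Returns 1 on success, 0 if 'when' is invalid.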
int Dbg::DdmHandleHpifChunk(HpifWhen when) {
  if (when == HPIF_WHEN_NOW) {
    DdmSendHeapInfo(when);
    return 1;
  }

  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
    return 0;
  }

  gDdmHpifWhen = when;
  return 1;
}

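// Validates and records the heap segment ('when'/'what') reporting policy for either the native
// (NHSG) or the managed (HPSG) heap.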
| 385 | bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) { |
| 386 | if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) { |
| 387 | LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when); |
| 388 | return false; |
| 389 | } |
| 390 | |
| 391 | if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) { |
| 392 | LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what); |
| 393 | return false; |
| 394 | } |
| 395 | |
| 396 | if (native) { |
| 397 | gDdmNhsgWhen = when; |
| 398 | gDdmNhsgWhat = what; |
| 399 | } else { |
| 400 | gDdmHpsgWhen = when; |
| 401 | gDdmHpsgWhat = what; |
| 402 | } |
| 403 | return true; |
| 404 | } |
| 405 | |
void Dbg::DdmSendHeapInfo(HpifWhen reason) {
  // If there's a one-shot 'when', reset it.
  if (reason == gDdmHpifWhen) {
    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
      gDdmHpifWhen = HPIF_WHEN_NEVER;
    }
  }

  /*
   * Chunk HPIF (client --> server)
   *
   * Heap Info. General information about the heap,
   * suitable for a summary display.
   *
   *   [u4]: number of heaps
   *
   *   For each heap:
   *     [u4]: heap ID
   *     [u8]: timestamp in ms since Unix epoch
   *     [u1]: capture reason (same as 'when' value from server)
   *     [u4]: max heap size in bytes (-Xmx)
   *     [u4]: current heap size in bytes
   *     [u4]: current number of bytes allocated
   *     [u4]: current number of objects allocated
   */
  uint8_t heap_count = 1;
  gc::Heap* heap = Runtime::Current()->GetHeap();
  std::vector<uint8_t> bytes;
  Append4BE(bytes, heap_count);
  Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
  Append8BE(bytes, MilliTime());
  Append1BE(bytes, reason);
  Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
  Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
  Append4BE(bytes, heap->GetBytesAllocated());
  Append4BE(bytes, heap->GetObjectsAllocated());
  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
  Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(CHUNK_TYPE("HPIF"),
                                                             ArrayRef<const uint8_t>(bytes));
}

enum HpsgSolidity {
  SOLIDITY_FREE = 0,
  SOLIDITY_HARD = 1,
  SOLIDITY_SOFT = 2,
  SOLIDITY_WEAK = 3,
  SOLIDITY_PHANTOM = 4,
  SOLIDITY_FINALIZABLE = 5,
  SOLIDITY_SWEEP = 6,
};

enum HpsgKind {
  KIND_OBJECT = 0,
  KIND_CLASS_OBJECT = 1,
  KIND_ARRAY_1 = 2,
  KIND_ARRAY_2 = 3,
  KIND_ARRAY_4 = 4,
  KIND_ARRAY_8 = 5,
  KIND_UNKNOWN = 6,
  KIND_NATIVE = 7,
};

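// A state byte packs a 3-bit HpsgKind (bits 3-5) and a 3-bit HpsgSolidity (bits 0-2);
// HPSG_PARTIAL (bit 7) flags a run of allocation units that continues into the next
// (state, length) byte pair.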
#define HPSG_PARTIAL (1<<7)
#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))

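// Accumulates run-length (state, length) records describing a heap into a fixed-size buffer,
// publishing the buffer as a HPSG/HPSO/NHSG chunk whenever it fills up or the walk finishes.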
class HeapChunkContext {
 public:
  // Maximum chunk size. Obtain this from the formula:
  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
  HeapChunkContext(bool merge, bool native)
      : buf_(16384 - 16),
        type_(0),
        chunk_overhead_(0) {
    Reset();
    if (native) {
      type_ = CHUNK_TYPE("NHSG");
    } else {
      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
    }
  }

  ~HeapChunkContext() {
    if (p_ > &buf_[0]) {
      Flush();
    }
  }

  void SetChunkOverhead(size_t chunk_overhead) {
    chunk_overhead_ = chunk_overhead;
  }

  void ResetStartOfNextChunk() {
    startOfNextMemoryChunk_ = nullptr;
  }

  void EnsureHeader(const void* chunk_ptr) {
    if (!needHeader_) {
      return;
    }

    // Start a new HPSx chunk.
    Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
    Write1BE(&p_, 8);  // Size of allocation unit, in bytes.

    Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
    Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
    // [u4]: length of piece, in allocation units
    // We won't know this until we're done, so save the offset and stuff in a dummy value.
    pieceLenField_ = p_;
    Write4BE(&p_, 0x55555555);
    needHeader_ = false;
  }

  void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (pieceLenField_ == nullptr) {
      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
      CHECK(needHeader_);
      return;
    }
    // Patch the "length of piece" field.
    CHECK_LE(&buf_[0], pieceLenField_);
    CHECK_LE(pieceLenField_, p_);
    Set4BE(pieceLenField_, totalAllocationUnits_);

    ArrayRef<const uint8_t> out(&buf_[0], p_ - &buf_[0]);
    Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(type_, out);
    Reset();
  }

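  // Static adapters with the signature expected by the space Walk() routines; 'arg' is the
  // HeapChunkContext performing the walk.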
  static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_,
                      Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
  }

  static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
  }

 private:
  enum { ALLOCATION_UNIT_SIZE = 8 };

  void Reset() {
    p_ = &buf_[0];
    ResetStartOfNextChunk();
    totalAllocationUnits_ = 0;
    needHeader_ = true;
    pieceLenField_ = nullptr;
  }

  bool IsNative() const {
    return type_ == CHUNK_TYPE("NHSG");
  }

  // Returns true if the record describes in-use memory that should be emitted as a chunk, after
  // first handling any pending free-region bookkeeping.
  bool ProcessRecord(void* start, size_t used_bytes) REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: heap callbacks cannot manipulate the heap upon which they are crawling; care is taken
    // in the following code not to allocate memory, by ensuring buf_ is of the correct size.
    if (used_bytes == 0) {
      if (start == nullptr) {
        // Reset for start of new heap.
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
      // Only process in-use memory so that free region information
      // also includes dlmalloc bookkeeping.
      return false;
    }
    if (startOfNextMemoryChunk_ != nullptr) {
      // Transmit any pending free memory. Native free memory over kMaxFreeLen is likely due to
      // the use of mmaps, so don't report it; in that case flush and start a new segment.
      bool flush = true;
      if (start > startOfNextMemoryChunk_) {
        const size_t kMaxFreeLen = 2 * kPageSize;
        void* free_start = startOfNextMemoryChunk_;
        void* free_end = start;
        const size_t free_len =
            reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
        if (!IsNative() || free_len < kMaxFreeLen) {
          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
          flush = false;
        }
      }
      if (flush) {
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
    }
    return true;
  }

  void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      uint8_t state = ExamineNativeObject(start);
      AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ true);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      // Determine the type of this chunk.
      // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
      // If it's the same, we should combine them.
      uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
      AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ false);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Make sure there's enough room left in the buffer.
    // We need two bytes for every 256 allocation units (or fraction thereof) used by the chunk,
    // plus 17 bytes for any header.
    const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
    size_t byte_left = &buf_.back() - p_;
    if (byte_left < needed) {
      if (is_native) {
        // Cannot trigger memory allocation while walking native heap.
        return;
      }
      Flush();
    }

    byte_left = &buf_.back() - p_;
    if (byte_left < needed) {
      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
                   << needed << " bytes)";
      return;
    }
    EnsureHeader(ptr);
    // Write out the chunk description.
    length /= ALLOCATION_UNIT_SIZE;  // Convert to allocation units.
    totalAllocationUnits_ += length;
    while (length > 256) {
      *p_++ = state | HPSG_PARTIAL;
      *p_++ = 255;  // length - 1
      length -= 256;
    }
    *p_++ = state;
    *p_++ = length - 1;
  }

  uint8_t ExamineNativeObject(const void* p) REQUIRES_SHARED(Locks::mutator_lock_) {
    return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
  }

  uint8_t ExamineJavaObject(ObjPtr<mirror::Object> o)
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    if (o == nullptr) {
      return HPSG_STATE(SOLIDITY_FREE, 0);
    }
    // It's an allocated chunk. Figure out what it is.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (!heap->IsLiveObjectLocked(o)) {
      LOG(ERROR) << "Invalid object in managed heap: " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }
    ObjPtr<mirror::Class> c = o->GetClass();
    if (c == nullptr) {
      // The object was probably just created but hasn't been initialized yet.
      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
    }
    if (!heap->IsValidObjectAddress(c.Ptr())) {
      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->GetClass() == nullptr) {
      LOG(ERROR) << "Null class of class " << c << " for object " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->IsClassClass()) {
      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
    }
    if (c->IsArrayClass()) {
      switch (c->GetComponentSize()) {
        case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
        case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
        case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
        case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
      }
    }
    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
  }

  std::vector<uint8_t> buf_;       // Chunk buffer; sized up front so the walk never allocates.
  uint8_t* p_;                     // Current write position within buf_.
  uint8_t* pieceLenField_;         // Location of the "length of piece" field, patched in Flush().
  void* startOfNextMemoryChunk_;   // Expected start of the next chunk; used to detect free gaps.
  size_t totalAllocationUnits_;    // Running length of the current piece, in allocation units.
  uint32_t type_;                  // CHUNK_TYPE being emitted (HPSG, HPSO, or NHSG).
  bool needHeader_;                // Whether the next append must first write a chunk header.
  size_t chunk_overhead_;          // Allocator bookkeeping bytes folded into each chunk's length.

  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};

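// Walks the requested heap (managed or native) and sends it to DDMS as a heap-segment chunk
// sequence: a HPST/NHST start chunk, HPSG/HPSO/NHSG segment chunks, and a HPEN/NHEN end chunk.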
void Dbg::DdmSendHeapSegments(bool native) {
  Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
  Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
  if (when == HPSG_WHEN_NEVER) {
    return;
  }
  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  // Figure out what kind of chunks we'll be sending.
  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
      << static_cast<int>(what);

  // First, send a heap start chunk.
  uint8_t heap_id[4];
  Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
  cb->DdmPublishChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
                      ArrayRef<const uint8_t>(heap_id));
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertSharedHeld(self);

  // Send a series of heap segment chunks.
  HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
  auto bump_pointer_space_visitor = [&](mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
    const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
    HeapChunkContext::HeapChunkJavaCallback(
        obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, &context);
  };
  if (native) {
    UNIMPLEMENTED(WARNING) << "Native heap inspection is not supported";
  } else {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    for (const auto& space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for
        // an allocation then the first sizeof(size_t) may belong to it.
        context.SetChunkOverhead(sizeof(size_t));
        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsRosAllocSpace()) {
        context.SetChunkOverhead(0);
        // Need to acquire the mutator lock before the heap bitmap lock with exclusive access
        // since RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap
        // lock.
        ScopedThreadSuspension sts(self, kSuspended);
        ScopedSuspendAll ssa(__FUNCTION__);
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsBumpPointerSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        context.SetChunkOverhead(0);
        space->AsBumpPointerSpace()->Walk(bump_pointer_space_visitor);
        HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
      } else if (space->IsRegionSpace()) {
        heap->IncrementDisableMovingGC(self);
        {
          ScopedThreadSuspension sts(self, kSuspended);
          ScopedSuspendAll ssa(__FUNCTION__);
          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
          context.SetChunkOverhead(0);
          space->AsRegionSpace()->Walk(bump_pointer_space_visitor);
          HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
        }
        heap->DecrementDisableMovingGC(self);
      } else {
        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
      }
      context.ResetStartOfNextChunk();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Walk the large objects, these are not in the AllocSpace.
    context.SetChunkOverhead(0);
    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
  }

  // Finally, send a heap end chunk.
  cb->DdmPublishChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
                      ArrayRef<const uint8_t>(heap_id));
}

Brian Carlstrom | 306db81 | 2014-09-05 13:01:41 -0700 | [diff] [blame] | 786 | void Dbg::SetAllocTrackingEnabled(bool enable) { |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 787 | gc::AllocRecordObjectMap::SetAllocTrackingEnabled(enable); |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 788 | } |
| 789 | |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 790 | class StringTable { |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 791 | private: |
| 792 | struct Entry { |
Andreas Gampe | 84eadb2 | 2017-07-07 15:08:01 -0700 | [diff] [blame] | 793 | explicit Entry(const char* data_in) |
| 794 | : data(data_in), hash(ComputeModifiedUtf8Hash(data_in)), index(0) { |
| 795 | } |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 796 | Entry(const Entry& entry) = default; |
| 797 | Entry(Entry&& entry) = default; |
| 798 | |
| 799 | // Pointer to the actual string data. |
| 800 | const char* data; |
Andreas Gampe | 84eadb2 | 2017-07-07 15:08:01 -0700 | [diff] [blame] | 801 | |
| 802 | // The hash of the data. |
| 803 | const uint32_t hash; |
| 804 | |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 805 | // The index. This is filled in by Finish() and is not part of the ordering, so it is
| 806 | // marked mutable.
| 807 | mutable uint32_t index; |
| 808 | |
Andreas Gampe | 84eadb2 | 2017-07-07 15:08:01 -0700 | [diff] [blame] | 809 | bool operator==(const Entry& other) const { |
| 810 | return strcmp(data, other.data) == 0; |
| 811 | } |
| 812 | }; |
| 813 | struct EntryHash { |
| 814 | size_t operator()(const Entry& entry) const { |
| 815 | return entry.hash; |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 816 | } |
| 817 | }; |
| 818 | |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 819 | public: |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 820 | StringTable() : finished_(false) { |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 821 | } |
| 822 | |
Andreas Gampe | f774a4e | 2017-07-06 22:15:18 -0700 | [diff] [blame] | 823 | void Add(const char* str, bool copy_string) { |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 824 | DCHECK(!finished_); |
Andreas Gampe | f774a4e | 2017-07-06 22:15:18 -0700 | [diff] [blame] | 825 | if (UNLIKELY(copy_string)) { |
| 826 | // Check whether it's already there. |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 827 | Entry entry(str); |
| 828 | if (table_.find(entry) != table_.end()) { |
Andreas Gampe | f774a4e | 2017-07-06 22:15:18 -0700 | [diff] [blame] | 829 | return; |
| 830 | } |
Mathieu Chartier | 4345c46 | 2014-06-27 10:20:14 -0700 | [diff] [blame] | 831 | |
Andreas Gampe | f774a4e | 2017-07-06 22:15:18 -0700 | [diff] [blame] | 832 | // Make a copy. |
| 833 | size_t str_len = strlen(str); |
| 834 | char* copy = new char[str_len + 1]; |
| 835 | strlcpy(copy, str, str_len + 1); |
| 836 | string_backup_.emplace_back(copy); |
| 837 | str = copy; |
| 838 | } |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 839 | Entry entry(str); |
| 840 | table_.insert(entry); |
| 841 | } |
| 842 | |
| 843 | // Update all entries and give them an index. Note that this is likely not the insertion order,
| 844 | // as the set will almost certainly reorder elements. Thus, Add must not be called after
| 845 | // Finish, and Finish must be called before IndexOf. Under those constraints, WriteTo walks in
| 846 | // the same order as Finish, so the indices agree. The ordering invariant and the indices
| 847 | // are enforced through debug checks.
| 848 | void Finish() { |
| 849 | DCHECK(!finished_); |
| 850 | finished_ = true; |
| 851 | uint32_t index = 0; |
| 852 | for (auto& entry : table_) { |
| 853 | entry.index = index; |
| 854 | ++index; |
| 855 | } |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 856 | } |
| 857 | |
Elliott Hughes | a8f93cb | 2012-06-08 17:08:48 -0700 | [diff] [blame] | 858 | size_t IndexOf(const char* s) const { |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 859 | DCHECK(finished_); |
| 860 | Entry entry(s); |
| 861 | auto it = table_.find(entry); |
Elliott Hughes | a8f93cb | 2012-06-08 17:08:48 -0700 | [diff] [blame] | 862 | if (it == table_.end()) { |
| 863 | LOG(FATAL) << "IndexOf(\"" << s << "\") failed"; |
| 864 | } |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 865 | return it->index; |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 866 | } |
| 867 | |
Elliott Hughes | a8f93cb | 2012-06-08 17:08:48 -0700 | [diff] [blame] | 868 | size_t Size() const { |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 869 | return table_.size(); |
| 870 | } |
| 871 | |
Elliott Hughes | a8f93cb | 2012-06-08 17:08:48 -0700 | [diff] [blame] | 872 | void WriteTo(std::vector<uint8_t>& bytes) const { |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 873 | DCHECK(finished_); |
| 874 | uint32_t cur_index = 0; |
| 875 | for (const auto& entry : table_) { |
| 876 | DCHECK_EQ(cur_index++, entry.index); |
| 877 | |
| 878 | size_t s_len = CountModifiedUtf8Chars(entry.data); |
Christopher Ferris | 8a35405 | 2015-04-24 17:23:53 -0700 | [diff] [blame] | 879 | std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]); |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 880 | ConvertModifiedUtf8ToUtf16(s_utf16.get(), entry.data); |
Alex Light | fc58809 | 2020-01-23 15:39:08 -0800 | [diff] [blame] | 881 | AppendUtf16BE(bytes, s_utf16.get(), s_len); |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 882 | } |
| 883 | } |
| 884 | |
| 885 | private: |
Andreas Gampe | 84eadb2 | 2017-07-07 15:08:01 -0700 | [diff] [blame] | 886 | std::unordered_set<Entry, EntryHash> table_; |
Andreas Gampe | f774a4e | 2017-07-06 22:15:18 -0700 | [diff] [blame] | 887 | std::vector<std::unique_ptr<char[]>> string_backup_; |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 888 | |
| 889 | bool finished_; |
| 890 | |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 891 | DISALLOW_COPY_AND_ASSIGN(StringTable); |
| 892 | }; |
| 893 | |
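// An illustrative sketch of the StringTable protocol described above: Add() all strings,
// Finish() to assign stable indices, then use IndexOf() and WriteTo(). This helper is
// hypothetical (nothing in the debugger calls it); the names and values are for exposition only.
[[maybe_unused]] static void StringTableUsageExample(std::vector<uint8_t>& bytes) {
  StringTable names;
  names.Add("Ljava/lang/Object;", /* copy_string= */ false);  // Caller-owned storage.
  std::string temp("LMyClass;");
  names.Add(temp.c_str(), /* copy_string= */ true);  // The table makes its own copy.
  names.Finish();  // Assigns indices; Add() must not be called after this.
  Append2BE(bytes, names.IndexOf("LMyClass;"));  // Indices are sent as 2-byte big-endian values.
  names.WriteTo(bytes);  // Emits each string as a 4-byte length followed by UTF-16 data.
}
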
Mathieu Chartier | e401d14 | 2015-04-22 13:56:20 -0700 | [diff] [blame] | 895 | static const char* GetMethodSourceFile(ArtMethod* method) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 896 | REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | bfd9a43 | 2014-05-21 17:43:44 -0700 | [diff] [blame] | 897 | DCHECK(method != nullptr); |
| 898 | const char* source_file = method->GetDeclaringClassSourceFile(); |
Sebastien Hertz | 280286a | 2014-04-28 09:26:50 +0200 | [diff] [blame] | 899 | return (source_file != nullptr) ? source_file : ""; |
| 900 | } |
| 901 | |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 902 | /* |
| 903 | * The data we send to DDMS contains everything we have recorded. |
| 904 | * |
| 905 | * Message header (all values big-endian): |
| 906 | * (1b) message header len (to allow future expansion); includes itself |
| 907 | * (1b) entry header len |
| 908 | * (1b) stack frame len |
| 909 | * (2b) number of entries |
| 910 | * (4b) offset to string table from start of message |
| 911 | * (2b) number of class name strings |
| 912 | * (2b) number of method name strings |
| 913 | * (2b) number of source file name strings |
| 914 | * For each entry: |
| 915 | * (4b) total allocation size |
Elliott Hughes | 221229c | 2013-01-08 18:17:50 -0800 | [diff] [blame] | 916 | * (2b) thread id |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 917 | * (2b) allocated object's class name index |
| 918 | * (1b) stack depth |
| 919 | * For each stack frame: |
| 920 | * (2b) method's class name |
| 921 | * (2b) method name |
| 922 | * (2b) method source file |
| 923 | * (2b) line number, clipped to 32767; -2 if native; -1 if no source |
| 924 | * (xb) class name strings |
| 925 | * (xb) method name strings |
| 926 | * (xb) source file strings |
| 927 | * |
| 928 | * As with other DDM traffic, strings are sent as a 4-byte length |
| 929 | * followed by UTF-16 data. |
| 930 | * |
| 931 | * We send up 16-bit unsigned indexes into string tables. In theory there |
Brian Carlstrom | 306db81 | 2014-09-05 13:01:41 -0700 | [diff] [blame] | 932 | * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 933 | * each table, but in practice there should be far fewer. |
| 934 | * |
| 935 | * The chief reason for using a string table here is to keep the size of |
| 936 | * the DDMS message to a minimum. This is partly to make the protocol |
| 937 | * efficient, but also because we have to form the whole thing up all at |
| 938 | * once in a memory buffer. |
| 939 | * |
| 940 | * We use separate string tables for class names, method names, and source |
| 941 | * files to keep the indexes small. There will generally be no overlap |
| 942 | * between the contents of these tables. |
| 943 | */ |
| 944 | jbyteArray Dbg::GetRecentAllocations() { |
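  // Debug-only dump, intentionally compiled out; flip the constant to true when debugging locally.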
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 945 | if ((false)) { |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 946 | DumpRecentAllocations(); |
| 947 | } |
| 948 | |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 949 | Thread* self = Thread::Current(); |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 950 | std::vector<uint8_t> bytes; |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 951 | { |
Brian Carlstrom | 306db81 | 2014-09-05 13:01:41 -0700 | [diff] [blame] | 952 | MutexLock mu(self, *Locks::alloc_tracker_lock_); |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 953 | gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords(); |
| 954 | // This method may be called while the allocation tracker is disabled;
| 955 | // we should still send some data back in that case.
| 956 | gc::AllocRecordObjectMap dummy; |
| 957 | if (records == nullptr) { |
| 958 | CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()); |
| 959 | records = &dummy; |
| 960 | } |
Man Cao | 41656de | 2015-07-06 18:53:15 -0700 | [diff] [blame] | 961 | // We don't need to wait on the condition variable records->new_record_condition_, because this
| 962 | // function only reads the class objects, which are already marked, so reading them doesn't
| 963 | // change their reachability.
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 964 | |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 965 | // |
| 966 | // Part 1: generate string tables. |
| 967 | // |
| 968 | StringTable class_names; |
| 969 | StringTable method_names; |
| 970 | StringTable filenames; |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 971 | |
Andreas Gampe | ff29cee | 2017-07-07 11:11:15 -0700 | [diff] [blame] | 972 | VLOG(jdwp) << "Collecting StringTables."; |
| 973 | |
Man Cao | 1ed11b9 | 2015-06-11 22:47:35 -0700 | [diff] [blame] | 974 | const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize()); |
Brian Carlstrom | 306db81 | 2014-09-05 13:01:41 -0700 | [diff] [blame] | 975 | uint16_t count = capped_count; |
Andreas Gampe | d0fc768 | 2017-07-07 14:03:08 -0700 | [diff] [blame] | 976 | size_t alloc_byte_count = 0; |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 977 | for (auto it = records->RBegin(), end = records->REnd(); |
| 978 | count > 0 && it != end; count--, it++) { |
Mathieu Chartier | 458b105 | 2016-03-29 14:02:55 -0700 | [diff] [blame] | 979 | const gc::AllocRecord* record = &it->second; |
Ian Rogers | 1ff3c98 | 2014-08-12 02:30:58 -0700 | [diff] [blame] | 980 | std::string temp; |
Andreas Gampe | f774a4e | 2017-07-06 22:15:18 -0700 | [diff] [blame] | 981 | const char* class_descr = record->GetClassDescriptor(&temp); |
| 982 | class_names.Add(class_descr, !temp.empty()); |
Andreas Gampe | d0fc768 | 2017-07-07 14:03:08 -0700 | [diff] [blame] | 983 | |
| 984 | // Size + tid + class name index + stack depth. |
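// These 9 bytes per entry are exactly what kEntryHeaderLen in Part 2 below accounts for.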
| 985 | alloc_byte_count += 4u + 2u + 2u + 1u; |
| 986 | |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 987 | for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) { |
| 988 | ArtMethod* m = record->StackElement(i).GetMethod(); |
Andreas Gampe | f774a4e | 2017-07-06 22:15:18 -0700 | [diff] [blame] | 989 | class_names.Add(m->GetDeclaringClassDescriptor(), false); |
| 990 | method_names.Add(m->GetName(), false); |
| 991 | filenames.Add(GetMethodSourceFile(m), false); |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 992 | } |
Andreas Gampe | d0fc768 | 2017-07-07 14:03:08 -0700 | [diff] [blame] | 993 | |
| 994 | // Depth * (class index + method name index + file name index + line number). |
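// Each frame thus contributes kStackFrameLen (8) bytes.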
| 995 | alloc_byte_count += record->GetDepth() * (2u + 2u + 2u + 2u); |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 996 | } |
| 997 | |
Andreas Gampe | 2eeb01a | 2017-07-07 14:09:46 -0700 | [diff] [blame] | 998 | class_names.Finish(); |
| 999 | method_names.Finish(); |
| 1000 | filenames.Finish(); |
Andreas Gampe | ff29cee | 2017-07-07 11:11:15 -0700 | [diff] [blame] | 1001 | VLOG(jdwp) << "Done collecting StringTables:" << std::endl |
| 1002 | << " ClassNames: " << class_names.Size() << std::endl |
| 1003 | << " MethodNames: " << method_names.Size() << std::endl |
| 1004 | << " Filenames: " << filenames.Size(); |
| 1005 | |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 1006 | LOG(INFO) << "recent allocation records: " << capped_count; |
| 1007 | LOG(INFO) << "allocation records all objects: " << records->Size(); |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1008 | |
| 1009 | // |
| 1010 | // Part 2: Generate the output and store it in the buffer. |
| 1011 | // |
| 1012 | |
| 1013 | // (1b) message header len (to allow future expansion); includes itself |
| 1014 | // (1b) entry header len |
| 1015 | // (1b) stack frame len |
| 1016 | const int kMessageHeaderLen = 15; |
| 1017 | const int kEntryHeaderLen = 9; |
| 1018 | const int kStackFrameLen = 8; |
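// 15 = 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2, the header fields listed above; 9 and 8 match the
// per-entry and per-frame byte counts accumulated into alloc_byte_count in Part 1.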
Alex Light | fc58809 | 2020-01-23 15:39:08 -0800 | [diff] [blame] | 1019 | Append1BE(bytes, kMessageHeaderLen); |
| 1020 | Append1BE(bytes, kEntryHeaderLen); |
| 1021 | Append1BE(bytes, kStackFrameLen); |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1022 | |
| 1023 | // (2b) number of entries |
| 1024 | // (4b) offset to string table from start of message |
| 1025 | // (2b) number of class name strings |
| 1026 | // (2b) number of method name strings |
| 1027 | // (2b) number of source file name strings |
Alex Light | fc58809 | 2020-01-23 15:39:08 -0800 | [diff] [blame] | 1028 | Append2BE(bytes, capped_count); |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1029 | size_t string_table_offset = bytes.size(); |
Alex Light | fc58809 | 2020-01-23 15:39:08 -0800 | [diff] [blame] | 1030 | Append4BE(bytes, 0); // We'll patch this later... |
| 1031 | Append2BE(bytes, class_names.Size()); |
| 1032 | Append2BE(bytes, method_names.Size()); |
| 1033 | Append2BE(bytes, filenames.Size()); |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1034 | |
Andreas Gampe | ff29cee | 2017-07-07 11:11:15 -0700 | [diff] [blame] | 1035 | VLOG(jdwp) << "Dumping allocations with stacks"; |
| 1036 | |
Andreas Gampe | d0fc768 | 2017-07-07 14:03:08 -0700 | [diff] [blame] | 1037 | // Reserve capacity for the allocation data up front to avoid repeated reallocation.
| 1038 | size_t reserve_size = bytes.size() + alloc_byte_count; |
| 1039 | bytes.reserve(reserve_size); |
| 1040 | |
Ian Rogers | 1ff3c98 | 2014-08-12 02:30:58 -0700 | [diff] [blame] | 1041 | std::string temp; |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 1042 | count = capped_count; |
| 1043 | // The last "count" allocation records in "records" are the most recent "count" allocations.
| 1044 | // Iterate in reverse to visit them; the most recent allocation is sent first.
| 1045 | for (auto it = records->RBegin(), end = records->REnd(); |
| 1046 | count > 0 && it != end; count--, it++) { |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1047 | // For each entry: |
| 1048 | // (4b) total allocation size |
| 1049 | // (2b) thread id |
| 1050 | // (2b) allocated object's class name index |
| 1051 | // (1b) stack depth |
Mathieu Chartier | 458b105 | 2016-03-29 14:02:55 -0700 | [diff] [blame] | 1052 | const gc::AllocRecord* record = &it->second; |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1053 | size_t stack_depth = record->GetDepth(); |
Mathieu Chartier | f832284 | 2014-05-16 10:59:25 -0700 | [diff] [blame] | 1054 | size_t allocated_object_class_name_index = |
Man Cao | 41656de | 2015-07-06 18:53:15 -0700 | [diff] [blame] | 1055 | class_names.IndexOf(record->GetClassDescriptor(&temp)); |
Alex Light | fc58809 | 2020-01-23 15:39:08 -0800 | [diff] [blame] | 1056 | Append4BE(bytes, record->ByteCount()); |
| 1057 | Append2BE(bytes, static_cast<uint16_t>(record->GetTid())); |
| 1058 | Append2BE(bytes, allocated_object_class_name_index); |
| 1059 | Append1BE(bytes, stack_depth); |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1060 | |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1061 | for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) { |
| 1062 | // For each stack frame: |
| 1063 | // (2b) method's class name |
| 1064 | // (2b) method name |
| 1065 | // (2b) method source file |
| 1066 | // (2b) line number, clipped to 32767; -2 if native; -1 if no source |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 1067 | ArtMethod* m = record->StackElement(stack_frame).GetMethod(); |
Mathieu Chartier | bfd9a43 | 2014-05-21 17:43:44 -0700 | [diff] [blame] | 1068 | size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor()); |
| 1069 | size_t method_name_index = method_names.IndexOf(m->GetName()); |
| 1070 | size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m)); |
Alex Light | fc58809 | 2020-01-23 15:39:08 -0800 | [diff] [blame] | 1071 | Append2BE(bytes, class_name_index); |
| 1072 | Append2BE(bytes, method_name_index); |
| 1073 | Append2BE(bytes, file_name_index); |
| 1074 | Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber()); |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1075 | } |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1076 | } |
| 1077 | |
Andreas Gampe | d0fc768 | 2017-07-07 14:03:08 -0700 | [diff] [blame] | 1078 | CHECK_EQ(bytes.size(), reserve_size); |
Andreas Gampe | ff29cee | 2017-07-07 11:11:15 -0700 | [diff] [blame] | 1079 | VLOG(jdwp) << "Dumping tables."; |
| 1080 | |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1081 | // (xb) class name strings |
| 1082 | // (xb) method name strings |
| 1083 | // (xb) source file strings |
Alex Light | fc58809 | 2020-01-23 15:39:08 -0800 | [diff] [blame] | 1084 | Set4BE(&bytes[string_table_offset], bytes.size()); |
Mathieu Chartier | 46e811b | 2013-07-10 17:09:14 -0700 | [diff] [blame] | 1085 | class_names.WriteTo(bytes); |
| 1086 | method_names.WriteTo(bytes); |
| 1087 | filenames.WriteTo(bytes); |
Andreas Gampe | ff29cee | 2017-07-07 11:11:15 -0700 | [diff] [blame] | 1088 | |
| 1089 | VLOG(jdwp) << "GetRecentAllocations: data created. " << bytes.size(); |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 1090 | } |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 1091 | JNIEnv* env = self->GetJniEnv(); |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 1092 | jbyteArray result = env->NewByteArray(bytes.size()); |
Ian Rogers | c0542af | 2014-09-03 16:16:56 -0700 | [diff] [blame] | 1093 | if (result != nullptr) { |
Elliott Hughes | 545a064 | 2011-11-08 19:10:03 -0800 | [diff] [blame] | 1094 | env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0])); |
| 1095 | } |
| 1096 | return result; |
| 1097 | } |
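
// A minimal client-side sketch of decoding the fixed message header documented above
// GetRecentAllocations(). This function is hypothetical -- it is not part of the debugger
// interface -- and assumes "p" points at a complete, well-formed message.
[[maybe_unused]] static void ParseRecentAllocationsHeaderExample(const uint8_t* p) {
  // All values are big-endian, per the format comment.
  auto read_u2 = [](const uint8_t* b) -> uint16_t {
    return static_cast<uint16_t>((b[0] << 8) | b[1]);
  };
  auto read_u4 = [](const uint8_t* b) -> uint32_t {
    return (static_cast<uint32_t>(b[0]) << 24) | (static_cast<uint32_t>(b[1]) << 16) |
           (static_cast<uint32_t>(b[2]) << 8) | static_cast<uint32_t>(b[3]);
  };
  uint8_t message_header_len = p[0];              // Includes itself; 15 in this implementation.
  uint8_t entry_header_len = p[1];                // 9: size + tid + class name index + depth.
  uint8_t stack_frame_len = p[2];                 // 8: four 2-byte fields per frame.
  uint16_t entry_count = read_u2(p + 3);
  uint32_t string_table_offset = read_u4(p + 5);  // From the start of the message.
  uint16_t class_name_count = read_u2(p + 9);
  uint16_t method_name_count = read_u2(p + 11);
  uint16_t source_file_count = read_u2(p + 13);
  VLOG(jdwp) << "header_len=" << static_cast<int>(message_header_len)
             << " entry_len=" << static_cast<int>(entry_header_len)
             << " frame_len=" << static_cast<int>(stack_frame_len)
             << " entries=" << entry_count
             << " string_table_offset=" << string_table_offset
             << " string_counts=" << class_name_count << "/" << method_name_count << "/"
             << source_file_count;
}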
| 1098 | |
Andreas Gampe | 04bbb5b | 2017-01-19 17:49:03 +0000 | [diff] [blame] | 1099 | void Dbg::DbgThreadLifecycleCallback::ThreadStart(Thread* self) { |
| 1100 | Dbg::PostThreadStart(self); |
| 1101 | } |
| 1102 | |
| 1103 | void Dbg::DbgThreadLifecycleCallback::ThreadDeath(Thread* self) { |
| 1104 | Dbg::PostThreadDeath(self); |
| 1105 | } |
| 1106 | |
Elliott Hughes | 872d4ec | 2011-10-21 17:07:15 -0700 | [diff] [blame] | 1107 | } // namespace art |