/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <memory>
#include <vector>

#include "base/locks.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "gc/space/image_space_loading_order.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jdwp_provider.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"

namespace art {

namespace gc {
class AbstractSystemWeakHolder;
class Heap;
}  // namespace gc

namespace hiddenapi {
enum class EnforcementPolicy;
}  // namespace hiddenapi

namespace jit {
class Jit;
class JitCodeCache;
class JitOptions;
}  // namespace jit

namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
class String;
class Throwable;
}  // namespace mirror
namespace ti {
class Agent;
class AgentSpec;
}  // namespace ti
namespace verifier {
class MethodVerifier;
enum class VerifyMode : int8_t;
}  // namespace verifier
class ArenaPool;
class ArtMethod;
enum class CalleeSaveType: uint32_t;
class ClassLinker;
class CompilerCallbacks;
class DexFile;
enum class InstructionSet;
class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileAssistantTest;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class ThreadPool;
class Trace;
struct TraceConfig;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsPrimaryZygote() const {
    return is_primary_zygote_;
  }

  bool IsSystemServer() const {
    return is_system_server_;
  }

  void SetSystemServer(bool value) {
    is_system_server_ = value;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  bool IsUsingApexBootImageLocation() const {
    return is_using_apex_boot_image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  void RunRootClinits(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);

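  // Illustrative only (mirrors the two declarations above): a native thread that wants to call
  // into managed code attaches itself, does its work, and detaches before exiting. The thread
  // name shown is hypothetical.
  //
  //   Runtime* runtime = Runtime::Current();
  //   if (runtime != nullptr &&
  //       runtime->AttachCurrentThread("NativeWorker",
  //                                    /*as_daemon=*/ false,
  //                                    runtime->GetSystemThreadGroup(),
  //                                    /*create_peer=*/ true)) {
  //     // ... call into managed code ...
  //     runtime->DetachCurrentThread();
  //   }
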
  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::vector<std::string>& GetBootClassPath() const {
    return boot_class_path_;
  }

  const std::vector<std::string>& GetBootClassPathLocations() const {
    DCHECK(boot_class_path_locations_.empty() ||
           boot_class_path_locations_.size() == boot_class_path_.size());
    return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  unsigned int GetFinalizerTimeoutMs() const {
    return finalizer_timeout_ms_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingException()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow()
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast to make blocked threads respond to
  // checkpoint requests. It is false when we broadcast to unblock blocked threads after system
  // weak access is re-enabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);

  // Visit all the roots. The flags argument selects which categories of roots are visited
  // (see VisitRootFlags); by default all roots are visited.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots, only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non thread roots, we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweep system weaks: a system weak is deleted if the visitor returns null. Otherwise, the
  // system weak is updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables();
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);

  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  // Returns true if JIT compilations are enabled. GetJit() will be not null in this case.
  bool UseJitCompilation() const;

  void PreZygoteFork();
  void PostZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env,
      bool is_system_server,
      NativeBridgeAction action,
      const char* isa,
      bool profile_system_server = false);

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const;
  void EnterTransactionMode();
  void EnterTransactionMode(bool strict, mirror::Class* root);
  void ExitTransactionMode();
  void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
  // Transaction rollback and exiting transaction mode are always done together, so it is
  // convenient to do both in one function.
  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsTransactionAborted() const;
  const std::unique_ptr<Transaction>& GetTransaction() const;
  bool IsActiveStrictTransactionMode() const;

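  // Illustrative pattern only (the real call sites live outside this header; per the member
  // comment below, transactions back class pre-initialization at compilation time): work whose
  // heap side effects may need to be undone is wrapped in a transaction.
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->EnterTransactionMode();
  //   bool success = /* ... attempt the work that may need to be rolled back ... */ false;
  //   if (success) {
  //     runtime->ExitTransactionMode();
  //   } else {
  //     runtime->RollbackAndExitTransactionMode();
  //   }
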
  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetFaultMessage(const std::string& message);

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  void DisableVerifier();
  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    hidden_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const {
    return hidden_api_policy_;
  }

  void SetCorePlatformApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    core_platform_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetCorePlatformApiEnforcementPolicy() const {
    return core_platform_api_policy_;
  }

  void SetHiddenApiExemptions(const std::vector<std::string>& exemptions) {
    hidden_api_exemptions_ = exemptions;
  }

  const std::vector<std::string>& GetHiddenApiExemptions() {
    return hidden_api_exemptions_;
  }

  void SetDedupeHiddenApiWarnings(bool value) {
    dedupe_hidden_api_warnings_ = value;
  }

  bool ShouldDedupeHiddenApiWarnings() {
    return dedupe_hidden_api_warnings_;
  }

  void SetHiddenApiEventLogSampleRate(uint32_t rate) {
    hidden_api_access_event_log_rate_ = rate;
  }

  uint32_t GetHiddenApiEventLogSampleRate() const {
    return hidden_api_access_event_log_rate_;
  }

  const std::string& GetProcessPackageName() const {
    return process_package_name_;
  }

  void SetProcessPackageName(const char* package_name) {
    if (package_name == nullptr) {
      process_package_name_.clear();
    } else {
      process_package_name_ = package_name;
    }
  }

  const std::string& GetProcessDataDirectory() const {
    return process_data_directory_;
  }

  void SetProcessDataDirectory(const char* data_dir) {
    if (data_dir == nullptr) {
      process_data_directory_.clear();
    } else {
      process_data_directory_ = data_dir;
    }
  }

  bool IsDexFileFallbackEnabled() const {
    return allow_dex_file_fallback_;
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(uint32_t version) {
    target_sdk_version_ = version;
  }

  uint32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  void CreateJitCodeCache(bool rwx_memory_allowed);

  // Create the JIT and instrumentation and code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetJavaDebuggable(bool value);

  // Deoptimize the boot image, called for Java debuggable apps.
  void DeoptimizeBootImage() REQUIRES(Locks::mutator_lock_);

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  bool AreNonStandardExitsEnabled() const {
    return non_standard_exits_enabled_;
  }

  void SetNonStandardExitsEnabled() {
    DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
  }

  bool AreAsyncExceptionsThrown() const {
    return async_exceptions_thrown_;
  }

  void SetAsyncExceptionsThrown() {
    DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
  }

  // Change state and re-check which interpreter should be used.
  //
  // This must be called whenever there is an event that forces
  // us to use a different interpreter (e.g. the debugger is attached).
  //
  // Changing the state via the lambda gives us some multithreading safety.
  // It ensures that two calls do not interfere with each other and
  // it makes it possible to DCHECK that the thread-local flag is correct.
  template<typename Action>
  static void DoAndMaybeSwitchInterpreter(Action lambda);

  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(ObjPtr<mirror::Object> sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
  // For testing purpose only.
  // TODO: Remove this when this is no longer needed (b/116087961).
  GcRoot<mirror::Object> GetSentinel() REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  bool IsSafeMode() const {
    return safe_mode_;
  }

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pause.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }

  // Returns if the code can be deoptimized asynchronously. Code may be compiled with some
  // optimization that makes it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);

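  // Illustrative only: a subsystem that keeps weak references to managed objects can implement
  // gc::AbstractSystemWeakHolder (defined elsewhere) and register itself so the GC sweeps it,
  // unregistering when it goes away. `my_holder` below is a hypothetical instance.
  //
  //   Runtime::Current()->AddSystemWeakHolder(my_holder);
  //   ...
  //   Runtime::Current()->RemoveSystemWeakHolder(my_holder);
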
  void AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader);

  const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
    return agents_;
  }

  RuntimeCallbacks* GetRuntimeCallbacks();

  bool HasLoadedPlugins() const {
    return !plugins_.empty();
  }

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  bool GetDumpGCPerformanceOnShutdown() const {
    return dump_gc_performance_on_shutdown_;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool MAdviseRandomAccess() const {
    return madvise_random_access_;
  }

  const std::string& GetJdwpOptions() {
    return jdwp_options_;
  }

  JdwpProvider GetJdwpProvider() const {
    return jdwp_provider_;
  }

  uint32_t GetVerifierLoggingThresholdMs() const {
    return verifier_logging_threshold_ms_;
  }

  // Atomically delete the thread pool if the reference count is 0.
  bool DeleteThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // Wait for all the thread workers to be attached.
  void WaitForThreadPoolWorkersToStart() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // Scoped usage of the runtime thread pool. Prevents the pool from being
  // deleted. Note that the thread pool is only for startup and gets deleted after.
  class ScopedThreadPoolUsage {
   public:
    ScopedThreadPoolUsage();
    ~ScopedThreadPoolUsage();

    // Return the thread pool.
    ThreadPool* GetThreadPool() const {
      return thread_pool_;
    }

   private:
    ThreadPool* const thread_pool_;
  };

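  // Usage sketch (illustrative): code that wants to dispatch startup work on the runtime thread
  // pool holds a ScopedThreadPoolUsage so the pool cannot be deleted underneath it. Since the
  // pool only exists during startup, the result should be null-checked.
  //
  //   Runtime::ScopedThreadPoolUsage stpu;
  //   ThreadPool* pool = stpu.GetThreadPool();
  //   if (pool != nullptr) {
  //     // ... add tasks and start workers ...
  //   }
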
  bool LoadAppImageStartupCache() const {
    return load_app_image_startup_cache_;
  }

  void SetLoadAppImageStartupCacheEnabled(bool enabled) {
    load_app_image_startup_cache_ = enabled;
  }

  // Reset the startup completed status so that we can call NotifyStartupCompleted again. Should
  // only be used for testing.
  void ResetStartupCompleted();

  // Notify the runtime that application startup is considered completed. Only has effect for the
  // first call.
  void NotifyStartupCompleted();

  // Return true if startup is already completed.
  bool GetStartupCompleted() const;

  gc::space::ImageSpaceLoadingOrder GetImageSpaceLoadingOrder() const {
    return image_space_loading_order_;
  }

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized, they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Note: To be lock-free, GetFaultMessage temporarily replaces the fault message with null.
  // As such, there is a window where a call will return an empty string. In general,
  // only aborting code should retrieve this data (via the GetFaultMessageForAbortLogging
  // friend).
  std::string GetFaultMessage();

  ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
  void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackground = 1;

  static constexpr uint32_t kCalleeSaveSize = 6u;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kCalleeSaveSize];
  // Pre-allocated exceptions (see Runtime::Init).
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_exception_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_oome_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_handling_stack_overflow_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // Unresolved method has the same behavior as the conflict method, it is used by the class linker
  // for differentiating between unfilled imt slots vs conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool is_primary_zygote_;
  bool is_system_server_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;
  bool is_using_apex_boot_image_location_;

  std::vector<std::string> boot_class_path_;
  std::vector<std::string> boot_class_path_locations_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  std::list<ti::AgentSpec> agent_specs_;
  std::list<std::unique_ptr<ti::Agent>> agents_;
  std::vector<Plugin> plugins_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  // Finalizers running for longer than this many milliseconds abort the runtime.
  unsigned int finalizer_timeout_ms_;

  gc::Heap* heap_;

  std::unique_ptr<ArenaPool> jit_arena_pool_;
  std::unique_ptr<ArenaPool> arena_pool_;
  // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
  // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
  // since the field arrays are int arrays in this case.
  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;

  // Shared linear alloc for now.
  std::unique_ptr<LinearAlloc> linear_alloc_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;

  std::unique_ptr<JavaVMExt> java_vm_;

  std::unique_ptr<jit::Jit> jit_;
  std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
  std::unique_ptr<jit::JitOptions> jit_options_;

  // Runtime thread pool. The pool is only for startup and gets deleted after.
  std::unique_ptr<ThreadPool> thread_pool_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
  size_t thread_pool_ref_count_ GUARDED_BY(Locks::runtime_thread_pool_lock_);

  // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
  // lock-free, so needs to be atomic.
  std::atomic<std::string*> fault_message_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001037 // Whether the runtime has finished starting. Once this flag is set, the daemon threads and
1038 // the system class loader have been created. The flag is needed to know whether it is safe
1039 // to request a CMS collection.
1040 bool finished_starting_;
1041
Brian Carlstrom6ea095a2011-08-16 15:26:54 -07001042 // Hooks supported by JNI_CreateJavaVM
1043 jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
1044 void (*exit_)(jint status);
1045 void (*abort_)();
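  // A hedged example (standard JNI invocation API, not ART-specific) of how such hooks are
  // typically supplied: JNI_CreateJavaVM recognizes the "vfprintf", "exit" and "abort" option
  // strings and takes the hook through JavaVMOption::extraInfo. my_vfprintf is hypothetical.
  //
  //   JavaVMOption option;
  //   option.optionString = const_cast<char*>("vfprintf");
  //   option.extraInfo = reinterpret_cast<void*>(&my_vfprintf);
  //   JavaVMInitArgs args;
  //   args.version = JNI_VERSION_1_6;
  //   args.nOptions = 1;
  //   args.options = &option;
  //   args.ignoreUnrecognized = JNI_FALSE;
  //   JavaVM* vm = nullptr;
  //   JNIEnv* env = nullptr;
  //   JNI_CreateJavaVM(&vm, reinterpret_cast<void**>(&env), &args);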
1046
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001047 bool stats_enabled_;
1048 RuntimeStats stats_;
1049
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001050 const bool is_running_on_memory_tool_;
Mathieu Chartierda44d772014-04-01 15:01:46 -07001051
Andreas Gampef6a780a2015-04-02 18:51:05 -07001052 std::unique_ptr<TraceConfig> trace_config_;
1053
Ian Rogers62d6c772013-02-27 08:32:07 -08001054 instrumentation::Instrumentation instrumentation_;
jeffhao2692b572011-12-16 15:42:28 -08001055
Ian Rogers365c1022012-06-22 15:05:28 -07001056 jobject main_thread_group_;
1057 jobject system_thread_group_;
1058
Brian Carlstromce888532013-10-10 00:32:58 -07001059 // As returned by ClassLoader.getSystemClassLoader().
1060 jobject system_class_loader_;
1061
Hiroshi Yamauchi2e899a92013-11-22 16:50:12 -08001062 // If true, then we dump the GC cumulative timings on shutdown.
1063 bool dump_gc_performance_on_shutdown_;
1064
Chang Xing605fe242017-07-20 15:57:21 -07001065 // Transactions used for pre-initializing classes at compilation time.
1066 // Nested transactions are supported by keeping a list of all active transactions; they are
1067 // handled under a stack discipline. Because the GC needs to visit every transaction, a list is
1068 // used as the underlying data structure rather than a stack.
1069 std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;
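  // A minimal sketch (an assumption, not the actual code) of the stack discipline on a list:
  // entering a nested transaction appends, the innermost transaction is the back element, and
  // the GC can still iterate every element. The Transaction constructor arguments are elided.
  //
  //   preinitialization_transactions_.push_back(std::make_unique<Transaction>(/* ... */));
  //   Transaction* innermost = preinitialization_transactions_.back().get();
  //   preinitialization_transactions_.pop_back();  // Leaving the innermost transaction.
  //   for (const std::unique_ptr<Transaction>& t : preinitialization_transactions_) { /* visit */ }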
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001070
Igor Murashkin7617abd2015-07-10 18:27:47 -07001071 // If kNone, verification is disabled. kEnable by default.
1072 verifier::VerifyMode verify_;
Jeff Hao4a200f52014-04-01 14:58:49 -07001073
Jean Christophe Beyler24e04aa2014-09-12 12:03:25 -07001074 // If true, the runtime may use dex files directly with the interpreter if an oat file is not
1075 // available/usable.
1076 bool allow_dex_file_fallback_;
1077
Dmitriy Ivanov785049f2014-07-18 10:08:57 -07001078 // List of supported CPU ABIs.
1079 std::vector<std::string> cpu_abilist_;
1080
Jeff Haof00571c2014-05-29 17:29:47 -07001081 // Specifies target SDK version to allow workarounds for certain API levels.
David Brazdil2bb2fbd2018-11-13 18:24:26 +00001082 uint32_t target_sdk_version_;
Jeff Haof00571c2014-05-29 17:29:47 -07001083
Dave Allison69dfe512014-07-11 17:11:58 +00001084 // Implicit checks flags.
1085 bool implicit_null_checks_; // NullPointer checks are implicit.
1086 bool implicit_so_checks_; // StackOverflow checks are implicit.
1087 bool implicit_suspend_checks_; // Thread suspension checks are implicit.
1088
Calin Juravle01aaf6e2015-06-19 22:05:39 +01001089 // Whether or not the sig chain (and implicitly the fault handler) should be
Vladimir Markoa497a392018-09-26 10:52:50 +01001090 // disabled. Tools like dex2oat don't need them. This enables
Calin Juravle01aaf6e2015-06-19 22:05:39 +01001091 // building a statically linked version of dex2oat.
1092 bool no_sig_chain_;
1093
Calin Juravled3e7c6c2016-02-04 19:07:51 +00001094 // Force the use of native bridge even if the app ISA matches the runtime ISA.
1095 bool force_native_bridge_;
1096
Calin Juravle07d83c72014-10-22 21:02:23 +01001097 // Whether or not a native bridge has been loaded.
Calin Juravlec8423522014-08-12 20:55:20 +01001098 //
1099 // The native bridge allows running native code compiled for a foreign ISA. It works as follows:
1100 // if a standard dlopen fails to load the native library associated with a native activity, the
1101 // runtime asks the native bridge to load it and then obtains the trampoline for the entry point.
Calin Juravle07d83c72014-10-22 21:02:23 +01001102 //
1103 // The option 'native_bridge_library_filename' specifies the name of the native bridge.
1104 // When non-empty the native bridge will be loaded from the given file. An empty value means
1105 // that there's no native bridge.
1106 bool is_native_bridge_loaded_;
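  // A rough sketch (helper names assumed to come from libnativebridge; not part of this class's
  // API) of the fallback described above:
  //
  //   void* handle = dlopen(library_path, RTLD_NOW);
  //   if (handle == nullptr && is_native_bridge_loaded_) {
  //     handle = android::NativeBridgeLoadLibrary(library_path, RTLD_NOW);
  //     void* entry = android::NativeBridgeGetTrampoline(
  //         handle, "ANativeActivity_onCreate", /* shorty= */ nullptr, /* len= */ 0);
  //   }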
Calin Juravlec8423522014-08-12 20:55:20 +01001107
David Srbeckyf4480162016-03-16 00:06:24 +00001108 // Whether we are running under native debugger.
1109 bool is_native_debuggable_;
1110
Alex Light7919db92017-11-29 09:00:55 -08001111 // Whether or not any async exceptions have ever been thrown. This is used to speed up the
1112 // MterpShouldSwitchInterpreters function.
1113 bool async_exceptions_thrown_;
1114
Alex Light0aa7a5a2018-10-10 15:58:14 +00001115 // Whether anything is going to be using the shadow-frame APIs to force a function to return
1116 // early. Doing this requires that (1) the runtime be debuggable and (2) mterp be exited.
1117 bool non_standard_exits_enabled_;
1118
Nicolas Geoffray433b79a2017-01-30 20:54:45 +00001119 // Whether Java code needs to be debuggable.
1120 bool is_java_debuggable_;
Alex Light6b16d892016-11-11 11:21:04 -08001121
Narayan Kamath5a2be3f2015-02-16 13:51:51 +00001122 // The maximum number of failed boots we allow before pruning the dalvik cache
1123 // and trying again. This option is only inspected when we're running as a
1124 // zygote.
1125 uint32_t zygote_max_failed_boots_;
1126
Igor Murashkin158f35c2015-06-10 15:55:30 -07001127 // Enable experimental opcodes that aren't fully specified yet. The intent is to
1128 // eventually publish them as public-usable opcodes, but they aren't ready yet.
1129 //
1130 // Experimental opcodes should not be used by other production code.
Alex Lighteb7c1442015-08-31 13:17:42 -07001131 ExperimentalFlags experimental_flags_;
Igor Murashkin158f35c2015-06-10 15:55:30 -07001132
Andreas Gampedd671252015-07-23 14:37:18 -07001133 // Contains the build fingerprint, if given as a parameter.
1134 std::string fingerprint_;
1135
Mathieu Chartierf9c6fc62015-10-07 11:44:05 -07001136 // Oat file manager, keeps track of what oat files are open.
Mathieu Chartiere58991b2015-10-13 07:59:34 -07001137 OatFileManager* oat_file_manager_;
Mathieu Chartierf9c6fc62015-10-07 11:44:05 -07001138
Mathieu Chartier32cc9ee2015-10-15 09:19:15 -07001139 // Whether or not we are on a low RAM device.
1140 bool is_low_memory_mode_;
1141
Mathieu Chartierc42cb0e2017-10-13 11:35:00 -07001142 // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
1143 // This is beneficial for low RAM devices since it reduces page cache thrashing.
1144 bool madvise_random_access_;
1145
Nicolas Geoffray787ae8e2015-11-05 11:32:24 +00001146 // Whether the application should run in safe mode, that is, interpreter only.
1147 bool safe_mode_;
1148
David Brazdil3e0fa0a2018-01-15 18:41:44 +00001149 // Whether access checks on hidden API should be performed.
Mathew Inwooda5dc52c2018-02-19 15:30:51 +00001150 hiddenapi::EnforcementPolicy hidden_api_policy_;
David Brazdil3e0fa0a2018-01-15 18:41:44 +00001151
David Brazdile7681822018-12-14 16:25:33 +00001152 // Whether access checks on core platform API should be performed.
1153 hiddenapi::EnforcementPolicy core_platform_api_policy_;
1154
Mathew Inwoodc8ce5f52018-04-05 13:58:55 +01001155 // List of signature prefixes of methods that have been removed from the blacklist, and treated
1156 // as if whitelisted.
Mathew Inwood3383aa52018-03-16 14:18:33 +00001157 std::vector<std::string> hidden_api_exemptions_;
1158
David Brazdilee7d2fd2018-01-20 17:25:23 +00001159 // Do not warn about the same hidden API access violation twice.
1160 // This is only used for testing.
1161 bool dedupe_hidden_api_warnings_;
1162
Mathew Inwood5bcef172018-05-01 14:40:12 +01001163 // How often to log hidden API access to the event log. An integer between 0
1164 // (never) and 0x10000 (always).
Mathew Inwood73ddda42018-04-03 15:32:32 +01001165 uint32_t hidden_api_access_event_log_rate_;
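  // A minimal sketch (an assumption, not the actual sampling code) of how a rate in
  // [0, 0x10000] can gate event-log writes: draw a 16-bit sample and compare.
  // std::rand is from <cstdlib>; any uniform 16-bit source would do.
  //
  //   bool ShouldLogAccessEventSketch(uint32_t rate) {
  //     uint32_t sample = static_cast<uint32_t>(std::rand()) & 0xffff;  // 0..0xffff
  //     return sample < rate;  // rate == 0 never logs, rate == 0x10000 always logs.
  //   }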
1166
Mathew Inwood5bcef172018-05-01 14:40:12 +01001167 // The package of the app running in this process.
1168 std::string process_package_name_;
1169
David Brazdil35a3f6a2019-03-04 15:59:06 +00001170 // The data directory of the app running in this process.
1171 std::string process_data_directory_;
1172
Nicolas Geoffraya73280d2016-02-15 13:05:16 +00001173 // Whether threads should dump their native stack on SIGQUIT.
1174 bool dump_native_stack_on_sig_quit_;
1175
Andreas Gampea1425a12016-03-11 17:44:04 -08001176 // Whether the dalvik cache was pruned when initializing the runtime.
1177 bool pruned_dalvik_cache_;
1178
Mathieu Chartierf8cb1782016-03-18 18:45:41 -07001179 // Whether or not we currently care about pause times.
1180 ProcessState process_state_;
1181
Andreas Gampef38a6612016-04-11 08:42:26 -07001182 // Whether zygote code is in a section that should not start threads.
1183 bool zygote_no_threads_;
1184
Alex Light40320712017-12-14 11:52:04 -08001185 // The string containing the requested JDWP options.
1186 std::string jdwp_options_;
1187
1188 // The JDWP provider we were configured with.
1189 JdwpProvider jdwp_provider_;
1190
David Sehrd106d9f2016-08-16 19:22:57 -07001191 // Saved environment.
1192 class EnvSnapshot {
1193 public:
1194 EnvSnapshot() = default;
1195 void TakeSnapshot();
1196 char** GetSnapshot() const;
1197
1198 private:
1199 std::unique_ptr<char*[]> c_env_vector_;
1200 std::vector<std::unique_ptr<std::string>> name_value_pairs_;
1201
1202 DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
1203 } env_snapshot_;
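  // A minimal sketch (an assumption, not the real implementation) of what TakeSnapshot() might
  // do: copy the current `environ` into owned storage so the snapshot stays valid even if the
  // environment is modified later.
  //
  //   extern "C" char** environ;
  //   void EnvSnapshot::TakeSnapshot() {
  //     size_t count = 0;
  //     while (environ[count] != nullptr) ++count;
  //     c_env_vector_.reset(new char*[count + 1]);
  //     for (size_t i = 0; i < count; ++i) {
  //       name_value_pairs_.push_back(std::make_unique<std::string>(environ[i]));
  //       c_env_vector_[i] = const_cast<char*>(name_value_pairs_.back()->c_str());
  //     }
  //     c_env_vector_[count] = nullptr;
  //   }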
1204
Andreas Gampefda57142016-09-08 20:29:18 -07001205 // Generic system-weak holders.
1206 std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;
1207
Andreas Gampeac30fa22017-01-18 21:02:36 -08001208 std::unique_ptr<RuntimeCallbacks> callbacks_;
Andreas Gampe04bbb5b2017-01-19 17:49:03 +00001209
Nicolas Geoffray81cc43e2017-05-10 12:04:49 +01001210 std::atomic<uint32_t> deoptimization_counts_[
1211 static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
Nicolas Geoffray4e92c3c2017-05-08 09:34:26 +01001212
Vladimir Markoc34bebf2018-08-16 16:12:49 +01001213 MemMap protected_fault_page_;
Andreas Gampe2ac67d52017-05-11 22:30:38 -07001214
Andreas Gampe0b0ffc12018-08-01 14:41:27 -07001215 uint32_t verifier_logging_threshold_ms_;
1216
Mathieu Chartiera88abfa2019-02-04 11:08:29 -08001217 bool load_app_image_startup_cache_ = false;
1218
Mathieu Chartier175ce3d2019-03-06 16:54:24 -08001219 // Whether startup has completed; the transition to true happens at most once.
1220 std::atomic<bool> startup_completed_ = false;
1221
Andreas Gampe86823542019-02-25 09:38:49 -08001222 gc::space::ImageSpaceLoadingOrder image_space_loading_order_ =
1223 gc::space::ImageSpaceLoadingOrder::kSystemFirst;
1224
Andreas Gampe44f67602018-11-28 08:27:27 -08001225 // Note: See comments on GetFaultMessage.
1226 friend std::string GetFaultMessageForAbortLogging();
Mathieu Chartierada33d72018-12-17 13:17:30 -08001227 friend class ScopedThreadPoolUsage;
Vladimir Markof3d88a82018-12-21 16:38:47 +00001228 friend class OatFileAssistantTest;
Andreas Gampe44f67602018-11-28 08:27:27 -08001229
Carl Shapiro61e019d2011-07-14 16:53:09 -07001230 DISALLOW_COPY_AND_ASSIGN(Runtime);
Carl Shapiro1fb86202011-06-27 17:43:13 -07001231};
1232
1233} // namespace art
1234
Brian Carlstromfc0e3212013-07-17 14:40:12 -07001235#endif // ART_RUNTIME_RUNTIME_H_