/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <memory>
#include <vector>

#include "base/file_utils.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "gc/space/image_space_loading_order.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jdwp_provider.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"

namespace art {

namespace gc {
class AbstractSystemWeakHolder;
class Heap;
}  // namespace gc

namespace hiddenapi {
enum class EnforcementPolicy;
}  // namespace hiddenapi

namespace jit {
class Jit;
class JitCodeCache;
class JitOptions;
}  // namespace jit

namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
class String;
class Throwable;
}  // namespace mirror
namespace ti {
class Agent;
class AgentSpec;
}  // namespace ti
namespace verifier {
class MethodVerifier;
enum class VerifyMode : int8_t;
}  // namespace verifier
class ArenaPool;
class ArtMethod;
enum class CalleeSaveType: uint32_t;
class ClassLinker;
class CompilerCallbacks;
class DexFile;
enum class InstructionSet;
class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileAssistantTest;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class ThreadPool;
class Trace;
struct TraceConfig;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

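  // Illustrative sketch only (the option string below is just an example): an embedder such
  // as dex2oat or JNI_CreateJavaVM typically creates the runtime from raw options and then
  // starts it, e.g.
  //
  //   RuntimeOptions options;
  //   options.push_back(std::make_pair("-Xmx64m", nullptr));
  //   if (Runtime::Create(options, /*ignore_unrecognized=*/ false)) {
  //     Runtime::Current()->Start();  // May start threads and run managed code.
  //   }
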
  // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  bool IsUsingDefaultBootImageLocation() const {
    return is_using_default_boot_image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  void RunRootClinits(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);

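  // Illustrative sketch only: a native thread that wants to run managed code first attaches
  // itself to the runtime and detaches before exiting, e.g.
  //
  //   Runtime* runtime = Runtime::Current();
  //   if (runtime->AttachCurrentThread("MyNativeWorker",
  //                                    /*as_daemon=*/ false,
  //                                    runtime->GetMainThreadGroup(),
  //                                    /*create_peer=*/ true)) {
  //     // ... call into managed code via JNI / Thread::Current() ...
  //     runtime->DetachCurrentThread();
  //   }
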
  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::vector<std::string>& GetBootClassPath() const {
    return boot_class_path_;
  }

  const std::vector<std::string>& GetBootClassPathLocations() const {
    DCHECK(boot_class_path_locations_.empty() ||
           boot_class_path_locations_.size() == boot_class_path_.size());
    return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  unsigned int GetFinalizerTimeoutMs() const {
    return finalizer_timeout_ms_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingException()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow()
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast for making blocking threads to respond to
  // checkpoint requests. It's false when we broadcast to unblock blocking threads after system weak
  // access is reenabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);

  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots, only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non thread roots, we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweep system weaks, the system weak is deleted if the visitor return null. Otherwise, the
  // system weak is updated to be the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables();
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);

  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  // Returns true if JIT compilations are enabled. GetJit() will be not null in this case.
  bool UseJitCompilation() const;

  void PreZygoteFork();
  void PostZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env,
      bool is_system_server,
      NativeBridgeAction action,
      const char* isa,
      bool profile_system_server = false);

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const;
  void EnterTransactionMode();
  void EnterTransactionMode(bool strict, mirror::Class* root);
  void ExitTransactionMode();
  void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
  // Transaction rollback and exit transaction are always done together, so it's convenient to
  // do them in one function.
  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsTransactionAborted() const;
  const std::unique_ptr<Transaction>& GetTransaction() const;
  bool IsActiveStrictTransactionMode() const;

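  // Illustrative sketch only: dex2oat-style class pre-initialization drives these hooks roughly
  // as follows; writes performed inside the transaction are recorded through the RecordWrite*
  // methods below so that they can be undone on abort:
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->EnterTransactionMode();
  //   // ... interpret the class initializer; each mutation is reported via
  //   //     RecordWriteField32(), RecordWriteFieldReference(), RecordWriteArray(), etc. ...
  //   if (runtime->IsTransactionAborted()) {
  //     runtime->RollbackAndExitTransactionMode();
  //   } else {
  //     runtime->ExitTransactionMode();
  //   }
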
  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetFaultMessage(const std::string& message);

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  void DisableVerifier();
  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    hidden_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const {
    return hidden_api_policy_;
  }

  void SetCorePlatformApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    core_platform_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetCorePlatformApiEnforcementPolicy() const {
    return core_platform_api_policy_;
  }

  void SetHiddenApiExemptions(const std::vector<std::string>& exemptions) {
    hidden_api_exemptions_ = exemptions;
  }

  const std::vector<std::string>& GetHiddenApiExemptions() {
    return hidden_api_exemptions_;
  }

  void SetDedupeHiddenApiWarnings(bool value) {
    dedupe_hidden_api_warnings_ = value;
  }

  bool ShouldDedupeHiddenApiWarnings() {
    return dedupe_hidden_api_warnings_;
  }

  void SetHiddenApiEventLogSampleRate(uint32_t rate) {
    hidden_api_access_event_log_rate_ = rate;
  }

  uint32_t GetHiddenApiEventLogSampleRate() const {
    return hidden_api_access_event_log_rate_;
  }

  const std::string& GetProcessPackageName() const {
    return process_package_name_;
  }

  void SetProcessPackageName(const char* package_name) {
    if (package_name == nullptr) {
      process_package_name_.clear();
    } else {
      process_package_name_ = package_name;
    }
  }

  bool IsDexFileFallbackEnabled() const {
    return allow_dex_file_fallback_;
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(uint32_t version) {
    target_sdk_version_ = version;
  }

  uint32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  void CreateJitCodeCache(bool rwx_memory_allowed);

  // Create the JIT and instrumentation and code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetJavaDebuggable(bool value);

  // Deoptimize the boot image, called for Java debuggable apps.
  void DeoptimizeBootImage() REQUIRES(Locks::mutator_lock_);

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  bool AreNonStandardExitsEnabled() const {
    return non_standard_exits_enabled_;
  }

  void SetNonStandardExitsEnabled() {
    DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
  }

  bool AreAsyncExceptionsThrown() const {
    return async_exceptions_thrown_;
  }

  void SetAsyncExceptionsThrown() {
    DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
  }

  // Change state and re-check which interpreter should be used.
  //
  // This must be called whenever there is an event that forces
  // us to use a different interpreter (e.g. debugger is attached).
  //
  // Changing the state using the lambda gives us some multithreading safety.
  // It ensures that two calls do not interfere with each other and
  // it makes it possible to DCHECK that the thread local flag is correct.
  template<typename Action>
  static void DoAndMaybeSwitchInterpreter(Action lamda);

  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
  // For testing purpose only.
  // TODO: Remove this when this is no longer needed (b/116087961).
  GcRoot<mirror::Object> GetSentinel() REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  bool IsSafeMode() const {
    return safe_mode_;
  }

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pause.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }

  // Returns if the code can be deoptimized asynchronously. Code may be compiled with some
  // optimization that makes it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);

  void AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader);

  const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
    return agents_;
  }

  RuntimeCallbacks* GetRuntimeCallbacks();

  bool HasLoadedPlugins() const {
    return !plugins_.empty();
  }

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  bool GetDumpGCPerformanceOnShutdown() const {
    return dump_gc_performance_on_shutdown_;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool MAdviseRandomAccess() const {
    return madvise_random_access_;
  }

  const std::string& GetJdwpOptions() {
    return jdwp_options_;
  }

  JdwpProvider GetJdwpProvider() const {
    return jdwp_provider_;
  }

  uint32_t GetVerifierLoggingThresholdMs() const {
    return verifier_logging_threshold_ms_;
  }

  // Atomically delete the thread pool if the reference count is 0.
  bool DeleteThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // Wait for all the thread workers to be attached.
  void WaitForThreadPoolWorkersToStart() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // Scoped usage of the runtime thread pool. Prevents the pool from being
  // deleted. Note that the thread pool is only for startup and gets deleted after.
  class ScopedThreadPoolUsage {
   public:
    ScopedThreadPoolUsage();
    ~ScopedThreadPoolUsage();

    // Return the thread pool.
    ThreadPool* GetThreadPool() const {
      return thread_pool_;
    }

   private:
    ThreadPool* const thread_pool_;
  };

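  // Illustrative sketch only: startup code borrows the pool through the scoped helper so the
  // pool cannot be deleted while it is in use, e.g.
  //
  //   Runtime::ScopedThreadPoolUsage stpu;
  //   ThreadPool* pool = stpu.GetThreadPool();
  //   if (pool != nullptr) {
  //     // ... dispatch startup work to the pool ...
  //   }
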
  bool LoadAppImageStartupCache() const {
    return load_app_image_startup_cache_;
  }

  void SetLoadAppImageStartupCacheEnabled(bool enabled) {
    load_app_image_startup_cache_ = enabled;
  }

  // Notify the runtime that application startup is considered completed. Only has effect for the
  // first call.
  void NotifyStartupCompleted();

  // Return true if startup is already completed.
  bool GetStartupCompleted() const;

  gc::space::ImageSpaceLoadingOrder GetImageSpaceLoadingOrder() const {
    return image_space_loading_order_;
  }

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized, they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Note: To be lock-free, GetFaultMessage temporarily replaces the fault message with null.
  //       As such, there is a window where a call will return an empty string. In general,
  //       only aborting code should retrieve this data (via GetFaultMessageForAbortLogging
  //       friend).
  std::string GetFaultMessage();

  ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
  void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackground = 1;

  static constexpr uint32_t kCalleeSaveSize = 6u;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kCalleeSaveSize];
  // Pre-allocated exceptions (see Runtime::Init).
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_exception_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_oome_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_handling_stack_overflow_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // Unresolved method has the same behavior as the conflict method, it is used by the class linker
  // for differentiating between unfilled imt slots vs conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;
  bool is_using_default_boot_image_location_;

  std::vector<std::string> boot_class_path_;
  std::vector<std::string> boot_class_path_locations_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  std::list<ti::AgentSpec> agent_specs_;
  std::list<std::unique_ptr<ti::Agent>> agents_;
  std::vector<Plugin> plugins_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  // Finalizers running for longer than this many milliseconds abort the runtime.
  unsigned int finalizer_timeout_ms_;

  gc::Heap* heap_;

  std::unique_ptr<ArenaPool> jit_arena_pool_;
  std::unique_ptr<ArenaPool> arena_pool_;
  // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
  // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
  // since the field arrays are int arrays in this case.
  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;

  // Shared linear alloc for now.
  std::unique_ptr<LinearAlloc> linear_alloc_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;

  std::unique_ptr<JavaVMExt> java_vm_;

  std::unique_ptr<jit::Jit> jit_;
  std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
  std::unique_ptr<jit::JitOptions> jit_options_;

  // Runtime thread pool. The pool is only for startup and gets deleted after.
  std::unique_ptr<ThreadPool> thread_pool_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
  size_t thread_pool_ref_count_ GUARDED_BY(Locks::runtime_thread_pool_lock_);

  // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
  // lock-free, so needs to be atomic.
  std::atomic<std::string*> fault_message_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // New flag added which tells us if the runtime has finished starting. If
  // this flag is set then the Daemon threads are created and the class loader
  // is created. This flag is needed for knowing if it's safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool is_running_on_memory_tool_;

  std::unique_ptr<TraceConfig> trace_config_;

  instrumentation::Instrumentation instrumentation_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

Chang Xing605fe242017-07-20 15:57:21 -07001036 // Transactions used for pre-initializing classes at compilation time.
1037 // To support nested transactions, we maintain a list containing all transactions. Transactions
1038 // are handled under a stack discipline, but because the GC needs to iterate over all of them, we
1039 // use a list rather than an actual stack as the underlying data structure.
1040 std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;
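  // Sketch of the intended discipline (which end of the list is used is an assumption):
  //
  //   preinitialization_transactions_.push_back(std::move(transaction));   // enter nested transaction
  //   // ... the GC can walk the entire list while the transaction is live ...
  //   preinitialization_transactions_.pop_back();                          // leave the innermost transaction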
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001041
Igor Murashkin7617abd2015-07-10 18:27:47 -07001042 // If kNone, verification is disabled. kEnable by default.
1043 verifier::VerifyMode verify_;
Jeff Hao4a200f52014-04-01 14:58:49 -07001044
Jean Christophe Beyler24e04aa2014-09-12 12:03:25 -07001045 // If true, the runtime may use dex files directly with the interpreter if an oat file is not
1046 // available/usable.
1047 bool allow_dex_file_fallback_;
1048
Dmitriy Ivanov785049f2014-07-18 10:08:57 -07001049 // List of supported CPU ABIs.
1050 std::vector<std::string> cpu_abilist_;
1051
Jeff Haof00571c2014-05-29 17:29:47 -07001052 // Specifies target SDK version to allow workarounds for certain API levels.
David Brazdil2bb2fbd2018-11-13 18:24:26 +00001053 uint32_t target_sdk_version_;
Jeff Haof00571c2014-05-29 17:29:47 -07001054
Dave Allison69dfe512014-07-11 17:11:58 +00001055 // Implicit checks flags.
1056 bool implicit_null_checks_; // NullPointer checks are implicit.
1057 bool implicit_so_checks_; // StackOverflow checks are implicit.
1058 bool implicit_suspend_checks_; // Thread suspension checks are implicit.
1059
Calin Juravle01aaf6e2015-06-19 22:05:39 +01001060 // Whether or not the sig chain (and implicitly the fault handler) should be
Vladimir Markoa497a392018-09-26 10:52:50 +01001061 // disabled. Tools like dex2oat don't need them. This enables
Calin Juravle01aaf6e2015-06-19 22:05:39 +01001062 // building a statically linked version of dex2oat.
1063 bool no_sig_chain_;
1064
Calin Juravled3e7c6c2016-02-04 19:07:51 +00001065 // Force the use of native bridge even if the app ISA matches the runtime ISA.
1066 bool force_native_bridge_;
1067
Calin Juravle07d83c72014-10-22 21:02:23 +01001068 // Whether or not a native bridge has been loaded.
Calin Juravlec8423522014-08-12 20:55:20 +01001069 //
1070 // The native bridge allows running native code compiled for a foreign ISA. It works as follows:
1071 // if a standard dlopen fails to load the native library associated with a native activity, we call
1072 // the native bridge to load it and then get the trampoline for the entry to the native activity.
Calin Juravle07d83c72014-10-22 21:02:23 +01001073 //
1074 // The option 'native_bridge_library_filename' specifies the name of the native bridge.
1075 // When non-empty the native bridge will be loaded from the given file. An empty value means
1076 // that there's no native bridge.
1077 bool is_native_bridge_loaded_;
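  // Roughly, the loading path described above looks like the following sketch (the libnativebridge
  // call names are recalled from memory and should be treated as assumptions):
  //
  //   void* handle = dlopen(path, RTLD_NOW);
  //   if (handle == nullptr && is_native_bridge_loaded_) {
  //     handle = android::NativeBridgeLoadLibrary(path, RTLD_NOW);
  //     void* fn = android::NativeBridgeGetTrampoline(handle, "ANativeActivity_onCreate", nullptr, 0);
  //   }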
Calin Juravlec8423522014-08-12 20:55:20 +01001078
David Srbeckyf4480162016-03-16 00:06:24 +00001079 // Whether we are running under native debugger.
1080 bool is_native_debuggable_;
1081
Alex Light7919db92017-11-29 09:00:55 -08001082 // Whether or not any async exceptions have ever been thrown. This is used to speed up the
1083 // MterpShouldSwitchInterpreters function.
1084 bool async_exceptions_thrown_;
1085
Alex Light0aa7a5a2018-10-10 15:58:14 +00001086 // Whether anything is going to be using the shadow-frame APIs to force a function to return
1087 // early. Doing this requires that (1) we be debuggable and (2) mterp be exited.
1088 bool non_standard_exits_enabled_;
1089
Nicolas Geoffray433b79a2017-01-30 20:54:45 +00001090 // Whether Java code needs to be debuggable.
1091 bool is_java_debuggable_;
Alex Light6b16d892016-11-11 11:21:04 -08001092
Narayan Kamath5a2be3f2015-02-16 13:51:51 +00001093 // The maximum number of failed boots we allow before pruning the dalvik cache
1094 // and trying again. This option is only inspected when we're running as a
1095 // zygote.
1096 uint32_t zygote_max_failed_boots_;
1097
Igor Murashkin158f35c2015-06-10 15:55:30 -07001098 // Enable experimental opcodes that aren't fully specified yet. The intent is to
1099 // eventually publish them as public-usable opcodes, but they aren't ready yet.
1100 //
1101 // Experimental opcodes should not be used by other production code.
Alex Lighteb7c1442015-08-31 13:17:42 -07001102 ExperimentalFlags experimental_flags_;
Igor Murashkin158f35c2015-06-10 15:55:30 -07001103
Andreas Gampedd671252015-07-23 14:37:18 -07001104 // Contains the build fingerprint, if given as a parameter.
1105 std::string fingerprint_;
1106
Mathieu Chartierf9c6fc62015-10-07 11:44:05 -07001107 // Oat file manager, keeps track of what oat files are open.
Mathieu Chartiere58991b2015-10-13 07:59:34 -07001108 OatFileManager* oat_file_manager_;
Mathieu Chartierf9c6fc62015-10-07 11:44:05 -07001109
Mathieu Chartier32cc9ee2015-10-15 09:19:15 -07001110 // Whether or not we are on a low RAM device.
1111 bool is_low_memory_mode_;
1112
Mathieu Chartierc42cb0e2017-10-13 11:35:00 -07001113 // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
1114 // This is beneficial for low RAM devices since it reduces page cache thrashing.
1115 bool madvise_random_access_;
1116
Nicolas Geoffray787ae8e2015-11-05 11:32:24 +00001117 // Whether the application should run in safe mode, that is, interpreter only.
1118 bool safe_mode_;
1119
David Brazdil3e0fa0a2018-01-15 18:41:44 +00001120 // Whether access checks on hidden API should be performed.
Mathew Inwooda5dc52c2018-02-19 15:30:51 +00001121 hiddenapi::EnforcementPolicy hidden_api_policy_;
David Brazdil3e0fa0a2018-01-15 18:41:44 +00001122
David Brazdile7681822018-12-14 16:25:33 +00001123 // Whether access checks on core platform API should be performed.
1124 hiddenapi::EnforcementPolicy core_platform_api_policy_;
1125
Mathew Inwoodc8ce5f52018-04-05 13:58:55 +01001126 // List of signature prefixes of methods that have been removed from the blacklist, and treated
1127 // as if whitelisted.
Mathew Inwood3383aa52018-03-16 14:18:33 +00001128 std::vector<std::string> hidden_api_exemptions_;
1129
David Brazdilee7d2fd2018-01-20 17:25:23 +00001130 // Do not warn about the same hidden API access violation twice.
1131 // This is only used for testing.
1132 bool dedupe_hidden_api_warnings_;
1133
Mathew Inwood5bcef172018-05-01 14:40:12 +01001134 // How often to log hidden API access to the event log. An integer between 0
1135 // (never) and 0x10000 (always).
Mathew Inwood73ddda42018-04-03 15:32:32 +01001136 uint32_t hidden_api_access_event_log_rate_;
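  // For example (illustrative only), a rate of 0x100 corresponds to logging roughly 1 access in
  // 256, which could be sampled as:
  //
  //   if ((random_value & 0xffff) < hidden_api_access_event_log_rate_) {
  //     LogHiddenApiAccessToEventLog();   // hypothetical helper name
  //   }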
1137
Mathew Inwood5bcef172018-05-01 14:40:12 +01001138 // The package of the app running in this process.
1139 std::string process_package_name_;
1140
Nicolas Geoffraya73280d2016-02-15 13:05:16 +00001141 // Whether threads should dump their native stack on SIGQUIT.
1142 bool dump_native_stack_on_sig_quit_;
1143
Andreas Gampea1425a12016-03-11 17:44:04 -08001144 // Whether the dalvik cache was pruned when initializing the runtime.
1145 bool pruned_dalvik_cache_;
1146
Mathieu Chartierf8cb1782016-03-18 18:45:41 -07001147 // Whether or not we currently care about pause times.
1148 ProcessState process_state_;
1149
Andreas Gampef38a6612016-04-11 08:42:26 -07001150 // Whether zygote code is in a section that should not start threads.
1151 bool zygote_no_threads_;
1152
Alex Light40320712017-12-14 11:52:04 -08001153 // The string containing the requested jdwp options.
1154 std::string jdwp_options_;
1155
1156 // The jdwp provider we were configured with.
1157 JdwpProvider jdwp_provider_;
1158
David Sehrd106d9f2016-08-16 19:22:57 -07001159 // Saved environment.
1160 class EnvSnapshot {
1161 public:
1162 EnvSnapshot() = default;
1163 void TakeSnapshot();
1164 char** GetSnapshot() const;
1165
1166 private:
1167 std::unique_ptr<char*[]> c_env_vector_;
1168 std::vector<std::unique_ptr<std::string>> name_value_pairs_;
1169
1170 DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
1171 } env_snapshot_;
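  // Usage sketch (the snapshot semantics are inferred from the "Saved environment" comment above):
  //
  //   EnvSnapshot snapshot;
  //   snapshot.TakeSnapshot();               // capture the current environment
  //   char** envp = snapshot.GetSnapshot();  // stable envp, usable e.g. for a later exec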
1172
Andreas Gampefda57142016-09-08 20:29:18 -07001173 // Generic system-weak holders.
1174 std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;
1175
Andreas Gampeac30fa22017-01-18 21:02:36 -08001176 std::unique_ptr<RuntimeCallbacks> callbacks_;
Andreas Gampe04bbb5b2017-01-19 17:49:03 +00001177
Nicolas Geoffray81cc43e2017-05-10 12:04:49 +01001178 std::atomic<uint32_t> deoptimization_counts_[
1179 static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
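  // One counter per DeoptimizationKind; a bump would look like this sketch (the memory ordering
  // choice is an assumption):
  //
  //   deoptimization_counts_[static_cast<uint32_t>(kind)].fetch_add(1, std::memory_order_relaxed);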
Nicolas Geoffray4e92c3c2017-05-08 09:34:26 +01001180
Vladimir Markoc34bebf2018-08-16 16:12:49 +01001181 MemMap protected_fault_page_;
Andreas Gampe2ac67d52017-05-11 22:30:38 -07001182
Andreas Gampe0b0ffc12018-08-01 14:41:27 -07001183 uint32_t verifier_logging_threshold_ms_;
1184
Mathieu Chartiera88abfa2019-02-04 11:08:29 -08001185 bool load_app_image_startup_cache_ = false;
1186
Mathieu Chartier175ce3d2019-03-06 16:54:24 -08001187 // Whether startup has completed. The transition to true happens at most once.
1188 std::atomic<bool> startup_completed_ = false;
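  // "At most once" can be enforced with a compare-and-exchange, e.g. (sketch):
  //
  //   bool expected = false;
  //   if (startup_completed_.compare_exchange_strong(expected, true)) {
  //     // only the winning thread performs the startup-completed work
  //   }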
1189
Andreas Gampe86823542019-02-25 09:38:49 -08001190 gc::space::ImageSpaceLoadingOrder image_space_loading_order_ =
1191 gc::space::ImageSpaceLoadingOrder::kSystemFirst;
1192
Andreas Gampe44f67602018-11-28 08:27:27 -08001193 // Note: See comments on GetFaultMessage.
1194 friend std::string GetFaultMessageForAbortLogging();
Mathieu Chartierada33d72018-12-17 13:17:30 -08001195 friend class ScopedThreadPoolUsage;
Vladimir Markof3d88a82018-12-21 16:38:47 +00001196 friend class OatFileAssistantTest;
Andreas Gampe44f67602018-11-28 08:27:27 -08001197
Carl Shapiro61e019d2011-07-14 16:53:09 -07001198 DISALLOW_COPY_AND_ASSIGN(Runtime);
Carl Shapiro1fb86202011-06-27 17:43:13 -07001199};
1200
1201} // namespace art
1202
Brian Carlstromfc0e3212013-07-17 14:40:12 -07001203#endif // ART_RUNTIME_RUNTIME_H_