/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "base/locks.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/metrics.h"
#include "base/string_view_cpp20.h"
#include "compat_framework.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jdwp_provider.h"
#include "jni/jni_id_manager.h"
#include "jni_id_type.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "reflective_value_visitor.h"
#include "runtime_stats.h"

namespace art {

namespace gc {
class AbstractSystemWeakHolder;
class Heap;
}  // namespace gc

namespace hiddenapi {
enum class EnforcementPolicy;
}  // namespace hiddenapi

namespace jit {
class Jit;
class JitCodeCache;
class JitOptions;
}  // namespace jit

namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
class String;
class Throwable;
}  // namespace mirror
namespace ti {
class Agent;
class AgentSpec;
}  // namespace ti
namespace verifier {
class MethodVerifier;
enum class VerifyMode : int8_t;
}  // namespace verifier
class ArenaPool;
class ArtMethod;
enum class CalleeSaveType: uint32_t;
class ClassLinker;
class CompilerCallbacks;
class Dex2oatImageTest;
class DexFile;
enum class InstructionSet;
class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileAssistantTest;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class ThreadPool;
class Trace;
struct TraceConfig;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

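  // Illustrative usage sketch (not part of the upstream header): a host such as the code behind
  // JNI_CreateJavaVM is expected to parse options, create the runtime, and then start it. The
  // option strings below are hypothetical examples.
  //
  //   RuntimeOptions raw_options = {{"-Xmx64m", nullptr}, {"-Xcheck:jni", nullptr}};
  //   RuntimeArgumentMap runtime_options;
  //   if (!Runtime::ParseOptions(raw_options, /*ignore_unrecognized=*/false, &runtime_options) ||
  //       !Runtime::Create(std::move(runtime_options))) {
  //     return false;  // Option parsing or runtime creation failed.
  //   }
  //   Runtime* runtime = Runtime::Current();
  //   bool started = runtime->Start();  // May start threads and run managed code.
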
  bool EnsurePluginLoaded(const char* plugin_name, std::string* error_msg);
  bool EnsurePerfettoPlugin(std::string* error_msg);

  // IsAotCompiler is true for AOT compilers that don't have a running runtime.
  // Only dex2oat currently.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is true for any runtime which has a running compiler, either dex2oat or the JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsPrimaryZygote() const {
    return is_primary_zygote_;
  }

  bool IsSystemServer() const {
    return is_system_server_;
  }

  void SetAsSystemServer() {
    is_system_server_ = true;
    is_zygote_ = false;
    is_primary_zygote_ = false;
  }

  void SetAsZygoteChild(bool is_system_server, bool is_zygote) {
    // System server should have been set earlier in SetAsSystemServer.
    CHECK_EQ(is_system_server_, is_system_server);
    is_zygote_ = is_zygote;
    is_primary_zygote_ = false;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  void RunRootClinits(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);

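  // Illustrative usage sketch (not part of the upstream header): a native thread created outside
  // the runtime must attach before touching managed state and detach before exiting. The thread
  // name below is hypothetical.
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->AttachCurrentThread("worker", /*as_daemon=*/false,
  //                                runtime->GetSystemThreadGroup(), /*create_peer=*/true);
  //   // ... call into managed code via JNI ...
  //   runtime->DetachCurrentThread();
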
  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::vector<std::string>& GetBootClassPath() const {
    return boot_class_path_;
  }

  const std::vector<std::string>& GetBootClassPathLocations() const {
    DCHECK(boot_class_path_locations_.empty() ||
           boot_class_path_locations_.size() == boot_class_path_.size());
    return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  jni::JniIdManager* GetJniIdManager() const {
    return jni_id_manager_.get();
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  unsigned int GetFinalizerTimeoutMs() const {
    return finalizer_timeout_ms_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingException()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow()
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast to make blocked threads respond to
  // checkpoint requests. It's false when we broadcast to unblock blocked threads after system
  // weak access is re-enabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);

  // Visit all the roots. The behavior can be adjusted via |flags| (see VisitRootFlags); by
  // default all roots are visited.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots, only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non-thread roots; we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweep system weaks: a system weak is deleted if the visitor returns null; otherwise it is
  // updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Walk all reflective objects and visit their targets as well as any method/fields held by the
  // runtime threads that are marked as being reflective.
  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) REQUIRES(Locks::mutator_lock_);
  // Helper for visiting reflective targets with lambdas for both field and method reflective
  // targets.
  template <typename FieldVis, typename MethodVis>
  void VisitReflectiveTargets(FieldVis&& fv, MethodVis&& mv) REQUIRES(Locks::mutator_lock_) {
    FunctionReflectiveValueVisitor frvv(fv, mv);
    VisitReflectiveTargets(&frvv);
  }

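  // Illustrative sketch (not part of the upstream header): the lambda-based overload lets the
  // caller pass one visitor per target kind. The lambda signatures shown here are assumptions
  // based on FunctionReflectiveValueVisitor; see reflective_value_visitor.h for the real types.
  //
  //   runtime->VisitReflectiveTargets(
  //       [](ArtField* field, const auto& info) { /* inspect or remap field */ return field; },
  //       [](ArtMethod* method, const auto& info) { /* inspect or remap method */ return method; });
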
  // Returns a special method that calls into a trampoline for runtime method resolution
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables() REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  uint64_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);

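  // Illustrative sketch (not part of the upstream header): allocation statistics are toggled
  // globally and then read per kind. KIND_ALLOCATED_OBJECTS is assumed here to be one of the
  // kind bits defined in runtime_stats.h.
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->SetStatsEnabled(true);
  //   // ... run the workload being measured ...
  //   uint64_t allocated = runtime->GetStat(KIND_ALLOCATED_OBJECTS);
  //   runtime->ResetStats(KIND_ALLOCATED_OBJECTS);
  //   runtime->SetStatsEnabled(false);
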
  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  jit::JitCodeCache* GetJitCodeCache() const {
    return jit_code_cache_.get();
  }

  // Returns true if JIT compilations are enabled. GetJit() will not be null in this case.
  bool UseJitCompilation() const;

  void PreZygoteFork();
  void PostZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env,
      bool is_system_server,
      bool is_child_zygote,
      NativeBridgeAction action,
      const char* isa,
      bool profile_system_server = false);

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const;
  void EnterTransactionMode(bool strict, mirror::Class* root);
  void ExitTransactionMode();
  void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
  // Transaction rollback and exiting the transaction are always done together; it's convenient
  // to do them in one function.
  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsTransactionAborted() const;
  const std::unique_ptr<Transaction>& GetTransaction() const;
  bool IsActiveStrictTransactionMode() const;

  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetFaultMessage(const std::string& message);

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  void DisableVerifier();
  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    hidden_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const {
    return hidden_api_policy_;
  }

  void SetCorePlatformApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    core_platform_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetCorePlatformApiEnforcementPolicy() const {
    return core_platform_api_policy_;
  }

  void SetTestApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    test_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetTestApiEnforcementPolicy() const {
    return test_api_policy_;
  }

  void SetHiddenApiExemptions(const std::vector<std::string>& exemptions) {
    hidden_api_exemptions_ = exemptions;
  }

  const std::vector<std::string>& GetHiddenApiExemptions() {
    return hidden_api_exemptions_;
  }

  void SetDedupeHiddenApiWarnings(bool value) {
    dedupe_hidden_api_warnings_ = value;
  }

  bool ShouldDedupeHiddenApiWarnings() {
    return dedupe_hidden_api_warnings_;
  }

  void SetHiddenApiEventLogSampleRate(uint32_t rate) {
    hidden_api_access_event_log_rate_ = rate;
  }

  uint32_t GetHiddenApiEventLogSampleRate() const {
    return hidden_api_access_event_log_rate_;
  }

  const std::string& GetProcessPackageName() const {
    return process_package_name_;
  }

  void SetProcessPackageName(const char* package_name) {
    if (package_name == nullptr) {
      process_package_name_.clear();
    } else {
      process_package_name_ = package_name;
    }
  }

  const std::string& GetProcessDataDirectory() const {
    return process_data_directory_;
  }

  void SetProcessDataDirectory(const char* data_dir) {
    if (data_dir == nullptr) {
      process_data_directory_.clear();
    } else {
      process_data_directory_ = data_dir;
    }
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(uint32_t version) {
    target_sdk_version_ = version;
  }

  uint32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  CompatFramework& GetCompatFramework() {
    return compat_framework_;
  }

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  void CreateJitCodeCache(bool rwx_memory_allowed);

  // Create the JIT and instrumentation and code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetProfileableFromShell(bool value) {
    is_profileable_from_shell_ = value;
  }

  bool IsProfileableFromShell() const {
    return is_profileable_from_shell_;
  }

  void SetJavaDebuggable(bool value);

  // Deoptimize the boot image, called for Java debuggable apps.
  void DeoptimizeBootImage() REQUIRES(Locks::mutator_lock_);

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  void SetSignalHookDebuggable(bool value);

  bool AreNonStandardExitsEnabled() const {
    return non_standard_exits_enabled_;
  }

  void SetNonStandardExitsEnabled() {
    DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
  }

  bool AreAsyncExceptionsThrown() const {
    return async_exceptions_thrown_;
  }

  void SetAsyncExceptionsThrown() {
    DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
  }

  // Change state and re-check which interpreter should be used.
  //
  // This must be called whenever there is an event that forces
  // us to use a different interpreter (e.g. the debugger is attached).
  //
  // Changing the state using the lambda gives us some multithreading safety.
  // It ensures that two calls do not interfere with each other and
  // it makes it possible to DCHECK that the thread-local flag is correct.
  template<typename Action>
  static void DoAndMaybeSwitchInterpreter(Action lambda);

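  // Illustrative sketch (not part of the upstream header): setters that influence interpreter
  // selection follow the same pattern as SetNonStandardExitsEnabled() above. The flag name here
  // is hypothetical.
  //
  //   void SetSomeDebugFeatureEnabled() {
  //     DoAndMaybeSwitchInterpreter([=]() { some_debug_feature_enabled_ = true; });
  //   }
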
  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(ObjPtr<mirror::Object> sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
  // For testing purposes only.
  // TODO: Remove this when this is no longer needed (b/116087961).
  GcRoot<mirror::Object> GetSentinel() REQUIRES_SHARED(Locks::mutator_lock_);

  // Use a sentinel for marking entries in a table that have been cleared.
  // This helps diagnose cases where code wrongly accesses such entries.
  static mirror::Class* GetWeakClassSentinel() {
    return reinterpret_cast<mirror::Class*>(0xebadbeef);
  }

  // Helper for the GC to process a weak class in a table.
  static void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr,
                               IsMarkedVisitor* visitor,
                               mirror::Class* update)
      REQUIRES_SHARED(Locks::mutator_lock_);

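  // Illustrative sketch (not part of the upstream header): during a system-weak sweep, a holder
  // of a table of GcRoot<mirror::Class> entries is expected to replace dead classes with the
  // sentinel, roughly as follows (the table variable is hypothetical).
  //
  //   for (GcRoot<mirror::Class>& entry : weak_class_table) {
  //     Runtime::ProcessWeakClass(&entry, visitor, Runtime::GetWeakClassSentinel());
  //   }
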
  // Create a normal LinearAlloc or a low 4gb version if we are a 64 bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  bool IsSafeMode() const {
    return safe_mode_;
  }

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pauses.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }

  // Returns whether the given code can be deoptimized asynchronously. Code may be compiled with
  // some optimization that makes it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);

  void AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader);

  const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
    return agents_;
  }

  RuntimeCallbacks* GetRuntimeCallbacks();

  bool HasLoadedPlugins() const {
    return !plugins_.empty();
  }

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  bool GetDumpGCPerformanceOnShutdown() const {
    return dump_gc_performance_on_shutdown_;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool MAdviseRandomAccess() const {
    return madvise_random_access_;
  }

  const std::string& GetJdwpOptions() {
    return jdwp_options_;
  }

  JdwpProvider GetJdwpProvider() const {
    return jdwp_provider_;
  }

  JniIdType GetJniIdType() const {
    return jni_ids_indirection_;
  }

  bool CanSetJniIdType() const {
    return GetJniIdType() == JniIdType::kSwapablePointer;
  }

  // Changes the JniIdType to the given type. Only allowed if CanSetJniIdType(). All threads must
  // be suspended to call this function.
  void SetJniIdType(JniIdType t);

  uint32_t GetVerifierLoggingThresholdMs() const {
    return verifier_logging_threshold_ms_;
  }

  // Atomically delete the thread pool if the reference count is 0.
  bool DeleteThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // Wait for all the thread workers to be attached.
  void WaitForThreadPoolWorkersToStart() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // Scoped usage of the runtime thread pool. Prevents the pool from being
  // deleted. Note that the thread pool is only for startup and gets deleted afterwards.
  class ScopedThreadPoolUsage {
   public:
    ScopedThreadPoolUsage();
    ~ScopedThreadPoolUsage();

    // Return the thread pool.
    ThreadPool* GetThreadPool() const {
      return thread_pool_;
    }

   private:
    ThreadPool* const thread_pool_;
  };

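  // Illustrative sketch (not part of the upstream header): keeping the pool alive while
  // submitting startup work. The task-submission details are omitted; see thread_pool.h for the
  // real API.
  //
  //   ScopedThreadPoolUsage stpu;
  //   ThreadPool* pool = stpu.GetThreadPool();
  //   if (pool != nullptr) {
  //     // ... add tasks to the pool and wait for them; the pool cannot be deleted
  //     // while `stpu` is in scope.
  //   }
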
  bool LoadAppImageStartupCache() const {
    return load_app_image_startup_cache_;
  }

  void SetLoadAppImageStartupCacheEnabled(bool enabled) {
    load_app_image_startup_cache_ = enabled;
  }

  // Reset the startup completed status so that we can call NotifyStartupCompleted again. Should
  // only be used for testing.
  void ResetStartupCompleted();

  // Notify the runtime that application startup is considered completed. Only has effect for the
  // first call.
  void NotifyStartupCompleted();

  // Return true if startup is already completed.
  bool GetStartupCompleted() const;

  bool IsVerifierMissingKThrowFatal() const {
    return verifier_missing_kthrow_fatal_;
  }

  bool IsPerfettoHprofEnabled() const {
    return perfetto_hprof_enabled_;
  }

  // Return true if we should load oat files as executable or not.
  bool GetOatFilesExecutable() const;

  metrics::ArtMetrics* GetMetrics() { return &metrics_; }

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized; they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Note: To be lock-free, GetFaultMessage temporarily replaces the stored fault message with
  // null. As such, there is a window where a call will return an empty string. In general,
  // only aborting code should retrieve this data (via the GetFaultMessageForAbortLogging friend).
  std::string GetFaultMessage();

  ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
  void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackground = 1;

  static constexpr uint32_t kCalleeSaveSize = 6u;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kCalleeSaveSize];
  // Pre-allocated exceptions (see Runtime::Init).
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_exception_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_oome_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_handling_stack_overflow_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // The unresolved method has the same behavior as the conflict method; it is used by the class
  // linker for differentiating between unfilled imt slots vs conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

Vladimir Marko7624d252014-05-02 14:40:15 +01001037 InstructionSet instruction_set_;
Vladimir Marko7624d252014-05-02 14:40:15 +01001038
Vladimir Marko2b5eaa22013-12-13 13:59:30 +00001039 CompilerCallbacks* compiler_callbacks_;
Elliott Hughes9ca7a1f2011-10-11 14:29:52 -07001040 bool is_zygote_;
Nicolas Geoffray4444f1b2019-06-10 14:09:19 +01001041 bool is_primary_zygote_;
Nicolas Geoffray9ac09ee2019-05-08 23:38:27 +01001042 bool is_system_server_;
Alex Lighta59dd802014-07-02 16:28:08 -07001043 bool must_relocate_;
Mathieu Chartier069387a2012-06-18 12:01:01 -07001044 bool is_concurrent_gc_enabled_;
Anwar Ghuloum87183592013-08-14 12:12:19 -07001045 bool is_explicit_gc_disabled_;
Alex Light64ad14d2014-08-19 14:23:13 -07001046 bool image_dex2oat_enabled_;
Brian Carlstrom0a5b14d2011-09-27 13:29:15 -07001047
Tsu Chiang Chuang12e6d742014-05-22 10:22:25 -07001048 std::string compiler_executable_;
Brian Carlstrom6449c622014-02-10 23:48:36 -08001049 std::vector<std::string> compiler_options_;
1050 std::vector<std::string> image_compiler_options_;
Brian Carlstrom31d8f522014-09-29 11:22:54 -07001051 std::string image_location_;
Dragos Sbirlea7467ee02013-06-21 09:20:34 -07001052
Vladimir Markod1908512018-11-22 14:57:28 +00001053 std::vector<std::string> boot_class_path_;
1054 std::vector<std::string> boot_class_path_locations_;
Brian Carlstroma004aa92012-02-08 18:05:09 -08001055 std::string class_path_string_;
Elliott Hughes7ede61e2011-09-14 18:18:06 -07001056 std::vector<std::string> properties_;
1057
Andreas Gampeaadcbc62017-12-28 14:05:42 -08001058 std::list<ti::AgentSpec> agent_specs_;
1059 std::list<std::unique_ptr<ti::Agent>> agents_;
Alex Light185d1342016-08-11 10:48:03 -07001060 std::vector<Plugin> plugins_;
Alex Light7233c7e2016-07-28 10:07:45 -07001061
Brian Carlstromb765be02011-08-17 23:54:10 -07001062 // The default stack size for managed threads created by the runtime.
Elliott Hughesbe759c62011-09-08 19:38:21 -07001063 size_t default_stack_size_;
Brian Carlstromb765be02011-08-17 23:54:10 -07001064
Hans Boehmb2155572019-03-27 14:25:53 -07001065 // Finalizers running for longer than this many milliseconds abort the runtime.
1066 unsigned int finalizer_timeout_ms_;
1067
Ian Rogers1d54e732013-05-02 21:10:01 -07001068 gc::Heap* heap_;
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001069
Nicolas Geoffray25e04562016-03-01 13:17:58 +00001070 std::unique_ptr<ArenaPool> jit_arena_pool_;
Mathieu Chartier9b34b242015-03-09 11:30:17 -07001071 std::unique_ptr<ArenaPool> arena_pool_;
Mathieu Chartierc7853442015-03-27 14:35:38 -07001072 // Special low-4GB pool for the compiler's linear alloc. ArtFields must lie in the low 4GB when
1073 // compiling with a 64-bit compiler against a 32-bit image and resolving things into that image,
1074 // because in that case the field arrays are stored as int (32-bit) arrays.
1075 std::unique_ptr<ArenaPool> low_4gb_arena_pool_;
1076
1077 // Shared linear alloc for now.
1078 std::unique_ptr<LinearAlloc> linear_alloc_;
Mathieu Chartier9b34b242015-03-09 11:30:17 -07001079
Ian Rogersd9c4fc92013-10-01 19:45:43 -07001080 // The number of spins that are done before thread suspension is used to forcibly inflate.
1081 size_t max_spins_before_thin_lock_inflation_;
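  // Illustrative sketch (hypothetical helper names, simplified) of how this bound is consumed
  // during contended thin-lock acquisition before falling back to inflation:
  //
  //   for (size_t spins = 0; !TryAcquireThinLock(self, obj); ++spins) {
  //     if (spins >= max_spins_before_thin_lock_inflation_) {
  //       InflateThinLocked(self, obj);   // give up spinning; inflate to a fat monitor
  //       break;
  //     }
  //   }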
Elliott Hughesc33a32b2011-10-11 18:18:07 -07001082 MonitorList* monitor_list_;
Ian Rogersef7d42f2014-01-06 12:55:46 -08001083 MonitorPool* monitor_pool_;
Elliott Hughesc33a32b2011-10-11 18:18:07 -07001084
Carl Shapirob5573532011-07-12 18:22:59 -07001085 ThreadList* thread_list_;
Carl Shapiro61e019d2011-07-14 16:53:09 -07001086
Elliott Hughescf4c6c42011-09-01 15:16:42 -07001087 InternTable* intern_table_;
1088
Brian Carlstromb0460ea2011-07-29 10:08:05 -07001089 ClassLinker* class_linker_;
1090
Elliott Hughese27955c2011-08-26 15:21:24 -07001091 SignalCatcher* signal_catcher_;
Narayan Kamatheb710332017-05-10 11:48:46 +01001092
Alex Light79d6c802019-06-27 15:50:11 +00001093 std::unique_ptr<jni::JniIdManager> jni_id_manager_;
1094
Richard Uhlerda0a69e2016-10-11 15:06:38 +01001095 std::unique_ptr<JavaVMExt> java_vm_;
Elliott Hughesf2682d52011-08-15 16:37:04 -07001096
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001097 std::unique_ptr<jit::Jit> jit_;
Orion Hodsonad28f5e2018-10-17 09:08:17 +01001098 std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001099 std::unique_ptr<jit::JitOptions> jit_options_;
1100
Mathieu Chartierada33d72018-12-17 13:17:30 -08001101 // Runtime thread pool. The pool is only used during startup and is deleted afterwards.
1102 std::unique_ptr<ThreadPool> thread_pool_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
1103 size_t thread_pool_ref_count_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
1104
Andreas Gampe44f67602018-11-28 08:27:27 -08001105 // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
1106 // lock-free, so needs to be atomic.
1107 std::atomic<std::string*> fault_message_;
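  // Illustrative sketch (assumed usage, simplified; it leaves aside when the old string may
  // safely be reclaimed) of the lock-free publish pattern this member allows:
  //
  //   std::string* fresh = new std::string(new_message);
  //   std::string* old = fault_message_.exchange(fresh);  // writer publishes the new string
  //   // 'old' must be reclaimed only once no reader can still observe it.
  //   if (std::string* cur = fault_message_.load()) {     // readers just load the pointer
  //     LOG(ERROR) << *cur;
  //   }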
Mathieu Chartier15d34022014-02-26 17:16:38 -08001108
Ian Rogers120f1c72012-09-28 17:17:10 -07001109 // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
1110 // the shutdown lock so that threads aren't born while we're shutting down.
1111 size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);
1112
1113 // Waited upon until no threads are being born.
Ian Rogers700a4022014-05-19 16:49:03 -07001114 std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);
Ian Rogers120f1c72012-09-28 17:17:10 -07001115
1116 // Set when runtime shutdown is past the point that new threads may attach.
1117 bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);
1118
1119 // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
1120 bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);
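  // Illustrative sketch (assumed, simplified) of how these members cooperate during shutdown:
  // the shutting-down thread announces its intent, waits for in-flight thread creation to
  // finish, and only then blocks new attaches.
  //
  //   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  //   shutting_down_started_ = true;
  //   while (threads_being_born_ > 0) {
  //     shutdown_cond_->Wait(self);   // signalled when a nascent thread finishes initializing
  //   }
  //   shutting_down_ = true;          // past this point new threads may no longer attach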
1121
Elliott Hughesdcc24742011-09-07 14:02:44 -07001122 bool started_;
1123
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001124 // Flag that tells us whether the runtime has finished starting. If this flag
1125 // is set then the daemon threads and the class loader have been created. It
1126 // is needed to know whether it is safe to request a CMS (concurrent) GC.
1127 bool finished_starting_;
1128
Brian Carlstrom6ea095a2011-08-16 15:26:54 -07001129 // Hooks supported by JNI_CreateJavaVM
1130 jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
1131 void (*exit_)(jint status);
1132 void (*abort_)();
1133
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001134 bool stats_enabled_;
1135 RuntimeStats stats_;
1136
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001137 const bool is_running_on_memory_tool_;
Mathieu Chartierda44d772014-04-01 15:01:46 -07001138
Andreas Gampef6a780a2015-04-02 18:51:05 -07001139 std::unique_ptr<TraceConfig> trace_config_;
1140
Ian Rogers62d6c772013-02-27 08:32:07 -08001141 instrumentation::Instrumentation instrumentation_;
jeffhao2692b572011-12-16 15:42:28 -08001142
Ian Rogers365c1022012-06-22 15:05:28 -07001143 jobject main_thread_group_;
1144 jobject system_thread_group_;
1145
Brian Carlstromce888532013-10-10 00:32:58 -07001146 // As returned by ClassLoader.getSystemClassLoader().
1147 jobject system_class_loader_;
1148
Hiroshi Yamauchi2e899a92013-11-22 16:50:12 -08001149 // If true, then we dump the GC cumulative timings on shutdown.
1150 bool dump_gc_performance_on_shutdown_;
1151
Chang Xing605fe242017-07-20 15:57:21 -07001152 // Transactions used for pre-initializing classes at compilation time.
1153 // To support nested transactions, we maintain a list containing all active transactions and
1154 // handle them under a stack discipline. Because the GC needs to visit every transaction, a list
1155 // is used as the underlying data structure instead of an actual stack.
1156 std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;
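  // Illustrative sketch (assumed helper names) of the stack discipline kept on a list so that
  // the GC can still iterate over every open transaction:
  //
  //   preinitialization_transactions_.push_back(std::make_unique<Transaction>(...));  // enter
  //   Transaction* innermost = preinitialization_transactions_.back().get();
  //   preinitialization_transactions_.pop_back();                                     // commit/abort
  //   for (auto& tx : preinitialization_transactions_) { tx->VisitRoots(visitor); }   // GC walk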
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001157
Igor Murashkin7617abd2015-07-10 18:27:47 -07001158 // If kNone, verification is disabled. The default is kEnable.
1159 verifier::VerifyMode verify_;
Jeff Hao4a200f52014-04-01 14:58:49 -07001160
Dmitriy Ivanov785049f2014-07-18 10:08:57 -07001161 // List of supported CPU ABIs.
1162 std::vector<std::string> cpu_abilist_;
1163
Jeff Haof00571c2014-05-29 17:29:47 -07001164 // Specifies the target SDK version, to allow workarounds for certain API levels.
David Brazdil2bb2fbd2018-11-13 18:24:26 +00001165 uint32_t target_sdk_version_;
Jeff Haof00571c2014-05-29 17:29:47 -07001166
Andrei Onea037d2822020-11-19 00:20:04 +00001167 // ART counterpart for the compat framework (go/compat-framework).
1168 CompatFramework compat_framework_;
atrostfab72082019-12-06 13:37:36 +00001169
Dave Allison69dfe512014-07-11 17:11:58 +00001170 // Implicit checks flags.
1171 bool implicit_null_checks_; // NullPointer checks are implicit.
1172 bool implicit_so_checks_; // StackOverflow checks are implicit.
1173 bool implicit_suspend_checks_; // Thread suspension checks are implicit.
1174
Calin Juravle01aaf6e2015-06-19 22:05:39 +01001175 // Whether or not the sig chain (and implicitly the fault handler) should be
Vladimir Markoa497a392018-09-26 10:52:50 +01001176 // disabled. Tools like dex2oat don't need them. This enables
Calin Juravle01aaf6e2015-06-19 22:05:39 +01001177 // building a statically linked version of dex2oat.
1178 bool no_sig_chain_;
1179
Calin Juravled3e7c6c2016-02-04 19:07:51 +00001180 // Force the use of native bridge even if the app ISA matches the runtime ISA.
1181 bool force_native_bridge_;
1182
Calin Juravle07d83c72014-10-22 21:02:23 +01001183 // Whether or not a native bridge has been loaded.
Calin Juravlec8423522014-08-12 20:55:20 +01001184 //
1185 // The native bridge allows running native code compiled for a foreign ISA. It works as follows:
1186 // if a standard dlopen fails to load the native library associated with a native activity, the
1187 // native bridge is asked to load it and then provides the trampoline for the activity's entry point.
Calin Juravle07d83c72014-10-22 21:02:23 +01001188 //
1189 // The option 'native_bridge_library_filename' specifies the name of the native bridge.
1190 // When non-empty the native bridge will be loaded from the given file. An empty value means
1191 // that there's no native bridge.
1192 bool is_native_bridge_loaded_;
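  // Illustrative sketch (simplified; the real flow lives in libnativebridge and the JNI library
  // loading code) of the fallback described above:
  //
  //   void* handle = dlopen(path, RTLD_NOW);
  //   if (handle == nullptr && is_native_bridge_loaded_) {
  //     handle = android::NativeBridgeLoadLibrary(path, RTLD_NOW);   // foreign-ISA library
  //     void* entry = android::NativeBridgeGetTrampoline(
  //         handle, "ANativeActivity_onCreate", /*shorty=*/nullptr, /*len=*/0);
  //   }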
Calin Juravlec8423522014-08-12 20:55:20 +01001193
David Srbeckyf4480162016-03-16 00:06:24 +00001194 // Whether we are running under native debugger.
1195 bool is_native_debuggable_;
1196
Alex Light7919db92017-11-29 09:00:55 -08001197 // Whether or not any async exceptions have ever been thrown. This is used to speed up the
1198 // MterpShouldSwitchInterpreters function.
1199 bool async_exceptions_thrown_;
1200
Alex Light0aa7a5a2018-10-10 15:58:14 +00001201 // Whether anything is going to be using the shadow-frame APIs to force a function to return
1202 // early. Doing this requires that (1) we be debuggable and (2) mterp be exited.
1203 bool non_standard_exits_enabled_;
1204
Nicolas Geoffray433b79a2017-01-30 20:54:45 +00001205 // Whether Java code needs to be debuggable.
1206 bool is_java_debuggable_;
Alex Light6b16d892016-11-11 11:21:04 -08001207
Florian Mayer07710c52019-09-16 15:53:38 +00001208 bool is_profileable_from_shell_ = false;
1209
Narayan Kamath5a2be3f2015-02-16 13:51:51 +00001210 // The maximum number of failed boots we allow before pruning the dalvik cache
1211 // and trying again. This option is only inspected when we're running as a
1212 // zygote.
1213 uint32_t zygote_max_failed_boots_;
1214
Igor Murashkin158f35c2015-06-10 15:55:30 -07001215 // Enable experimental opcodes that aren't fully specified yet. The intent is to
1216 // eventually publish them as public-usable opcodes, but they aren't ready yet.
1217 //
1218 // Experimental opcodes should not be used by other production code.
Alex Lighteb7c1442015-08-31 13:17:42 -07001219 ExperimentalFlags experimental_flags_;
Igor Murashkin158f35c2015-06-10 15:55:30 -07001220
Andreas Gampedd671252015-07-23 14:37:18 -07001221 // Contains the build fingerprint, if given as a parameter.
1222 std::string fingerprint_;
1223
Mathieu Chartierf9c6fc62015-10-07 11:44:05 -07001224 // Oat file manager, keeps track of what oat files are open.
Mathieu Chartiere58991b2015-10-13 07:59:34 -07001225 OatFileManager* oat_file_manager_;
Mathieu Chartierf9c6fc62015-10-07 11:44:05 -07001226
Mathieu Chartier32cc9ee2015-10-15 09:19:15 -07001227 // Whether or not we are on a low RAM device.
1228 bool is_low_memory_mode_;
1229
Mathieu Chartierc42cb0e2017-10-13 11:35:00 -07001230 // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
1231 // This is beneficial for low RAM devices since it reduces page cache thrashing.
1232 bool madvise_random_access_;
1233
Nicolas Geoffray787ae8e2015-11-05 11:32:24 +00001234 // Whether the application should run in safe mode, that is, interpreter only.
1235 bool safe_mode_;
1236
David Brazdil3e0fa0a2018-01-15 18:41:44 +00001237 // Whether access checks on hidden API should be performed.
Mathew Inwooda5dc52c2018-02-19 15:30:51 +00001238 hiddenapi::EnforcementPolicy hidden_api_policy_;
David Brazdil3e0fa0a2018-01-15 18:41:44 +00001239
David Brazdile7681822018-12-14 16:25:33 +00001240 // Whether access checks on core platform API should be performed.
1241 hiddenapi::EnforcementPolicy core_platform_api_policy_;
1242
Artur Satayevacdb9a32019-10-28 18:09:53 +00001243 // Whether access checks on test API should be performed.
1244 hiddenapi::EnforcementPolicy test_api_policy_;
1245
Mathew Inwoodc8ce5f52018-04-05 13:58:55 +01001246 // List of signature prefixes of methods that have been removed from the blacklist, and treated
1247 // as if whitelisted.
Mathew Inwood3383aa52018-03-16 14:18:33 +00001248 std::vector<std::string> hidden_api_exemptions_;
1249
David Brazdilee7d2fd2018-01-20 17:25:23 +00001250 // Do not warn about the same hidden API access violation twice.
1251 // This is only used for testing.
1252 bool dedupe_hidden_api_warnings_;
1253
Mathew Inwood5bcef172018-05-01 14:40:12 +01001254 // How often to log hidden API access to the event log. An integer between 0
1255 // (never) and 0x10000 (always).
Mathew Inwood73ddda42018-04-03 15:32:32 +01001256 uint32_t hidden_api_access_event_log_rate_;
1257
Mathew Inwood5bcef172018-05-01 14:40:12 +01001258 // The package of the app running in this process.
1259 std::string process_package_name_;
1260
David Brazdil35a3f6a2019-03-04 15:59:06 +00001261 // The data directory of the app running in this process.
1262 std::string process_data_directory_;
1263
Nicolas Geoffraya73280d2016-02-15 13:05:16 +00001264 // Whether threads should dump their native stack on SIGQUIT.
1265 bool dump_native_stack_on_sig_quit_;
1266
Andreas Gampea1425a12016-03-11 17:44:04 -08001267 // Whether the dalvik cache was pruned when initializing the runtime.
1268 bool pruned_dalvik_cache_;
1269
Mathieu Chartierf8cb1782016-03-18 18:45:41 -07001270 // Whether or not we currently care about pause times.
1271 ProcessState process_state_;
1272
Andreas Gampef38a6612016-04-11 08:42:26 -07001273 // Whether zygote code is in a section that should not start threads.
1274 bool zygote_no_threads_;
1275
Alex Light40320712017-12-14 11:52:04 -08001276 // The string containing the requested JDWP options.
1277 std::string jdwp_options_;
1278
1279 // The jdwp provider we were configured with.
1280 JdwpProvider jdwp_provider_;
1281
Alex Light79d6c802019-06-27 15:50:11 +00001282 // True if jmethodID and jfieldID are opaque indices. When false (the default), these are simply
1283 // pointers. This is set by -Xopaque-jni-ids:{true,false}.
1284 JniIdType jni_ids_indirection_;
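  // Illustrative sketch (assumed call shapes) of the difference this member controls:
  //
  //   // JniIdType::kPointer: the id is the ArtMethod pointer itself.
  //   jmethodID id = reinterpret_cast<jmethodID>(method);
  //   // JniIdType::kIndices: the id is an opaque index minted by jni_id_manager_, so the
  //   // underlying method can be moved or redefined without invalidating handed-out ids.
  //   jmethodID id = jni_id_manager_->EncodeMethodId(method);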
1285
Alex Light4ca2f482019-11-25 16:07:22 -08001286 // Set to false in cases where we want to directly control when jni-id
1287 // indirection is changed. This is intended only for testing JNI id swapping.
1288 bool automatically_set_jni_ids_indirection_;
1289
David Sehrd106d9f2016-08-16 19:22:57 -07001290 // Saved environment.
1291 class EnvSnapshot {
1292 public:
1293 EnvSnapshot() = default;
1294 void TakeSnapshot();
1295 char** GetSnapshot() const;
1296
1297 private:
1298 std::unique_ptr<char*[]> c_env_vector_;
1299 std::vector<std::unique_ptr<std::string>> name_value_pairs_;
1300
1301 DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
1302 } env_snapshot_;
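  // Illustrative usage sketch (assumed): the environment is captured once, and the saved,
  // null-terminated envp-style array can later be handed to exec-like calls so that children
  // observe the environment as it was at runtime startup.
  //
  //   env_snapshot_.TakeSnapshot();
  //   char** envp = env_snapshot_.GetSnapshot();
  //   execve(binary_path, argv, envp);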
1303
Andreas Gampefda57142016-09-08 20:29:18 -07001304 // Generic system-weak holders.
1305 std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;
1306
Andreas Gampeac30fa22017-01-18 21:02:36 -08001307 std::unique_ptr<RuntimeCallbacks> callbacks_;
Andreas Gampe04bbb5b2017-01-19 17:49:03 +00001308
Nicolas Geoffray81cc43e2017-05-10 12:04:49 +01001309 std::atomic<uint32_t> deoptimization_counts_[
1310 static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
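  // Illustrative sketch (assumed): one counter per DeoptimizationKind, bumped without a lock
  // whenever a deoptimization of that kind is recorded:
  //
  //   deoptimization_counts_[static_cast<uint32_t>(kind)].fetch_add(1, std::memory_order_relaxed);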
Nicolas Geoffray4e92c3c2017-05-08 09:34:26 +01001311
Vladimir Markoc34bebf2018-08-16 16:12:49 +01001312 MemMap protected_fault_page_;
Andreas Gampe2ac67d52017-05-11 22:30:38 -07001313
Andreas Gampe0b0ffc12018-08-01 14:41:27 -07001314 uint32_t verifier_logging_threshold_ms_;
1315
Mathieu Chartiera88abfa2019-02-04 11:08:29 -08001316 bool load_app_image_startup_cache_ = false;
1317
Mathieu Chartier175ce3d2019-03-06 16:54:24 -08001318 // Set once startup has completed; this must happen at most once.
1319 std::atomic<bool> startup_completed_ = false;
1320
Andreas Gamped84794d2019-07-18 13:40:03 -07001321 bool verifier_missing_kthrow_fatal_;
Florian Mayer0972d082020-05-15 14:07:31 +02001322 bool perfetto_hprof_enabled_;
Andreas Gamped84794d2019-07-18 13:40:03 -07001323
Eric Holka79872b2020-10-01 13:09:53 -07001324 metrics::ArtMetrics metrics_;
1325
Andreas Gampe44f67602018-11-28 08:27:27 -08001326 // Note: See comments on GetFaultMessage.
1327 friend std::string GetFaultMessageForAbortLogging();
Vladimir Markoc0e0e5e2020-01-23 17:43:05 +00001328 friend class Dex2oatImageTest;
Mathieu Chartierada33d72018-12-17 13:17:30 -08001329 friend class ScopedThreadPoolUsage;
Vladimir Markof3d88a82018-12-21 16:38:47 +00001330 friend class OatFileAssistantTest;
Mathieu Chartierad390fa2019-10-16 20:03:00 -07001331 class NotifyStartupCompletedTask;
Andreas Gampe44f67602018-11-28 08:27:27 -08001332
Carl Shapiro61e019d2011-07-14 16:53:09 -07001333 DISALLOW_COPY_AND_ASSIGN(Runtime);
Carl Shapiro1fb86202011-06-27 17:43:13 -07001334};
1335
Eric Holka79872b2020-10-01 13:09:53 -07001336inline metrics::ArtMetrics* GetMetrics() { return Runtime::Current()->GetMetrics(); }
1337
Carl Shapiro1fb86202011-06-27 17:43:13 -07001338} // namespace art
1339
Brian Carlstromfc0e3212013-07-17 14:40:12 -07001340#endif // ART_RUNTIME_RUNTIME_H_