blob: 2be570ac85c1d3e75000b79b3448ed1e43d8e397 [file] [log] [blame]
Elliott Hughes2faa5f12012-01-30 14:42:07 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
Brian Carlstrom7e93b502011-08-04 14:16:22 -070016
17#include "intern_table.h"
18
Ian Rogers700a4022014-05-19 16:49:03 -070019#include <memory>
20
Mathieu Chartiere401d142015-04-22 13:56:20 -070021#include "gc_root-inl.h"
Mathieu Chartier97509952015-07-13 14:35:43 -070022#include "gc/collector/garbage_collector.h"
Ian Rogers7dfb28c2013-08-22 08:18:36 -070023#include "gc/space/image_space.h"
Mathieu Chartier14c3bf92015-07-13 14:35:43 -070024#include "gc/weak_root_state.h"
Ian Rogers7dfb28c2013-08-22 08:18:36 -070025#include "mirror/dex_cache.h"
26#include "mirror/object_array-inl.h"
27#include "mirror/object-inl.h"
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -070028#include "mirror/string-inl.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080029#include "thread.h"
Elliott Hughes814e4032011-08-23 12:07:56 -070030#include "utf.h"
Brian Carlstrom7e93b502011-08-04 14:16:22 -070031
32namespace art {
33
// Constructs an empty intern table. Weak-root access starts in the normal
// state; boot-image strings are merged in later via AddImageStringsToTable().
InternTable::InternTable()
    : image_added_to_intern_table_(false), log_new_roots_(false),
      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
      weak_root_state_(gc::kWeakRootStateNormal) {
}
Elliott Hughesde69d7f2011-08-18 16:49:37 -070039
Brian Carlstroma663ea52011-08-19 23:33:41 -070040size_t InternTable::Size() const {
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +010041 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartiereb175f72014-10-31 11:49:27 -070042 return strong_interns_.Size() + weak_interns_.Size();
Brian Carlstroma663ea52011-08-19 23:33:41 -070043}
44
// Returns the number of strongly interned strings.
size_t InternTable::StrongSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size();
}
49
// Returns the number of weakly interned strings.
size_t InternTable::WeakSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return weak_interns_.Size();
}
54
Elliott Hughescac6cc72011-11-03 20:31:21 -070055void InternTable::DumpForSigQuit(std::ostream& os) const {
Mathieu Chartiereb175f72014-10-31 11:49:27 -070056 os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
Elliott Hughescac6cc72011-11-03 20:31:21 -070057}
58
// Visits the strong intern roots for the GC.
// |flags| selects either a full visit (kVisitRootFlagAllRoots) or a visit of
// only the roots logged since logging was enabled (kVisitRootFlagNewRoots),
// and may additionally clear the log or toggle logging.
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      // Read before and after the visit (without read barriers) to detect
      // whether the visitor moved the object.
      mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
87
// Looks up a string equal to |s| in the strong table; nullptr if absent.
// Caller must hold Locks::intern_table_lock_ (asserted in Table::Find).
mirror::String* InternTable::LookupStrong(mirror::String* s) {
  return strong_interns_.Find(s);
}
91
// Looks up a string equal to |s| in the weak table; nullptr if absent.
// Caller must hold Locks::intern_table_lock_ (asserted in Table::Find).
mirror::String* InternTable::LookupWeak(mirror::String* s) {
  return weak_interns_.Find(s);
}
95
// Promotes the current (post-zygote) tables to pre-zygote status, typically
// around zygote fork so pre-fork interns become the shared baseline.
void InternTable::SwapPostZygoteWithPreZygote() {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SwapPostZygoteWithPreZygote();
  strong_interns_.SwapPostZygoteWithPreZygote();
}
101
// Inserts |s| into the strong table and returns it. Records the insertion for
// rollback when a transaction is active, and logs the new root when root
// logging is enabled. Caller must hold Locks::intern_table_lock_.
mirror::String* InternTable::InsertStrong(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s);
  return s;
}
113
// Inserts |s| into the weak table and returns it. Records the insertion for
// rollback when a transaction is active. Caller must hold intern_table_lock_.
mirror::String* InternTable::InsertWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s);
  return s;
}
122
// Removes |s| from the strong table. Unlike RemoveWeak(), no transaction
// record is made here; presumably strong removals never happen inside a
// transaction -- TODO confirm against transaction rollback paths.
void InternTable::RemoveStrong(mirror::String* s) {
  strong_interns_.Remove(s);
}
126
// Removes |s| from the weak table, recording the removal if a transaction is
// active so an abort can restore it.
void InternTable::RemoveWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s);
}
134
// Insert/remove methods used to undo changes made during an aborted transaction.
// These must only run after the transaction has ended (DCHECKed below) so the
// undo operations are not themselves recorded.
mirror::String* InternTable::InsertStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}
// Re-inserts a weak intern removed during an aborted transaction.
mirror::String* InternTable::InsertWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}
// Removes a strong intern added during an aborted transaction.
void InternTable::RemoveStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}
// Removes a weak intern added during an aborted transaction.
void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}
152
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700153void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
Mathieu Chartierbc58ede2014-11-17 12:36:24 -0800154 CHECK(image_space != nullptr);
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700155 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
156 if (!image_added_to_intern_table_) {
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700157 const ImageHeader* const header = &image_space->GetImageHeader();
158 // Check if we have the interned strings section.
159 const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
160 if (section.Size() > 0) {
161 ReadFromMemoryLocked(image_space->Begin() + section.Offset());
162 } else {
163 // TODO: Delete this logic?
164 mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
165 mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
166 for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
167 mirror::DexCache* dex_cache = dex_caches->Get(i);
168 const DexFile* dex_file = dex_cache->GetDexFile();
169 const size_t num_strings = dex_file->NumStringIds();
170 for (size_t j = 0; j < num_strings; ++j) {
171 mirror::String* image_string = dex_cache->GetResolvedString(j);
172 if (image_string != nullptr) {
173 mirror::String* found = LookupStrong(image_string);
174 if (found == nullptr) {
175 InsertStrong(image_string);
176 } else {
177 DCHECK_EQ(found, image_string);
178 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700179 }
180 }
181 }
182 }
183 image_added_to_intern_table_ = true;
184 }
185}
186
// Searches the boot image's dex caches for a resolved string equal to |s|.
// Returns nullptr if the image interns were already merged into this table,
// if there is no image, or if no match is found.
mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
  if (image_added_to_intern_table_) {
    // The image strings already live in strong_interns_; nothing extra here.
    return nullptr;
  }
  gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
  if (image == nullptr) {
    return nullptr;  // No image present.
  }
  mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
  mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
  const std::string utf8 = s->ToModifiedUtf8();
  for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
    mirror::DexCache* dex_cache = dex_caches->Get(i);
    const DexFile* dex_file = dex_cache->GetDexFile();
    // Binary search the dex file for the string index.
    const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
    if (string_id != nullptr) {
      uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
      // GetResolvedString() contains a RB.
      mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
      if (image_string != nullptr) {
        return image_string;
      }
    }
  }
  return nullptr;
}
214
// Asserts that weak-root reads/writes are disallowed (GC pause invariant).
void InternTable::EnsureNewWeakInternsDisallowed() {
  // Lock and unlock once to ensure that no threads are still in the
  // middle of adding new interns.
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  CHECK_EQ(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
}
221
// Wakes threads blocked in WaitUntilAccessible(). Only meaningful under the
// read-barrier (concurrent copying) configuration, hence the CHECK.
void InternTable::BroadcastForNewInterns() {
  CHECK(kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  weak_intern_condition_.Broadcast(self);
}
228
// Blocks until weak intern roots may be accessed again.
// Precondition: |self| holds intern_table_lock_ exclusively; the lock is
// released while blocking and is held again on return.
void InternTable::WaitUntilAccessible(Thread* self) {
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  // Leave the runnable state before blocking so a GC pause need not wait on us.
  self->TransitionFromRunnableToSuspended(kWaitingWeakGcRootRead);
  Locks::intern_table_lock_->ExclusiveLock(self);
  while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Wait(self);
  }
  // Drop the lock across the transition back to runnable, then re-acquire it;
  // presumably required by lock-level ordering vs. the mutator lock -- TODO confirm.
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  self->TransitionFromSuspendedToRunnable();
  Locks::intern_table_lock_->ExclusiveLock(self);
}
240
// Core interning routine. Returns an existing equal string from the strong
// table, weak table, or boot image if present; otherwise inserts |s| into the
// strong or weak table per |is_strong|. |holding_locks| indicates the caller
// holds additional locks and therefore must never block here.
mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool holding_locks) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* const self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  if (kDebugLocking && !holding_locks) {
    Locks::mutator_lock_->AssertSharedHeld(self);
    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
  }
  while (true) {
    if (holding_locks) {
      // A caller that holds extra locks must only get here while weak-root
      // access is enabled, since we cannot wait below.
      if (!kUseReadBarrier) {
        CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
      } else {
        CHECK(self->GetWeakRefAccessEnabled());
      }
    }
    // Check the strong table for a match.
    mirror::String* strong = LookupStrong(s);
    if (strong != nullptr) {
      return strong;
    }
    // Proceed to the weak table only when weak-root access is permitted.
    if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
        (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
      break;
    }
    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
    // cleared.
    CHECK(!holding_locks);
    StackHandleScope<1> hs(self);
    // HandleWrapper keeps |s| up to date if a moving GC relocates it while we wait.
    auto h = hs.NewHandleWrapper(&s);
    WaitUntilAccessible(self);
  }
  if (!kUseReadBarrier) {
    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
  } else {
    CHECK(self->GetWeakRefAccessEnabled());
  }
  // There is no match in the strong table, check the weak table.
  mirror::String* weak = LookupWeak(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // Check the image for a match.
  mirror::String* image = LookupStringFromImage(s);
  if (image != nullptr) {
    return is_strong ? InsertStrong(image) : InsertWeak(image);
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}
299
Mathieu Chartiered0fc1d2014-03-21 14:09:35 -0700300mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
301 DCHECK(utf8_data != nullptr);
Ian Rogers7dfb28c2013-08-22 08:18:36 -0700302 return InternStrong(mirror::String::AllocFromModifiedUtf8(
303 Thread::Current(), utf16_length, utf8_data));
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700304}
305
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800306mirror::String* InternTable::InternStrong(const char* utf8_data) {
Mathieu Chartiered0fc1d2014-03-21 14:09:35 -0700307 DCHECK(utf8_data != nullptr);
308 return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
Brian Carlstromc74255f2011-09-11 22:47:39 -0700309}
310
// Strongly interns an image string; holding_locks=true because the caller
// may already hold other locks (so Insert must not block).
mirror::String* InternTable::InternStrongImageString(mirror::String* s) {
  // May be holding the heap bitmap lock.
  return Insert(s, true, true);
}
315
// Strongly interns |s|; may block waiting for weak-root access.
mirror::String* InternTable::InternStrong(mirror::String* s) {
  return Insert(s, true, false);
}
319
// Weakly interns |s|; may block waiting for weak-root access.
mirror::String* InternTable::InternWeak(mirror::String* s) {
  return Insert(s, false, false);
}
323
// Returns true only if |s| itself (pointer identity, not just equality) is
// the weakly interned instance.
bool InternTable::ContainsWeak(mirror::String* s) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return LookupWeak(s) == s;
}
328
// GC entry point: sweeps the weak interns, dropping entries whose objects
// are no longer marked and updating moved references.
void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SweepWeaks(visitor);
}
333
// Deserializes the interned-strings section of |image_space| into the
// pre-zygote strong table, checking the read stayed within the section.
void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
  const ImageSection& intern_section = image_space->GetImageHeader().GetImageSection(
      ImageHeader::kSectionInternedStrings);
  // Read the string tables from the image.
  const uint8_t* ptr = image_space->Begin() + intern_section.Offset();
  const size_t offset = ReadFromMemory(ptr);
  CHECK_LE(offset, intern_section.Size());
}
342
// Locking wrapper around ReadFromMemoryLocked(); returns the bytes consumed.
size_t InternTable::ReadFromMemory(const uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return ReadFromMemoryLocked(ptr);
}
347
// Deserializes a strong intern table from |ptr| into the pre-zygote table.
// Caller must hold intern_table_lock_. Returns the bytes consumed.
size_t InternTable::ReadFromMemoryLocked(const uint8_t* ptr) {
  return strong_interns_.ReadIntoPreZygoteTable(ptr);
}
351
// Serializes the post-zygote strong table to |ptr|; returns bytes written.
// Note: only the post-zygote table is written, not the pre-zygote one.
size_t InternTable::WriteToMemory(uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.WriteFromPostZygoteTable(ptr);
}
356
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800357std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700358 if (kIsDebugBuild) {
359 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
360 }
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800361 return static_cast<size_t>(root.Read()->GetHashCode());
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700362}
363
// Equality functor for the intern sets: content equality via String::Equals.
bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const GcRoot<mirror::String>& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return a.Read()->Equals(b.Read());
}
371
// Rebuilds the pre-zygote set directly over serialized data at |ptr| (the
// 'false' argument requests no copy of the backing data -- the set aliases
// the image memory). Returns the number of bytes read. The pre-zygote set
// must be empty (only done once, from the image).
size_t InternTable::Table::ReadIntoPreZygoteTable(const uint8_t* ptr) {
  CHECK_EQ(pre_zygote_table_.Size(), 0u);
  size_t read_count = 0;
  pre_zygote_table_ = UnorderedSet(ptr, false /* make copy */, &read_count);
  return read_count;
}
378
// Serializes the post-zygote set to |ptr|; returns the bytes written.
size_t InternTable::Table::WriteFromPostZygoteTable(uint8_t* ptr) {
  return post_zygote_table_.WriteToMemory(ptr);
}
382
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700383void InternTable::Table::Remove(mirror::String* s) {
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800384 auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700385 if (it != post_zygote_table_.end()) {
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800386 post_zygote_table_.Erase(it);
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700387 } else {
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800388 it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700389 DCHECK(it != pre_zygote_table_.end());
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800390 pre_zygote_table_.Erase(it);
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700391 }
392}
393
394mirror::String* InternTable::Table::Find(mirror::String* s) {
395 Locks::intern_table_lock_->AssertHeld(Thread::Current());
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800396 auto it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
397 if (it != pre_zygote_table_.end()) {
398 return it->Read();
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700399 }
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800400 it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
401 if (it != post_zygote_table_.end()) {
402 return it->Read();
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700403 }
404 return nullptr;
405}
406
// Moves the post-zygote set into the pre-zygote slot. Skipped when the
// pre-zygote set is already populated (e.g. loaded from an image).
void InternTable::Table::SwapPostZygoteWithPreZygote() {
  if (pre_zygote_table_.Empty()) {
    std::swap(pre_zygote_table_, post_zygote_table_);
    VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
  } else {
    // This case happens if read the intern table from the image.
    VLOG(heap) << "Not swapping due to non-empty pre_zygote_table_";
  }
}
416
// Inserts |s| into the currently-active (post-zygote) set.
void InternTable::Table::Insert(mirror::String* s) {
  // Always insert the post zygote table, this gets swapped when we create the zygote to be the
  // pre zygote table.
  post_zygote_table_.Insert(GcRoot<mirror::String>(s));
}
422
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700423void InternTable::Table::VisitRoots(RootVisitor* visitor) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -0700424 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
425 visitor, RootInfo(kRootInternedString));
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700426 for (auto& intern : pre_zygote_table_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700427 buffered_visitor.VisitRoot(intern);
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700428 }
429 for (auto& intern : post_zygote_table_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700430 buffered_visitor.VisitRoot(intern);
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700431 }
432}
433
// Sweeps both sets against the GC's mark state.
void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
  SweepWeaks(&pre_zygote_table_, visitor);
  SweepWeaks(&post_zygote_table_, visitor);
}
438
// Sweeps one set: erases entries whose object the GC reports unmarked, and
// rewrites entries whose object was moved.
void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = visitor->IsMarked(object);
    if (new_object == nullptr) {
      // Dead: drop the intern. Erase returns the next iterator.
      it = set->Erase(it);
    } else {
      // Alive (possibly moved): store the updated reference.
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}
452
// Total entries across the pre- and post-zygote sets.
size_t InternTable::Table::Size() const {
  return pre_zygote_table_.Size() + post_zygote_table_.Size();
}
456
// Locking wrapper around ChangeWeakRootStateLocked().
void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  ChangeWeakRootStateLocked(new_state);
}
461
// Sets the weak-root access state. When access is re-enabled, wakes any
// threads blocked in WaitUntilAccessible(). Caller holds intern_table_lock_.
void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
  weak_root_state_ = new_state;
  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Broadcast(Thread::Current());
  }
}
468
Brian Carlstrom7e93b502011-08-04 14:16:22 -0700469} // namespace art