/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_REFERENCE_H_
#define ART_RUNTIME_MIRROR_REFERENCE_H_

#include "object.h"

namespace art {

namespace gc {

class ReferenceProcessor;
class ReferenceQueue;

}  // namespace gc

struct ReferenceOffsets;
struct FinalizerReferenceOffsets;

namespace mirror {

// C++ mirror of java.lang.ref.Reference
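// The referent_ field is treated specially by the GC; see gc::ReferenceProcessor and
// gc::ReferenceQueue, which are responsible for clearing referents and enqueuing references.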
class MANAGED Reference : public Object {
 public:
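  // Byte offsets of the fields declared below, for code that accesses the fields directly,
  // such as the garbage collector.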
  static MemberOffset PendingNextOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, pending_next_);
  }
  static MemberOffset QueueOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, queue_);
  }
  static MemberOffset QueueNextOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, queue_next_);
  }
  static MemberOffset ReferentOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
  }
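  // Reads the referent. By default the read goes through a read barrier; callers that must not
  // trigger one (GC-internal code) can opt out via the kReadBarrierOption template parameter.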
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
        ReferentOffset());
  }
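  // Setters are templated on kTransactionActive so that writes can be recorded and rolled back
  // while the runtime is running a transaction (e.g. compile-time class initialization).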
  template<bool kTransactionActive>
  void SetReferent(Object* referent) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
  }
  template<bool kTransactionActive>
  void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
  }
  // Volatile read/write is not necessary since the Java pending next is only accessed by Java
  // threads for cleared references. Once these cleared references have a null referent, the GC
  // never reads their pending next again.
67 Reference* GetPendingNext() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -070068 return GetFieldObject<Reference>(PendingNextOffset());
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -070069 }
70 template<bool kTransactionActive>
71 void SetPendingNext(Reference* pending_next) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -070072 SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next);
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -070073 }
74
  bool IsEnqueued() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Since references are stored as cyclic lists, once enqueued the pending next is always
    // non-null.
    return GetPendingNext() != nullptr;
  }

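  // Whether this reference can be enqueued, i.e. it has an associated queue and has not already
  // been enqueued on it. Defined out of line.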
  bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  // Note: This avoids a read barrier; it should only be used by the GC.
  HeapReference<Object>* GetReferentReferenceAddr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
  }

  // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
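  // The fields below correspond to the Java fields of java.lang.ref.Reference.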
  HeapReference<Reference> pending_next_;  // Note this is Java volatile:
  HeapReference<Object> queue_;  // Note this is Java volatile:
  HeapReference<Reference> queue_next_;  // Note this is Java volatile:
  HeapReference<Object> referent_;  // Note this is Java volatile:

  friend struct art::ReferenceOffsets;  // for verifying offset information
  friend class gc::ReferenceProcessor;
  friend class gc::ReferenceQueue;
  DISALLOW_IMPLICIT_CONSTRUCTORS(Reference);
};

// C++ mirror of java.lang.ref.FinalizerReference
class MANAGED FinalizerReference : public Reference {
 public:
  static MemberOffset ZombieOffset() {
    return OFFSET_OF_OBJECT_MEMBER(FinalizerReference, zombie_);
  }

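  // The zombie_ field keeps the finalizable object reachable after its referent has been
  // cleared, until its finalizer has run; it is set by the GC (see gc::ReferenceQueue).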
  template<bool kTransactionActive>
  void SetZombie(Object* zombie) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
  }
  Object* GetZombie() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFieldObjectVolatile<Object>(ZombieOffset());
  }

 private:
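  // Corresponding to the next, prev, and zombie fields of java.lang.ref.FinalizerReference in
  // libcore.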
  HeapReference<FinalizerReference> next_;
  HeapReference<FinalizerReference> prev_;
  HeapReference<Object> zombie_;

  friend struct art::FinalizerReferenceOffsets;  // for verifying offset information
  DISALLOW_IMPLICIT_CONSTRUCTORS(FinalizerReference);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_REFERENCE_H_