/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_queue.h"

#include "accounting/card_table-inl.h"
#include "base/mutex.h"
#include "collector/concurrent_copying.h"
#include "heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "object_callbacks.h"

namespace art HIDDEN {
namespace gc {

ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}

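// Enqueue the reference under lock_, unless another thread has already enqueued it.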
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  MutexLock mu(self, *lock_);
  if (ref->IsUnprocessed()) {
    EnqueueReference(ref);
  }
}

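// The queue is a circular singly-linked list threaded through Reference.pendingNext;
// list_ points at one element of the cycle and new references are linked in immediately
// after it.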
void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  CHECK(ref->IsUnprocessed());
  if (IsEmpty()) {
    // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
    list_ = ref.Ptr();
  } else {
    // The list is owned by the GC, everything that has been inserted must already be at least
    // gray.
    ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
    DCHECK(head != nullptr);
    ref->SetPendingNext(head);
  }
  // Add the reference in the middle to preserve the cycle.
  list_->SetPendingNext(ref);
}

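// Removes and returns the element immediately after list_; clears list_ once the last
// element has been removed.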
ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
  DCHECK(!IsEmpty());
  ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
  DCHECK(ref != nullptr);
  // Note: the following code is thread-safe because it is only called from ProcessReferences which
  // is single threaded.
  if (list_ == ref) {
    list_ = nullptr;
  } else {
    ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
    list_->SetPendingNext(next);
  }
  ref->SetPendingNext(nullptr);
  return ref;
}

// This must be called whenever DequeuePendingReference is called.
void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref,
                                                    std::memory_order order) {
  Heap* heap = Runtime::Current()->GetHeap();
  if (kUseBakerReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
      heap->ConcurrentCopyingCollector()->IsActive()) {
    // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to non-gray.
    // We check IsActive() above because we don't want to do this when the zygote compaction
    // collector (SemiSpace) is running.
    CHECK(ref != nullptr);
    collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
    uint32_t rb_state = ref->GetReadBarrierState();
    if (rb_state == ReadBarrier::GrayState()) {
      ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::NonGrayState(), order);
      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
    } else {
      // In ConcurrentCopying::ProcessMarkStackRef() we may leave a non-gray reference in the queue
      // and find it here, which is OK.
      CHECK_EQ(rb_state, ReadBarrier::NonGrayState()) << "ref=" << ref << " rb_state=" << rb_state;
      ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
      // The referent could be null if it's cleared by a mutator (Reference.clear()).
      if (referent != nullptr) {
        CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
            << "ref=" << ref << " rb_state=" << ref->GetReadBarrierState()
            << " referent=" << referent;
      }
    }
  }
}

void ReferenceQueue::Dump(std::ostream& os) const {
  ObjPtr<mirror::Reference> cur = list_;
  os << "Reference starting at list_=" << list_ << "\n";
  if (cur == nullptr) {
    return;
  }
  do {
    ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
    os << "Reference= " << cur << " PendingNext=" << pending_next;
    if (cur->IsFinalizerReferenceInstance()) {
      os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
    }
    os << "\n";
    cur = pending_next;
  } while (cur != list_);
}

size_t ReferenceQueue::GetLength() const {
  size_t count = 0;
  ObjPtr<mirror::Reference> cur = list_;
  if (cur != nullptr) {
    do {
      ++count;
      cur = cur->GetPendingNext();
    } while (cur != list_);
  }
  return count;
}

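// Dequeues every reference; referents that are still unmarked (white) are cleared and the
// corresponding references are moved onto cleared_references.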
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                          collector::GarbageCollector* collector,
                                          bool report_cleared) {
  while (!IsEmpty()) {
    ObjPtr<mirror::Reference> ref = DequeuePendingReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
      // Referent is white, clear it.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->ClearReferent<true>();
      } else {
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
      if (report_cleared) {
        static bool already_reported = false;
        if (!already_reported) {
          // TODO: Maybe do this only if the queue is non-null?
          LOG(WARNING)
              << "Cleared Reference was only reachable from finalizer (only reported once)";
          already_reported = true;
        }
      }
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref, std::memory_order_relaxed);
  }
}

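// For each finalizer reference whose referent is still unmarked, marks (forwards) the
// referent, stashes it in the zombie field so it stays reachable for finalization, clears
// the referent, and moves the reference onto cleared_references. Returns how many
// references were processed and how many were enqueued.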
FinalizerStats ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                                          collector::GarbageCollector* collector) {
  uint32_t num_refs(0), num_enqueued(0);
  while (!IsEmpty()) {
    ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
    ++num_refs;
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
      ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
      // Move the updated referent to the zombie field.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->SetZombie<true>(forward_address);
        ref->ClearReferent<true>();
      } else {
        ref->SetZombie<false>(forward_address);
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
      ++num_enqueued;
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref->AsReference(), std::memory_order_relaxed);
  }
  return FinalizerStats(num_refs, num_enqueued);
}

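// Marks (and thereby forwards) the referents of the enqueued soft references that are being
// preserved. References are dequeued in small batches so the lock is held only briefly.
// Returns the number of non-null referents marked.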
uint32_t ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
  uint32_t num_refs(0);
  Thread* self = Thread::Current();
  static constexpr int SR_BUF_SIZE = 32;
  ObjPtr<mirror::Reference> buf[SR_BUF_SIZE];
  int n_entries;
  bool empty;
  do {
    {
      // Acquire lock only a few times and hold it as briefly as possible.
      MutexLock mu(self, *lock_);
      empty = IsEmpty();
      for (n_entries = 0; n_entries < SR_BUF_SIZE && !empty; ++n_entries) {
        // Dequeuing the Reference here means it could possibly be enqueued again during this GC.
        // That's unlikely and benign.
        buf[n_entries] = DequeuePendingReference();
        empty = IsEmpty();
      }
    }
    for (int i = 0; i < n_entries; ++i) {
      mirror::HeapReference<mirror::Object>* referent_addr = buf[i]->GetReferentReferenceAddr();
      if (referent_addr->AsMirrorPtr() != nullptr) {
        visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ true);
        ++num_refs;
      }
      DisableReadBarrierForReference(buf[i]->AsReference(), std::memory_order_release);
    }
  } while (!empty);
  return num_refs;
}

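// list_ is a GC root; ask the collector for the object's current (possibly relocated)
// address and update the root accordingly.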
void ReferenceQueue::UpdateRoots(IsMarkedVisitor* visitor) {
  if (list_ != nullptr) {
    list_ = down_cast<mirror::Reference*>(visitor->IsMarked(list_));
  }
}

}  // namespace gc
}  // namespace art