/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/concurrent_copying-inl.h"
#include "gc/collector/mark_compact.h"
#include "gc/heap.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"

namespace art HIDDEN {

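// Reads the heap reference at `obj` + `offset` and applies the configured read barrier to it.
// With the Baker barrier, a gray check plus the Mark() slow path keeps the returned reference
// in to-space; with the table-lookup barrier, the read barrier table decides whether to mark.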
template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierOption,
          bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (gUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      CHECK(self != nullptr);
      CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if the
        // mutator updates it before us, but it's OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null
      // check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if the mutator updates it before us, but
        // it's OK.
        if (ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else if (kReadBarrierOption == kWithFromSpaceBarrier) {
    DCHECK(gUseUserfaultfd);
    MirrorType* old = ref_addr->template AsMirrorPtr<kIsVolatile>();
    mirror::Object* ref =
        Runtime::Current()->GetHeap()->MarkCompactCollector()->GetFromSpaceAddrFromBarrier(old);
    return reinterpret_cast<MirrorType*>(ref);
  } else {
    // No read barrier.
    return ref_addr->template AsMirrorPtr<kIsVolatile>();
  }
}

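// Applies the read barrier to a GC root stored as a raw `MirrorType*`: marks through the
// collector when a GC is marking and, for the table-lookup barrier, updates the root slot in
// place with a CAS.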
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (gUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      CHECK(self != nullptr);
      CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if the mutator updates it before us, but
        // it's OK.
        if (ref != old_ref) {
          Atomic<MirrorType*>* atomic_root = reinterpret_cast<Atomic<MirrorType*>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else if (kReadBarrierOption == kWithFromSpaceBarrier) {
    DCHECK(gUseUserfaultfd);
    mirror::Object* from_ref =
        Runtime::Current()->GetHeap()->MarkCompactCollector()->GetFromSpaceAddrFromBarrier(ref);
    return reinterpret_cast<MirrorType*>(from_ref);
  } else {
    return ref;
  }
}

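// Same as above, but for a GC root stored as a CompressedReference<MirrorType>; the table-lookup
// path CASes the compressed slot rather than a raw pointer.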
// TODO: Reduce copy paste
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (gUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      CHECK(self != nullptr);
      CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
        // Update the root atomically. This may fail if the mutator updates it before us, but
        // it's OK.
        if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
          auto* atomic_root =
              reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, new_ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else if (kReadBarrierOption == kWithFromSpaceBarrier) {
    DCHECK(gUseUserfaultfd);
    mirror::Object* from_ref =
        Runtime::Current()->GetHeap()->MarkCompactCollector()->GetFromSpaceAddrFromBarrier(ref);
    return reinterpret_cast<MirrorType*>(from_ref);
  } else {
    return ref;
  }
}

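// Returns `ref` unchanged when no read barrier is in use, when `ref` is null, or when the GC is
// not marking; otherwise delegates to the concurrent copying collector's IsMarked().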
template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while
  // the GC is marking.
  if (!gUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}

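// Returns true while the heap or the concurrent copying collector is not yet set up (or CC is
// not the current collector type), in which case the to-space invariant cannot be checked.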
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

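// Debug check: when enabled, asserts that the reference loaded from `obj` + `offset` points into
// to-space. Skipped for null references and during startup.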
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

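// Same check, but for a reference loaded from a GC root rather than a heap field.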
inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

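// Read-barrier slow path: asks the concurrent copying collector to mark `obj` and returns the
// resulting (to-space) reference.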
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

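// Checks whether `obj` is gray by loading its read barrier state, and also produces the
// always-zero fake address dependency used to order the subsequent reference load.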
inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == kGrayState;
}

inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == kGrayState;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_