/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "RefBase"
// #define LOG_NDEBUG 0

#include <memory>
#include <mutex>

#include <fcntl.h>
#include <log/log.h>

#include <utils/RefBase.h>
#include <utils/String8.h>

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

// Compile with refcounting debugging enabled.
#ifndef DEBUG_REFS
#define DEBUG_REFS 0
#endif

// The following three are ignored unless DEBUG_REFS is set.

// whether ref-tracking is enabled by default, if not, trackMe(true, false)
// needs to be called explicitly
#define DEBUG_REFS_ENABLED_BY_DEFAULT 0

// whether callstacks are collected (significantly slows things down)
#define DEBUG_REFS_CALLSTACK_ENABLED 1

// folder where stack traces are saved when DEBUG_REFS is enabled
// this folder needs to exist and be writable
#ifdef __ANDROID__
#define DEBUG_REFS_CALLSTACK_PATH "/data/debug"
#else
#define DEBUG_REFS_CALLSTACK_PATH "."
#endif

// log all reference counting operations
#define PRINT_REFS 0

#if !defined(ANDROID_UTILS_CALLSTACK_ENABLED)
#if defined(__linux__)
// CallStack is only supported on linux type platforms.
#define ANDROID_UTILS_CALLSTACK_ENABLED 1
#else
#define ANDROID_UTILS_CALLSTACK_ENABLED 0
#endif // defined(__linux__)
#endif // !defined(ANDROID_UTILS_CALLSTACK_ENABLED)

#if ANDROID_UTILS_CALLSTACK_ENABLED
#include "../../include/utils/CallStack.h"
#endif

// ---------------------------------------------------------------------------

namespace android {

// Observations, invariants, etc:

// By default, objects are destroyed when the last strong reference disappears
// or, if the object never had a strong reference, when the last weak reference
// disappears.
//
// OBJECT_LIFETIME_WEAK changes this behavior to retain the object
// unconditionally until the last reference of either kind disappears. The
// client ensures that the extendObjectLifetime call happens before the dec
// call that would otherwise have deallocated the object, or before an
// attemptIncStrong call that might rely on it. We do not worry about
// concurrent changes to the object lifetime.
//
// AttemptIncStrong will succeed if the object has a strong reference, or if it
// has a weak reference and has never had a strong reference.
// AttemptIncWeak really does succeed only if there is already a WEAK
// reference, and thus may fail when attemptIncStrong would succeed.
//
// mStrong is the strong reference count. mWeak is the weak reference count.
// Between calls, and ignoring memory ordering effects, mWeak includes strong
// references, and is thus >= mStrong.
//
// A weakref_impl holds all the information, including both reference counts,
// required to perform wp<> operations. Thus these can continue to be performed
// after the RefBase object has been destroyed.
//
// A weakref_impl is allocated as the value of mRefs in a RefBase object on
// construction.
// In the OBJECT_LIFETIME_STRONG case, it is normally deallocated in decWeak,
// and hence lives as long as the last weak reference. (It can also be
// deallocated in the RefBase destructor iff the strong reference count was
// never incremented and the weak count is zero, e.g. if the RefBase object is
// explicitly destroyed without decrementing the strong count. This should be
// avoided.) In this case, the RefBase destructor should be invoked from
// decStrong.
// In the OBJECT_LIFETIME_WEAK case, the weakref_impl is always deallocated in
// the RefBase destructor, which is always invoked by decWeak. DecStrong
// explicitly avoids the deletion in this case.
//
// Memory ordering:
// The client must ensure that every inc() call, together with all other
// accesses to the object, happens before the corresponding dec() call.
//
// We try to keep memory ordering constraints on atomics as weak as possible,
// since memory fences or ordered memory accesses are likely to be a major
// performance cost for this code. All accesses to mStrong, mWeak, and mFlags
// explicitly relax memory ordering in some way.
//
// The only operations that are not memory_order_relaxed are reference count
// decrements. All reference count decrements are release operations. In
// addition, the final decrement leading to the deallocation is followed by an
// acquire fence, which we can view informally as also turning it into an
// acquire operation. (See 29.8p4 [atomics.fences] for details. We could
// alternatively use acq_rel operations for all decrements. This is probably
// slower on most current (2016) hardware, especially on ARMv7, but that may
// not be true indefinitely.)
//
// This convention ensures that the second-to-last decrement synchronizes with
// (in the language of 1.10 in the C++ standard) the final decrement of a
// reference count. Since reference counts are only updated using atomic
// read-modify-write operations, this also extends to any earlier decrements.
// (See "release sequence" in 1.10.)
//
// Since all operations on an object happen before the corresponding reference
// count decrement, and all reference count decrements happen before the final
// one, we are guaranteed that all other object accesses happen before the
// object is destroyed.
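//
// As an illustrative sketch (not part of this translation unit; Foo is a
// hypothetical RefBase subclass), the default lifetime behaves roughly like:
//
//     Foo* raw = new Foo();
//     sp<Foo> s(raw);            // first strong reference; onFirstRef() runs
//     wp<Foo> w = s;             // weak reference; mWeak >= mStrong
//     s.clear();                 // last strong ref: Foo is destroyed, but the
//                                // weakref_impl survives until w goes away
//     sp<Foo> p = w.promote();   // returns null: attemptIncStrong() cannot
//                                // revive an already-destroyed object
//
// Had Foo called extendObjectLifetime(OBJECT_LIFETIME_WEAK), s.clear() above
// would only have dropped the strong count, and Foo itself would live until
// the last weak reference disappeared.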


#define INITIAL_STRONG_VALUE (1<<28)

#define MAX_COUNT 0xfffff

// Test whether the argument is a clearly invalid strong reference count.
// Used only for error checking on the value before an atomic decrement.
// Intended to be very cheap.
// Note that we cannot just check for excess decrements by comparing to zero
// since the object would be deallocated before that.
#define BAD_STRONG(c) \
        ((c) == 0 || ((c) & (~(MAX_COUNT | INITIAL_STRONG_VALUE))) != 0)

// Same for weak counts.
#define BAD_WEAK(c) ((c) == 0 || ((c) & (~MAX_COUNT)) != 0)
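
// Worked examples (informal): BAD_STRONG(1) and BAD_STRONG(INITIAL_STRONG_VALUE + 1)
// are false, since plausible pre-decrement counts fit in the low 20 bits (MAX_COUNT),
// possibly with the INITIAL_STRONG_VALUE bit still set. BAD_STRONG(0) is true, because
// a zero value before a decrement means the count has already underflowed, and a
// negative count sets bits outside the allowed mask, so it is flagged as well.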

// name kept because prebuilts used to use it from inlining sp<> code
void sp_report_stack_pointer() { LOG_ALWAYS_FATAL("RefBase used with stack pointer argument"); }

// Check whether address is definitely on the calling stack. We actually check whether it is on
// the same 4K page as the frame pointer.
//
// Assumptions:
// - Pages are never smaller than 4K (MIN_PAGE_SIZE)
// - Malloced memory never shares a page with a stack.
//
// It does not appear safe to broaden this check to include adjacent pages; apparently this code
// is used in environments where there may not be a guard page below (at higher addresses than)
// the bottom of the stack.
static void check_not_on_stack(const void* ptr) {
    static constexpr int MIN_PAGE_SIZE = 0x1000;  // 4K. Safer than including sys/user.h.
    static constexpr uintptr_t MIN_PAGE_MASK = ~static_cast<uintptr_t>(MIN_PAGE_SIZE - 1);
    uintptr_t my_frame_address =
            reinterpret_cast<uintptr_t>(__builtin_frame_address(0 /* this frame */));
    if (((reinterpret_cast<uintptr_t>(ptr) ^ my_frame_address) & MIN_PAGE_MASK) == 0) {
        sp_report_stack_pointer();
    }
}

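// Illustrative note (a sketch; Foo is a hypothetical RefBase subclass): the first
// incStrong() on an object calls check_not_on_stack(this), so code such as
//
//     Foo foo;             // foo lives on the caller's stack
//     sp<Foo> p(&foo);     // first strong reference
//
// aborts when &foo shares a page with the caller's frame pointer.
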
// ---------------------------------------------------------------------------

class RefBase::weakref_impl : public RefBase::weakref_type
{
public:
    std::atomic<int32_t> mStrong;
    std::atomic<int32_t> mWeak;
    RefBase* const       mBase;
    std::atomic<int32_t> mFlags;

#if !DEBUG_REFS

    explicit weakref_impl(RefBase* base)
        : mStrong(INITIAL_STRONG_VALUE)
        , mWeak(0)
        , mBase(base)
        , mFlags(OBJECT_LIFETIME_STRONG)
    {
    }

    void addStrongRef(const void* /*id*/) { }
    void removeStrongRef(const void* /*id*/) { }
    void renameStrongRefId(const void* /*old_id*/, const void* /*new_id*/) { }
    void addWeakRef(const void* /*id*/) { }
    void removeWeakRef(const void* /*id*/) { }
    void renameWeakRefId(const void* /*old_id*/, const void* /*new_id*/) { }
    void printRefs() const { }
    void trackMe(bool, bool) { }

#else

    weakref_impl(RefBase* base)
        : mStrong(INITIAL_STRONG_VALUE)
        , mWeak(0)
        , mBase(base)
        , mFlags(OBJECT_LIFETIME_STRONG)
        , mStrongRefs(NULL)
        , mWeakRefs(NULL)
        , mTrackEnabled(!!DEBUG_REFS_ENABLED_BY_DEFAULT)
        , mRetain(false)
    {
    }

    ~weakref_impl()
    {
        bool dumpStack = false;
        if (!mRetain && mStrongRefs != NULL) {
            dumpStack = true;
            ALOGE("Strong references remain:");
            ref_entry* refs = mStrongRefs;
            while (refs) {
                char inc = refs->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED && ANDROID_UTILS_CALLSTACK_ENABLED
                CallStack::logStack(LOG_TAG, refs->stack.get());
#endif
                refs = refs->next;
            }
        }

        if (!mRetain && mWeakRefs != NULL) {
            dumpStack = true;
            ALOGE("Weak references remain!");
            ref_entry* refs = mWeakRefs;
            while (refs) {
                char inc = refs->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED && ANDROID_UTILS_CALLSTACK_ENABLED
                CallStack::logStack(LOG_TAG, refs->stack.get());
#endif
                refs = refs->next;
            }
        }
        if (dumpStack) {
            ALOGE("above errors at:");
#if ANDROID_UTILS_CALLSTACK_ENABLED
            CallStack::logStack(LOG_TAG);
#endif
        }
    }

    void addStrongRef(const void* id) {
        //ALOGD_IF(mTrackEnabled,
        //        "addStrongRef: RefBase=%p, id=%p", mBase, id);
        addRef(&mStrongRefs, id, mStrong.load(std::memory_order_relaxed));
    }

    void removeStrongRef(const void* id) {
        //ALOGD_IF(mTrackEnabled,
        //        "removeStrongRef: RefBase=%p, id=%p", mBase, id);
        if (!mRetain) {
            removeRef(&mStrongRefs, id);
        } else {
            addRef(&mStrongRefs, id, -mStrong.load(std::memory_order_relaxed));
        }
    }

    void renameStrongRefId(const void* old_id, const void* new_id) {
        //ALOGD_IF(mTrackEnabled,
        //        "renameStrongRefId: RefBase=%p, oid=%p, nid=%p",
        //        mBase, old_id, new_id);
        renameRefsId(mStrongRefs, old_id, new_id);
    }

    void addWeakRef(const void* id) {
        addRef(&mWeakRefs, id, mWeak.load(std::memory_order_relaxed));
    }

    void removeWeakRef(const void* id) {
        if (!mRetain) {
            removeRef(&mWeakRefs, id);
        } else {
            addRef(&mWeakRefs, id, -mWeak.load(std::memory_order_relaxed));
        }
    }

    void renameWeakRefId(const void* old_id, const void* new_id) {
        renameRefsId(mWeakRefs, old_id, new_id);
    }

    void trackMe(bool track, bool retain) {
        mTrackEnabled = track;
        mRetain = retain;
    }

    void printRefs() const
    {
        String8 text;

        {
            std::lock_guard<std::mutex> _l(mMutex);
            char buf[128];
            snprintf(buf, sizeof(buf),
                     "Strong references on RefBase %p (weakref_type %p):\n",
                     mBase, this);
            text.append(buf);
            printRefsLocked(&text, mStrongRefs);
            snprintf(buf, sizeof(buf),
                     "Weak references on RefBase %p (weakref_type %p):\n",
                     mBase, this);
            text.append(buf);
            printRefsLocked(&text, mWeakRefs);
        }

        {
            char name[100];
            snprintf(name, sizeof(name), DEBUG_REFS_CALLSTACK_PATH "/%p.stack",
                     this);
            int rc = open(name, O_RDWR | O_CREAT | O_APPEND, 0644);
            if (rc >= 0) {
                (void)write(rc, text.c_str(), text.length());
                close(rc);
                ALOGI("STACK TRACE for %p saved in %s", this, name);
            }
            else ALOGE("FAILED TO PRINT STACK TRACE for %p in %s: %s", this,
                       name, strerror(errno));
        }
    }

private:
    struct ref_entry
    {
        ref_entry* next;
        const void* id;
#if DEBUG_REFS_CALLSTACK_ENABLED && ANDROID_UTILS_CALLSTACK_ENABLED
        CallStack::CallStackUPtr stack;
#endif
        int32_t ref;
    };

    void addRef(ref_entry** refs, const void* id, int32_t mRef)
    {
        if (mTrackEnabled) {
            std::lock_guard<std::mutex> _l(mMutex);

            ref_entry* ref = new ref_entry;
            // Reference count at the time of the snapshot, but before the
            // update. Positive value means we increment, negative--we
            // decrement the reference count.
            ref->ref = mRef;
            ref->id = id;
#if DEBUG_REFS_CALLSTACK_ENABLED && ANDROID_UTILS_CALLSTACK_ENABLED
            ref->stack = CallStack::getCurrent(2);
#endif
            ref->next = *refs;
            *refs = ref;
        }
    }

    void removeRef(ref_entry** refs, const void* id)
    {
        if (mTrackEnabled) {
            std::lock_guard<std::mutex> _l(mMutex);

            ref_entry* const head = *refs;
            ref_entry* ref = head;
            while (ref != NULL) {
                if (ref->id == id) {
                    *refs = ref->next;
                    delete ref;
                    return;
                }
                refs = &ref->next;
                ref = *refs;
            }

            ALOGE("RefBase: removing id %p on RefBase %p "
                  "(weakref_type %p) that doesn't exist!",
                  id, mBase, this);

            ref = head;
            while (ref) {
                char inc = ref->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, ref->id, ref->ref);
                ref = ref->next;
            }

#if ANDROID_UTILS_CALLSTACK_ENABLED
            CallStack::logStack(LOG_TAG);
#endif
        }
    }

    void renameRefsId(ref_entry* r, const void* old_id, const void* new_id)
    {
        if (mTrackEnabled) {
            std::lock_guard<std::mutex> _l(mMutex);
            ref_entry* ref = r;
            while (ref != NULL) {
                if (ref->id == old_id) {
                    ref->id = new_id;
                }
                ref = ref->next;
            }
        }
    }

    void printRefsLocked(String8* out, const ref_entry* refs) const
    {
        char buf[128];
        while (refs) {
            char inc = refs->ref >= 0 ? '+' : '-';
            snprintf(buf, sizeof(buf), "\t%c ID %p (ref %d):\n",
                     inc, refs->id, refs->ref);
            out->append(buf);
#if DEBUG_REFS_CALLSTACK_ENABLED && ANDROID_UTILS_CALLSTACK_ENABLED
            out->append(CallStack::stackToString("\t\t", refs->stack.get()));
#else
            out->append("\t\t(call stacks disabled)");
#endif
            refs = refs->next;
        }
    }

    mutable std::mutex mMutex;
    ref_entry* mStrongRefs;
    ref_entry* mWeakRefs;

    bool mTrackEnabled;
    // Collect stack traces on addref and removeref, instead of deleting the
    // matching stack trace entry on removeref.
    bool mRetain;

#endif
};

// ---------------------------------------------------------------------------

void RefBase::incStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c > 0, "incStrong() called on %p after last strong ref", refs);
#if PRINT_REFS
    ALOGD("incStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    if (c != INITIAL_STRONG_VALUE) {
        return;
    }

    check_not_on_stack(this);

    int32_t old __unused = refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE, std::memory_order_relaxed);
    // A decStrong() must still happen after us.
    ALOG_ASSERT(old > INITIAL_STRONG_VALUE, "0x%x too small", old);
    refs->mBase->onFirstRef();
}
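
// Worked example of the fix-up above: mStrong starts at INITIAL_STRONG_VALUE
// (1<<28), so the first incStrong() observes c == INITIAL_STRONG_VALUE and leaves
// mStrong at INITIAL_STRONG_VALUE + 1; the fetch_sub() then brings the count down
// to the expected value of 1. A concurrent increment in that window observes a
// value different from INITIAL_STRONG_VALUE and simply returns early.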

void RefBase::incStrongRequireStrong(const void* id) const {
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);

    LOG_ALWAYS_FATAL_IF(c <= 0 || c == INITIAL_STRONG_VALUE,
                        "incStrongRequireStrong() called on %p which isn't already owned", refs);
#if PRINT_REFS
    ALOGD("incStrong (requiring strong) of %p from %p: cnt=%d\n", this, id, c);
#endif
}

void RefBase::decStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->removeStrongRef(id);
    const int32_t c = refs->mStrong.fetch_sub(1, std::memory_order_release);
#if PRINT_REFS
    ALOGD("decStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    LOG_ALWAYS_FATAL_IF(BAD_STRONG(c), "decStrong() called on %p too many times",
                        refs);
    if (c == 1) {
        std::atomic_thread_fence(std::memory_order_acquire);
        refs->mBase->onLastStrongRef(id);
        int32_t flags = refs->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            delete this;
            // The destructor does not delete refs in this case.
        }
    }
    // Note that even with only strong reference operations, the thread
    // deallocating this may not be the same as the thread deallocating refs.
    // That's OK: all accesses to this happen before its deletion here,
    // and all accesses to refs happen before its deletion in the final decWeak.
    // The destructor can safely access mRefs because either it's deleting
    // mRefs itself, or it's running entirely before the final mWeak decrement.
    //
    // Since we're doing atomic loads of `flags`, the static analyzer assumes
    // they can change between `delete this;` and `refs->decWeak(id);`. This is
    // not the case. The analyzer may become more okay with this pattern when
    // https://bugs.llvm.org/show_bug.cgi?id=34365 gets resolved. NOLINTNEXTLINE
    refs->decWeak(id);
}

void RefBase::forceIncStrong(const void* id) const
{
    // Allows initial mStrong of 0 in addition to INITIAL_STRONG_VALUE.
    // TODO: Better document assumptions.
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c >= 0, "forceIncStrong called on %p after ref count underflow",
                refs);
#if PRINT_REFS
    ALOGD("forceIncStrong of %p from %p: cnt=%d\n", this, id, c);
#endif

    switch (c) {
    case INITIAL_STRONG_VALUE:
        refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
                std::memory_order_relaxed);
        [[fallthrough]];
    case 0:
        refs->mBase->onFirstRef();
    }
}

int32_t RefBase::getStrongCount() const
{
    // Debugging only; No memory ordering guarantees.
    return mRefs->mStrong.load(std::memory_order_relaxed);
}

RefBase* RefBase::weakref_type::refBase() const
{
    return static_cast<const weakref_impl*>(this)->mBase;
}

void RefBase::weakref_type::incWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->addWeakRef(id);
    const int32_t c __unused = impl->mWeak.fetch_add(1,
            std::memory_order_relaxed);
    ALOG_ASSERT(c >= 0, "incWeak called on %p after last weak ref", this);
}

void RefBase::weakref_type::incWeakRequireWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->addWeakRef(id);
    const int32_t c __unused = impl->mWeak.fetch_add(1,
            std::memory_order_relaxed);
    LOG_ALWAYS_FATAL_IF(c <= 0, "incWeakRequireWeak called on %p which has no weak refs", this);
}

void RefBase::weakref_type::decWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->removeWeakRef(id);
    const int32_t c = impl->mWeak.fetch_sub(1, std::memory_order_release);
    LOG_ALWAYS_FATAL_IF(BAD_WEAK(c), "decWeak called on %p too many times",
                        this);
    if (c != 1) return;
    atomic_thread_fence(std::memory_order_acquire);

    int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
    if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
        // This is the regular lifetime case. The object is destroyed
        // when the last strong reference goes away. Since weakref_impl
        // outlives the object, it is not destroyed in the dtor, and
        // we'll have to do it here.
        if (impl->mStrong.load(std::memory_order_relaxed)
                == INITIAL_STRONG_VALUE) {
            // Decrementing a weak count to zero when object never had a strong
            // reference. We assume it acquired a weak reference early, e.g.
            // in the constructor, and will eventually be properly destroyed,
            // usually via incrementing and decrementing the strong count.
            // Thus we no longer do anything here. We log this case, since it
            // seems to be extremely rare, and should not normally occur. We
            // used to deallocate mBase here, so this may now indicate a leak.
            ALOGW("RefBase: Object at %p lost last weak reference "
                  "before it had a strong reference", impl->mBase);
        } else {
            // ALOGV("Freeing refs %p of old RefBase %p\n", this, impl->mBase);
            delete impl;
        }
    } else {
        // This is the OBJECT_LIFETIME_WEAK case. The last weak-reference
        // is gone, we can destroy the object.
        impl->mBase->onLastWeakRef(id);
        delete impl->mBase;
    }
}

bool RefBase::weakref_type::attemptIncStrong(const void* id)
{
    incWeak(id);

    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    int32_t curCount = impl->mStrong.load(std::memory_order_relaxed);

    ALOG_ASSERT(curCount >= 0,
            "attemptIncStrong called on %p after underflow", this);

    while (curCount > 0 && curCount != INITIAL_STRONG_VALUE) {
        // we're in the easy/common case of promoting a weak-reference
        // from an existing strong reference.
        if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
                std::memory_order_relaxed)) {
            break;
        }
        // the strong count has changed on us, we need to re-assert our
        // situation. curCount was updated by compare_exchange_weak.
    }

    if (curCount <= 0 || curCount == INITIAL_STRONG_VALUE) {
        // we're now in the harder case of either:
        // - there never was a strong reference on us
        // - or, all strong references have been released
        int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            // this object has a "normal" life-time, i.e.: it gets destroyed
            // when the last strong reference goes away
            if (curCount <= 0) {
                // the last strong-reference got released, the object cannot
                // be revived.
                decWeak(id);
                return false;
            }

            // here, curCount == INITIAL_STRONG_VALUE, which means
            // there never was a strong-reference, so we can try to
            // promote this object; we need to do that atomically.
            while (curCount > 0) {
                if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
                        std::memory_order_relaxed)) {
                    break;
                }
                // the strong count has changed on us, we need to re-assert our
                // situation (e.g.: another thread has inc/decStrong'ed us)
                // curCount has been updated.
            }

            if (curCount <= 0) {
                // promote() failed, some other thread destroyed us in the
                // meantime (i.e.: strong count reached zero).
                decWeak(id);
                return false;
            }
        } else {
            // this object has an "extended" life-time, i.e.: it can be
            // revived from a weak-reference only.
            // Ask the object's implementation if it agrees to be revived
            if (!impl->mBase->onIncStrongAttempted(FIRST_INC_STRONG, id)) {
                // it didn't so give-up.
                decWeak(id);
                return false;
            }
            // grab a strong-reference, which is always safe due to the
            // extended life-time.
            curCount = impl->mStrong.fetch_add(1, std::memory_order_relaxed);
            // If the strong reference count has already been incremented by
            // someone else, the implementor of onIncStrongAttempted() is holding
            // an unneeded reference. So call onLastStrongRef() here to remove it.
            // (No, this is not pretty.) Note that we MUST NOT do this if we
            // are in fact acquiring the first reference.
            if (curCount != 0 && curCount != INITIAL_STRONG_VALUE) {
                impl->mBase->onLastStrongRef(id);
            }
        }
    }

    impl->addStrongRef(id);

#if PRINT_REFS
    ALOGD("attemptIncStrong of %p from %p: cnt=%d\n", this, id, curCount);
#endif

    // curCount is the value of mStrong before we incremented it.
    // Now we need to fix-up the count if it was INITIAL_STRONG_VALUE.
    // This must be done safely, i.e.: handle the case where several threads
    // were here in attemptIncStrong().
    // curCount > INITIAL_STRONG_VALUE is OK, and can happen if we're doing
    // this in the middle of another incStrong. The subtraction is handled
    // by the thread that started with INITIAL_STRONG_VALUE.
    if (curCount == INITIAL_STRONG_VALUE) {
        impl->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
                std::memory_order_relaxed);
    }

    return true;
}
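
// Illustrative note: attemptIncStrong() is the operation behind wp<>::promote().
// A sketch of typical client code (Foo is a hypothetical RefBase subclass):
//
//     wp<Foo> weak = strong;           // strong is an sp<Foo>
//     sp<Foo> again = weak.promote();  // null if attemptIncStrong() failed
//     if (again != nullptr) { /* object is alive and now strongly held */ }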

bool RefBase::weakref_type::attemptIncWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);

    int32_t curCount = impl->mWeak.load(std::memory_order_relaxed);
    ALOG_ASSERT(curCount >= 0, "attemptIncWeak called on %p after underflow",
                this);
    while (curCount > 0) {
        if (impl->mWeak.compare_exchange_weak(curCount, curCount+1,
                std::memory_order_relaxed)) {
            break;
        }
        // curCount has been updated.
    }

    if (curCount > 0) {
        impl->addWeakRef(id);
    }

    return curCount > 0;
}

int32_t RefBase::weakref_type::getWeakCount() const
{
    // Debug only!
    return static_cast<const weakref_impl*>(this)->mWeak
            .load(std::memory_order_relaxed);
}

void RefBase::weakref_type::printRefs() const
{
    static_cast<const weakref_impl*>(this)->printRefs();
}

void RefBase::weakref_type::trackMe(bool enable, bool retain)
{
    static_cast<weakref_impl*>(this)->trackMe(enable, retain);
}
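
// Illustrative debugging recipe (a sketch, not required by this file): build with
// DEBUG_REFS set to 1, then, for some object obj deriving from RefBase:
//
//     obj->getWeakRefs()->trackMe(true, /*retain=*/ false);  // start tracking
//     ...
//     obj->getWeakRefs()->printRefs();  // dump live refs; also written to a file
//                                       // under DEBUG_REFS_CALLSTACK_PATH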

RefBase::weakref_type* RefBase::createWeak(const void* id) const
{
    mRefs->incWeak(id);
    return mRefs;
}

RefBase::weakref_type* RefBase::getWeakRefs() const
{
    return mRefs;
}

RefBase::RefBase()
    : mRefs(new weakref_impl(this))
{
}

RefBase::~RefBase()
{
    int32_t flags = mRefs->mFlags.load(std::memory_order_relaxed);
    // If the lifetime of this object has been extended to WEAK, then
    // weakref_impl doesn't out-live the object and we can free it now.
    if ((flags & OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_WEAK) {
        // It's possible that the weak count is not 0 if the object
        // re-acquired a weak reference in its destructor
        if (mRefs->mWeak.load(std::memory_order_relaxed) == 0) {
            delete mRefs;
        }
    } else {
        int32_t strongs = mRefs->mStrong.load(std::memory_order_relaxed);

        if (strongs == INITIAL_STRONG_VALUE) {
            // We never acquired a strong reference on this object.

            // It would be nice to make this fatal, but many places use RefBase on the stack.
            // However, this is dangerous because it's also common for code to use the
            // sp<T>(T*) constructor, assuming that if the object is around, it is already
            // owned by an sp<>.
            ALOGW("RefBase: Explicit destruction, weak count = %d (in %p). Use sp<> to manage this "
                  "object.",
                  mRefs->mWeak.load(), this);

#if ANDROID_UTILS_CALLSTACK_ENABLED
            CallStack::logStack(LOG_TAG);
#endif
        } else if (strongs != 0) {
            LOG_ALWAYS_FATAL("RefBase: object %p with strong count %d deleted. Double owned?", this,
                             strongs);
        }
    }
    // For debugging purposes, clear mRefs. Ineffective against outstanding wp's.
    const_cast<weakref_impl*&>(mRefs) = nullptr;
}

void RefBase::extendObjectLifetime(int32_t mode)
{
    check_not_on_stack(this);

    // Must be happens-before ordered with respect to construction or any
    // operation that could destroy the object.
    mRefs->mFlags.fetch_or(mode, std::memory_order_relaxed);
}
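
// Illustrative usage (a sketch; Foo is a hypothetical subclass): clients typically
// call this from the constructor, before any decrement that could otherwise have
// destroyed the object:
//
//     Foo::Foo() {
//         extendObjectLifetime(OBJECT_LIFETIME_WEAK);  // live until last weak ref
//     }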

void RefBase::onFirstRef()
{
}

void RefBase::onLastStrongRef(const void* /*id*/)
{
}

bool RefBase::onIncStrongAttempted(uint32_t flags, const void* /*id*/)
{
    return (flags&FIRST_INC_STRONG) ? true : false;
}
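
// Illustrative note (a sketch; Foo is a hypothetical subclass): an
// OBJECT_LIFETIME_WEAK object can veto revival from a weak reference by
// overriding this hook:
//
//     bool Foo::onIncStrongAttempted(uint32_t /*flags*/, const void* /*id*/) {
//         return false;  // wp<Foo>::promote() then fails whenever no strong
//                        // reference is currently held
//     }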

void RefBase::onLastWeakRef(const void* /*id*/)
{
}

// ---------------------------------------------------------------------------

#if DEBUG_REFS
void RefBase::renameRefs(size_t n, const ReferenceRenamer& renamer) {
    for (size_t i=0 ; i<n ; i++) {
        renamer(i);
    }
}
#else
void RefBase::renameRefs(size_t /*n*/, const ReferenceRenamer& /*renamer*/) { }
#endif

void RefBase::renameRefId(weakref_type* ref,
        const void* old_id, const void* new_id) {
    weakref_impl* const impl = static_cast<weakref_impl*>(ref);
    impl->renameStrongRefId(old_id, new_id);
    impl->renameWeakRefId(old_id, new_id);
}

void RefBase::renameRefId(RefBase* ref,
        const void* old_id, const void* new_id) {
    ref->mRefs->renameStrongRefId(old_id, new_id);
    ref->mRefs->renameWeakRefId(old_id, new_id);
}

}; // namespace android