bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2012 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
| 8 | #ifndef SkWeakRefCnt_DEFINED |
| 9 | #define SkWeakRefCnt_DEFINED |
| 10 | |
Mike Klein | c0bd9f9 | 2019-04-23 12:05:21 -0500 | [diff] [blame] | 11 | #include "include/core/SkRefCnt.h" |
Kevin Lubick | bfc7c3a | 2022-11-29 11:58:51 -0500 | [diff] [blame] | 12 | #include "include/core/SkTypes.h" |
| 13 | |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 14 | #include <atomic> |
Kevin Lubick | bfc7c3a | 2022-11-29 11:58:51 -0500 | [diff] [blame] | 15 | #include <cstdint> |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 16 | |
| 17 | /** \class SkWeakRefCnt |
| 18 | |
| 19 | SkWeakRefCnt is the base class for objects that may be shared by multiple |
| 20 | objects. When an existing strong owner wants to share a reference, it calls |
| 21 | ref(). When a strong owner wants to release its reference, it calls |
| 22 | unref(). When the shared object's strong reference count goes to zero as |
| 23 | the result of an unref() call, its (virtual) weak_dispose method is called. |
| 24 | It is an error for the destructor to be called explicitly (or via the |
| 25 | object going out of scope on the stack or calling delete) if |
| 26 | getRefCnt() > 1. |
| 27 | |
| 28 | In addition to strong ownership, an owner may instead obtain a weak |
bungeman@google.com | 1ad75a1 | 2013-12-02 19:12:04 +0000 | [diff] [blame] | 29 | reference by calling weak_ref(). A call to weak_ref() must be balanced by a |
bungeman@google.com | a02bc15 | 2012-05-16 18:21:56 +0000 | [diff] [blame] | 30 | call to weak_unref(). To obtain a strong reference from a weak reference, |
| 31 | call try_ref(). If try_ref() returns true, the owner's pointer is now also |
| 32 | a strong reference on which unref() must be called. Note that this does not |
| 33 | affect the original weak reference, weak_unref() must still be called. When |
| 34 | the weak reference count goes to zero, the object is deleted. While the |
| 35 | weak reference count is positive and the strong reference count is zero the |
| 36 | object still exists, but will be in the disposed state. It is up to the |
| 37 | object to define what this means. |
| 38 | |
| 39 | Note that a strong reference implicitly implies a weak reference. As a |
| 40 | result, it is allowable for the owner of a strong ref to call try_ref(). |
| 41 | This will have the same effect as calling ref(), but may be more expensive. |
| 42 | |
    Example:

        SkWeakRefCnt* myRef = strongRef;
        myRef->weak_ref();
        ... // strongRef->unref() may or may not be called
        if (myRef->try_ref()) {
            ... // use myRef as a strong reference
            myRef->unref();
        } else {
            // myRef is in the disposed state
        }
        myRef->weak_unref();
| 54 | */ |
class SK_API SkWeakRefCnt : public SkRefCnt {
public:
    /** Default construct, initializing the reference counts to 1.
        The strong references collectively hold one weak reference. When the
        strong reference count goes to zero, the collectively held weak
        reference is released.

        Note: the strong count itself (fRefCnt) is inherited from SkRefCnt and
        initialized by its constructor — see include/core/SkRefCnt.h.
    */
    SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {}

    /** Destruct, asserting that the weak reference count is 1.
        In debug builds the count is then forced to 0 as a tripwire: any
        later (erroneous) use of this object's weak count becomes detectable.
    */
    ~SkWeakRefCnt() override {
#ifdef SK_DEBUG
        SkASSERT(getWeakCnt() == 1);
        fWeakCnt.store(0, std::memory_order_relaxed);
#endif
    }

#ifdef SK_DEBUG
    /** Return the weak reference count. Debug-only. The relaxed load means
        the value is just a snapshot; under concurrent use it may be stale. */
    int32_t getWeakCnt() const {
        return fWeakCnt.load(std::memory_order_relaxed);
    }
#endif

private:
    /** If fRefCnt is 0, returns 0.
     *  Otherwise increments fRefCnt, acquires, and returns the old value.
     *
     *  This is the lock-free "promote weak to strong" primitive: a CAS loop
     *  that refuses to resurrect an object whose strong count already hit 0.
     *  Ordering notes:
     *  - The initial load is relaxed: it is only a starting guess for the CAS;
     *    compare_exchange_weak refreshes `prev` on failure.
     *  - Success ordering is acquire so code after a successful promotion
     *    cannot be reordered before the increment (pairs with the release in
     *    the strong unref path). Failure ordering is relaxed — on failure we
     *    either retry or bail out with 0, so no synchronization is needed.
     *  - compare_exchange_weak (not _strong) is used because it sits in a
     *    retry loop anyway, where spurious failure is harmless and cheaper.
     */
    int32_t atomic_conditional_acquire_strong_ref() const {
        int32_t prev = fRefCnt.load(std::memory_order_relaxed);
        do {
            if (0 == prev) {
                break;
            }
        } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire,
                                               std::memory_order_relaxed));
        return prev;
    }

public:
    /** Creates a strong reference from a weak reference, if possible. The
        caller must already be an owner. If try_ref() returns true the owner
        is in possession of an additional strong reference. Both the original
        reference and new reference must be properly unreferenced. If try_ref()
        returns false, no strong reference could be created and the owner's
        reference is in the same state as before the call.
    */
    bool SK_WARN_UNUSED_RESULT try_ref() const {
        if (atomic_conditional_acquire_strong_ref() != 0) {
            // Acquire barrier (L/SL), if not provided above.
            // Prevents subsequent code from happening before the increment.
            return true;
        }
        return false;
    }

    /** Increment the weak reference count. Must be balanced by a call to
        weak_unref().
    */
    void weak_ref() const {
        // getRefCnt() > 0: a weak_ref() may only be taken by a party that
        // already holds some reference, so the object cannot be mid-disposal.
        SkASSERT(getRefCnt() > 0);
        SkASSERT(getWeakCnt() > 0);
        // No barrier required: taking an additional reference publishes
        // nothing; relaxed is sufficient for the counter increment itself.
        (void)fWeakCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    /** Decrement the weak reference count. If the weak reference count is 1
        before the decrement, then call delete on the object. Note that if this
        is the case, then the object needs to have been allocated via new, and
        not on the stack.
    */
    void weak_unref() const {
        SkASSERT(getWeakCnt() > 0);
        // acq_rel on the decrement:
        // A release here acts in place of all releases we "should" have been doing in ref().
        if (1 == fWeakCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            // Like try_ref(), the acquire is only needed on success, to make sure
            // code in internal_dispose() doesn't happen before the decrement.
#ifdef SK_DEBUG
            // Restore the count to 1 so our destructor's
            // SkASSERT(getWeakCnt() == 1) won't complain.
            fWeakCnt.store(1, std::memory_order_relaxed);
#endif
            // Delegate to SkRefCnt::internal_dispose(), which performs the
            // actual destruction (see include/core/SkRefCnt.h).
            this->INHERITED::internal_dispose();
        }
    }

    /** Returns true if there are no strong references to the object. When this
        is the case all future calls to try_ref() will return false.

        Relaxed load: this is inherently a racy snapshot — a concurrent
        try_ref()/unref() may change the answer immediately afterwards.
    */
    bool weak_expired() const {
        return fRefCnt.load(std::memory_order_relaxed) == 0;
    }

protected:
    /** Called when the strong reference count goes to zero. This allows the
        object to free any resources it may be holding. Weak references may
        still exist and their level of allowed access to the object is defined
        by the object's class.

        Default implementation does nothing; subclasses override as needed.
    */
    virtual void weak_dispose() const {
    }

private:
    /** Called when the strong reference count goes to zero. Calls weak_dispose
        on the object and releases the implicit weak reference held
        collectively by the strong references.

        Overrides SkRefCnt::internal_dispose() so that the final strong
        unref() disposes rather than deletes; deletion happens only when the
        last weak reference is released in weak_unref().
    */
    void internal_dispose() const override {
        weak_dispose();
        weak_unref();
    }

    /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
    mutable std::atomic<int32_t> fWeakCnt;

    using INHERITED = SkRefCnt;
};
| 172 | |
| 173 | #endif |