Add explicit GrSurfaceProxy flag to skip explicit resource allocation

This approach eliminates many edge cases in which ops (e.g., SmallPathOp) treat their proxies specially (i.e., hold only a raw ref and never add pendingIO). Since the atlas managers control the lifetimes of these proxies, there is no reason for the GrResourceAllocator to be aware of them.
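
A rough sketch of the resulting flow (the AtlasManager::onProxyCreated hook is hypothetical and the signatures are simplified; the real entry points are GrSurfaceProxyPriv::setIgnoredByResourceAllocator() and GrSurfaceProxy::canSkipResourceAllocator(), shown in the diff below):

    // The owning object (e.g., an atlas manager) opts its proxy out of
    // explicit allocation, since it manages the proxy's lifetime itself:
    void AtlasManager::onProxyCreated(GrSurfaceProxy* proxy) {
        proxy->priv().setIgnoredByResourceAllocator();
    }

    // The allocator then drops such proxies before doing any interval
    // bookkeeping (simplified from GrResourceAllocator::addInterval):
    void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy,
                                          unsigned int start, unsigned int end) {
        if (proxy->canSkipResourceAllocator()) {
            return;  // lifetime is tracked by the atlas/onFlush owner
        }
        // ... normal interval tracking ...
    }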

Pulled out of:

https://skia-review.googlesource.com/c/skia/+/208227 (Implement alternate method for determining recycle-ability of allocated GrSurfaces)

Change-Id: Ia6bec5e8f5d5bc63e86ae011bcc3f8e061c066b2
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/209400
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
diff --git a/include/private/GrSurfaceProxy.h b/include/private/GrSurfaceProxy.h
index dc974b4..f9f991d 100644
--- a/include/private/GrSurfaceProxy.h
+++ b/include/private/GrSurfaceProxy.h
@@ -183,9 +183,8 @@
         }
     }
 
-    int32_t internalGetProxyRefCnt() const {
-        return fRefCnt;
-    }
+    int32_t internalGetProxyRefCnt() const { return fRefCnt; }
+    int32_t internalGetTotalRefs() const { return fRefCnt + fPendingReads + fPendingWrites; }
 
     // For deferred proxies this will be null. For wrapped proxies it will point to the
     // wrapped resource.
@@ -493,9 +492,11 @@
     friend class GrSurfaceProxyPriv;
 
     // Methods made available via GrSurfaceProxyPriv
-    int32_t getProxyRefCnt() const {
-        return this->internalGetProxyRefCnt();
-    }
+    bool ignoredByResourceAllocator() const { return fIgnoredByResourceAllocator; }
+    void setIgnoredByResourceAllocator() { fIgnoredByResourceAllocator = true; }
+
+    int32_t getProxyRefCnt() const { return this->internalGetProxyRefCnt(); }
+    int32_t getTotalRefs() const { return this->internalGetTotalRefs(); }
 
     void computeScratchKey(GrScratchKey*) const;
 
@@ -551,6 +552,7 @@
     virtual size_t onUninstantiatedGpuMemorySize() const = 0;
 
     bool                   fNeedsClear;
+    bool                   fIgnoredByResourceAllocator = false;
 
     // This entry is lazily evaluated so, when the proxy wraps a resource, the resource
     // will be called but, when the proxy is deferred, it will compute the answer itself.
diff --git a/src/gpu/GrDrawOpAtlas.cpp b/src/gpu/GrDrawOpAtlas.cpp
index 0d9cd80..4f7e246 100644
--- a/src/gpu/GrDrawOpAtlas.cpp
+++ b/src/gpu/GrDrawOpAtlas.cpp
@@ -536,6 +536,8 @@
             return false;
         }
 
+        fProxies[i]->priv().setIgnoredByResourceAllocator();
+
         // set up allocated plots
         fPages[i].fPlotArray.reset(new sk_sp<Plot>[ numPlotsX * numPlotsY ]);
 
diff --git a/src/gpu/GrOnFlushResourceProvider.cpp b/src/gpu/GrOnFlushResourceProvider.cpp
index 7349901..e2ea013 100644
--- a/src/gpu/GrOnFlushResourceProvider.cpp
+++ b/src/gpu/GrOnFlushResourceProvider.cpp
@@ -64,6 +64,7 @@
 }
 
 bool GrOnFlushResourceProvider::instatiateProxy(GrSurfaceProxy* proxy) {
+    SkASSERT(proxy->priv().ignoredByResourceAllocator());
     SkASSERT(proxy->priv().requiresNoPendingIO());
 
     // TODO: this class should probably just get a GrDirectContext
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index 396372c..a94be6e 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -57,6 +57,12 @@
 
 void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end
                                       SkDEBUGCODE(, bool isDirectDstRead)) {
+    if (proxy->canSkipResourceAllocator()) {
+        return;
+    }
+
+    SkASSERT(!proxy->priv().ignoredByResourceAllocator());
+
     SkASSERT(start <= end);
     SkASSERT(!fAssigned);      // We shouldn't be adding any intervals after (or during) assignment
 
@@ -341,17 +347,22 @@
     SkASSERT(outError);
     *outError = AssignError::kNoError;
 
-    fIntvlHash.reset(); // we don't need the interval hash anymore
-    if (fIntvlList.empty()) {
-        return false;          // nothing to render
-    }
-
-    SkASSERT(fCurOpListIndex < fNumOpLists);
     SkASSERT(fNumOpLists == fEndOfOpListOpIndices.count());
 
+    fIntvlHash.reset(); // we don't need the interval hash anymore
+
+    if (fCurOpListIndex >= fEndOfOpListOpIndices.count()) {
+        return false; // nothing to render
+    }
+
     *startIndex = fCurOpListIndex;
     *stopIndex = fEndOfOpListOpIndices.count();
 
+    if (fIntvlList.empty()) {
+        fCurOpListIndex = fEndOfOpListOpIndices.count();
+        return true;          // no resources to assign
+    }
+
 #if GR_ALLOCATION_SPEW
     SkDebugf("assigning opLists %d through %d out of %d numOpLists\n",
              *startIndex, *stopIndex, fNumOpLists);
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index e9f14cb..7a59b0e 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -189,6 +189,11 @@
 }
 
 bool GrSurfaceProxy::canSkipResourceAllocator() const {
+    if (this->ignoredByResourceAllocator()) {
+        // Usually an atlas or onFlush proxy
+        return true;
+    }
+
     auto peek = this->peekSurface();
     if (!peek) {
         return false;
diff --git a/src/gpu/GrSurfaceProxyPriv.h b/src/gpu/GrSurfaceProxyPriv.h
index ef8fe19..34ca7a1 100644
--- a/src/gpu/GrSurfaceProxyPriv.h
+++ b/src/gpu/GrSurfaceProxyPriv.h
@@ -23,6 +23,8 @@
     // depends on the read and write refs (So this method can validly return 0).
     int32_t getProxyRefCnt() const { return fProxy->getProxyRefCnt(); }
 
+    int32_t getTotalRefs() const { return fProxy->getTotalRefs(); }
+
     void computeScratchKey(GrScratchKey* key) const { return fProxy->computeScratchKey(key); }
 
     // Create a GrSurface-derived class that meets the requirements (i.e, desc, renderability)
@@ -58,6 +60,9 @@
     static bool SK_WARN_UNUSED_RESULT AttachStencilIfNeeded(GrResourceProvider*, GrSurface*,
                                                             bool needsStencil);
 
+    bool ignoredByResourceAllocator() const { return fProxy->ignoredByResourceAllocator(); }
+    void setIgnoredByResourceAllocator() { fProxy->setIgnoredByResourceAllocator(); }
+
 private:
     explicit GrSurfaceProxyPriv(GrSurfaceProxy* proxy) : fProxy(proxy) {}
     GrSurfaceProxyPriv(const GrSurfaceProxyPriv&) {} // unimpl
diff --git a/src/gpu/ccpr/GrCCAtlas.cpp b/src/gpu/ccpr/GrCCAtlas.cpp
index 417bf96..4e9e4e3 100644
--- a/src/gpu/ccpr/GrCCAtlas.cpp
+++ b/src/gpu/ccpr/GrCCAtlas.cpp
@@ -97,6 +97,8 @@
                     return GrSurfaceProxy::LazyInstantiationResult(fBackingTexture);
             },
             format, GrProxyProvider::Renderable::kYes, kTextureOrigin, pixelConfig, caps);
+
+    fTextureProxy->priv().setIgnoredByResourceAllocator();
 }
 
 GrCCAtlas::~GrCCAtlas() {
diff --git a/src/gpu/ops/GrTextureOp.cpp b/src/gpu/ops/GrTextureOp.cpp
index daa2db1..554dfb8 100644
--- a/src/gpu/ops/GrTextureOp.cpp
+++ b/src/gpu/ops/GrTextureOp.cpp
@@ -196,9 +196,6 @@
     const char* name() const override { return "TextureOp"; }
 
     void visitProxies(const VisitProxyFunc& func, VisitorType visitor) const override {
-        if (visitor == VisitorType::kAllocatorGather && fCanSkipAllocatorGather) {
-            return;
-        }
         for (unsigned p = 0; p < fProxyCnt; ++p) {
             func(fProxies[p].fProxy);
         }
@@ -300,8 +297,6 @@
         auto bounds = dstQuad.bounds(dstQuadType);
         this->setBounds(bounds, HasAABloat(aaType == GrAAType::kCoverage), IsZeroArea::kNo);
         fDomain = static_cast<unsigned>(domain);
-        fCanSkipAllocatorGather =
-                static_cast<unsigned>(fProxies[0].fProxy->canSkipResourceAllocator());
     }
     TextureOp(const GrRenderTargetContext::TextureSetEntry set[], int cnt,
               GrSamplerState::Filter filter, GrAAType aaType,
@@ -315,7 +310,6 @@
         SkRect bounds = SkRectPriv::MakeLargestInverted();
         GrAAType overallAAType = GrAAType::kNone; // aa type maximally compatible with all dst rects
         bool mustFilter = false;
-        fCanSkipAllocatorGather = static_cast<unsigned>(true);
         // Most dst rects are transformed by the same view matrix, so their quad types start
         // identical, unless an entry provides a dstClip or additional transform that changes it.
         // The quad list will automatically adapt to that.
@@ -327,9 +321,6 @@
             fProxies[p].fQuadCnt = 1;
             SkASSERT(fProxies[p].fProxy->textureType() == fProxies[0].fProxy->textureType());
             SkASSERT(fProxies[p].fProxy->config() == fProxies[0].fProxy->config());
-            if (!fProxies[p].fProxy->canSkipResourceAllocator()) {
-                fCanSkipAllocatorGather = static_cast<unsigned>(false);
-            }
 
             SkMatrix ctm = viewMatrix;
             if (set[p].fPreViewMatrix) {
@@ -677,8 +668,7 @@
     GR_STATIC_ASSERT(GrQuadPerEdgeAA::kColorTypeCount <= 4);
     // Used to track whether fProxy is ref'ed or has a pending IO after finalize() is called.
     unsigned fFinalized : 1;
-    unsigned fCanSkipAllocatorGather : 1;
-    unsigned fProxyCnt : 32 - 9;
+    unsigned fProxyCnt : 32 - 8;
     Proxy fProxies[1];
 
     static_assert(kGrQuadTypeCount <= 4, "GrQuadType does not fit in 2 bits");
diff --git a/tests/OnFlushCallbackTest.cpp b/tests/OnFlushCallbackTest.cpp
index 54982bc..c560d6c 100644
--- a/tests/OnFlushCallbackTest.cpp
+++ b/tests/OnFlushCallbackTest.cpp
@@ -323,6 +323,8 @@
                 kBottomLeft_GrSurfaceOrigin,
                 kRGBA_8888_GrPixelConfig,
                 *proxyProvider->caps());
+
+        fAtlasProxy->priv().setIgnoredByResourceAllocator();
         return fAtlasProxy;
     }