Implement alternate method for determining recycle-ability of allocated GrSurfaces (take 2)
With the upcoming removal of pendingIO refs the GrResourceAllocator needs a new means of determining when a backing GrSurface can be recycled and when it needs to be left assigned to a GrSurfaceProxy.
Change-Id: I2327b0f15ceb639b400a55a9c53359a4b43288c6
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/210041
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index d887e1a..b7d4d31 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -314,6 +314,7 @@
}
alloc.markEndOfOpList(i);
}
+ alloc.determineRecyclability();
GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
int numOpListsExecuted = 0;
diff --git a/src/gpu/GrProcessorSet.cpp b/src/gpu/GrProcessorSet.cpp
index 94dd240..3a1458f 100644
--- a/src/gpu/GrProcessorSet.cpp
+++ b/src/gpu/GrProcessorSet.cpp
@@ -190,6 +190,7 @@
hasCoverageFP = hasCoverageFP || clip->numClipCoverageFragmentProcessors();
for (int i = 0; i < clip->numClipCoverageFragmentProcessors(); ++i) {
const GrFragmentProcessor* clipFP = clip->clipCoverageFragmentProcessor(i);
+ clipFP->markPendingExecution();
analysis.fCompatibleWithCoverageAsAlpha &= clipFP->compatibleWithCoverageAsAlpha();
coverageUsesLocalCoords |= clipFP->usesLocalCoords();
}
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index e652902..f586293 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -633,24 +633,27 @@
// they can be recycled. This is a bit unfortunate because a flush can proceed in waves
// with sub-flushes. The deferred proxies only need to be pinned from the start of
// the sub-flush in which they appear.
- alloc->addInterval(fDeferredProxies[i], 0, 0);
+ alloc->addInterval(fDeferredProxies[i], 0, 0, GrResourceAllocator::ActualUse::kNo);
}
// Add the interval for all the writes to this opList's target
if (fOpChains.count()) {
unsigned int cur = alloc->curOp();
- alloc->addInterval(fTarget.get(), cur, cur + fOpChains.count() - 1);
+ alloc->addInterval(fTarget.get(), cur, cur + fOpChains.count() - 1,
+ GrResourceAllocator::ActualUse::kYes);
} else {
// This can happen if there is a loadOp (e.g., a clear) but no other draws. In this case we
// still need to add an interval for the destination so we create a fake op# for
// the missing clear op.
- alloc->addInterval(fTarget.get(), alloc->curOp(), alloc->curOp());
+ alloc->addInterval(fTarget.get(), alloc->curOp(), alloc->curOp(),
+ GrResourceAllocator::ActualUse::kYes);
alloc->incOps();
}
auto gather = [ alloc SkDEBUGCODE(, this) ] (GrSurfaceProxy* p) {
- alloc->addInterval(p, alloc->curOp(), alloc->curOp() SkDEBUGCODE(, fTarget.get() == p));
+ alloc->addInterval(p, alloc->curOp(), alloc->curOp(), GrResourceAllocator::ActualUse::kYes
+ SkDEBUGCODE(, fTarget.get() == p));
};
for (const OpChain& recordedOp : fOpChains) {
// only diff from the GrTextureOpList version
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index ff09c31..f8706eb 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -37,6 +37,22 @@
fProxy->priv().assign(std::move(s));
}
+void GrResourceAllocator::determineRecyclability() {
+ for (Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
+ if (cur->proxy()->canSkipResourceAllocator()) {
+ // These types of proxies can slip in here if they require a stencil buffer
+ continue;
+ }
+
+ if (cur->uses() >= cur->proxy()->priv().getTotalRefs()) {
+ // All the refs on the proxy are known to the resource allocator thus no one
+ // should be holding onto it outside of Ganesh.
+ SkASSERT(cur->uses() == cur->proxy()->priv().getTotalRefs());
+ cur->markAsRecyclable();
+ }
+ }
+}
+
void GrResourceAllocator::markEndOfOpList(int opListIndex) {
SkASSERT(!fAssigned); // We shouldn't be adding any opLists after (or during) assignment
@@ -55,7 +71,8 @@
SkASSERT(!fIntvlHash.count());
}
-void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end
+void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
+ ActualUse actualUse
SkDEBUGCODE(, bool isDirectDstRead)) {
bool needsStencil = proxy->asRenderTargetProxy()
@@ -100,6 +117,9 @@
SkASSERT(intvl->end() <= start && intvl->end() <= end);
}
#endif
+ if (ActualUse::kYes == actualUse) {
+ intvl->addUse();
+ }
intvl->extendEnd(end);
return;
}
@@ -113,6 +133,9 @@
newIntvl = fIntervalAllocator.make<Interval>(proxy, start, end);
}
+ if (ActualUse::kYes == actualUse) {
+ newIntvl->addUse();
+ }
fIntvlList.insertByIncreasingStart(newIntvl);
fIntvlHash.add(newIntvl);
}
@@ -309,10 +332,7 @@
if (temp->wasAssignedSurface()) {
sk_sp<GrSurface> surface = temp->detachSurface();
- // If the proxy has an actual live ref on it that means someone wants to retain its
- // contents. In that case we cannot recycle it (until the external holder lets
- // go of it).
- if (0 == temp->proxy()->priv().getProxyRefCnt()) {
+ if (temp->isRecyclable()) {
this->recycleSurface(std::move(surface));
}
}
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
index 7958eca..eded88a 100644
--- a/src/gpu/GrResourceAllocator.h
+++ b/src/gpu/GrResourceAllocator.h
@@ -55,9 +55,19 @@
unsigned int curOp() const { return fNumOps; }
void incOps() { fNumOps++; }
+ /** Indicates whether a given call to addInterval represents an actual usage of the
+ * provided proxy. This is mainly here to accommodate deferred proxies attached to opLists.
+ * In that case we need to create an extra long interval for them (due to the upload) but
+ * don't want to count that usage/reference towards the proxy's recyclability.
+ */
+ enum class ActualUse : bool {
+ kNo = false,
+ kYes = true
+ };
+
// Add a usage interval from 'start' to 'end' inclusive. This is usually used for renderTargets.
// If an existing interval already exists it will be expanded to include the new range.
- void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end
+ void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse
SkDEBUGCODE(, bool isDirectDstRead = false));
enum class AssignError {
@@ -73,6 +83,7 @@
// amount of GPU resources required.
bool assign(int* startIndex, int* stopIndex, AssignError* outError);
+ void determineRecyclability();
void markEndOfOpList(int opListIndex);
#if GR_ALLOCATION_SPEW
@@ -125,6 +136,7 @@
SkASSERT(proxy);
SkASSERT(!fProxy && !fNext);
+ fUses = 0;
fProxy = proxy;
fProxyID = proxy->uniqueID().asUInt();
fStart = start;
@@ -143,12 +155,19 @@
const GrSurfaceProxy* proxy() const { return fProxy; }
GrSurfaceProxy* proxy() { return fProxy; }
+
unsigned int start() const { return fStart; }
unsigned int end() const { return fEnd; }
+
+ void setNext(Interval* next) { fNext = next; }
const Interval* next() const { return fNext; }
Interval* next() { return fNext; }
- void setNext(Interval* next) { fNext = next; }
+ void markAsRecyclable() { fIsRecyclable = true; }
+ bool isRecyclable() const { return fIsRecyclable; }
+
+ void addUse() { fUses++; }
+ int uses() { return fUses; }
void extendEnd(unsigned int newEnd) {
if (newEnd > fEnd) {
@@ -176,6 +195,8 @@
unsigned int fStart;
unsigned int fEnd;
Interval* fNext;
+ unsigned int fUses = 0;
+ bool fIsRecyclable = false;
#if GR_TRACK_INTERVAL_CREATION
uint32_t fUniqueID;
@@ -197,6 +218,7 @@
return !SkToBool(fHead);
}
const Interval* peekHead() const { return fHead; }
+ Interval* peekHead() { return fHead; }
Interval* popHead();
void insertByIncreasingStart(Interval*);
void insertByIncreasingEnd(Interval*);
diff --git a/src/gpu/GrTextureOpList.cpp b/src/gpu/GrTextureOpList.cpp
index 285bb3c..fcd9196 100644
--- a/src/gpu/GrTextureOpList.cpp
+++ b/src/gpu/GrTextureOpList.cpp
@@ -203,17 +203,20 @@
if (fRecordedOps.count()) {
unsigned int cur = alloc->curOp();
- alloc->addInterval(fTarget.get(), cur, cur+fRecordedOps.count()-1);
+ alloc->addInterval(fTarget.get(), cur, cur+fRecordedOps.count()-1,
+ GrResourceAllocator::ActualUse::kYes);
} else {
// This can happen if there is a loadOp (e.g., a clear) but no other draws. In this case we
// still need to add an interval for the destination so we create a fake op# for
// the missing clear op.
- alloc->addInterval(fTarget.get(), alloc->curOp(), alloc->curOp());
+ alloc->addInterval(fTarget.get(), alloc->curOp(), alloc->curOp(),
+ GrResourceAllocator::ActualUse::kYes);
alloc->incOps();
}
auto gather = [ alloc SkDEBUGCODE(, this) ] (GrSurfaceProxy* p) {
- alloc->addInterval(p, alloc->curOp(), alloc->curOp() SkDEBUGCODE(, p == fTarget.get()));
+ alloc->addInterval(p, alloc->curOp(), alloc->curOp(), GrResourceAllocator::ActualUse::kYes
+ SkDEBUGCODE(, p == fTarget.get()));
};
for (int i = 0; i < fRecordedOps.count(); ++i) {
const GrOp* op = fRecordedOps[i].get(); // only diff from the GrRenderTargetOpList version
diff --git a/src/gpu/ops/GrCopySurfaceOp.cpp b/src/gpu/ops/GrCopySurfaceOp.cpp
index f2a5257..af20b65 100644
--- a/src/gpu/ops/GrCopySurfaceOp.cpp
+++ b/src/gpu/ops/GrCopySurfaceOp.cpp
@@ -83,7 +83,7 @@
GrOpMemoryPool* pool = context->priv().opMemoryPool();
- return pool->allocate<GrCopySurfaceOp>(dstProxy, srcProxy, clippedSrcRect, clippedDstPoint);
+ return pool->allocate<GrCopySurfaceOp>(srcProxy, clippedSrcRect, clippedDstPoint);
}
void GrCopySurfaceOp::onExecute(GrOpFlushState* state, const SkRect& chainBounds) {
diff --git a/src/gpu/ops/GrCopySurfaceOp.h b/src/gpu/ops/GrCopySurfaceOp.h
index eeaa237..dd87bc7 100644
--- a/src/gpu/ops/GrCopySurfaceOp.h
+++ b/src/gpu/ops/GrCopySurfaceOp.h
@@ -43,8 +43,7 @@
private:
friend class GrOpMemoryPool; // for ctor
- GrCopySurfaceOp(GrSurfaceProxy* dst, GrSurfaceProxy* src,
- const SkIRect& srcRect, const SkIPoint& dstPoint)
+ GrCopySurfaceOp(GrSurfaceProxy* src, const SkIRect& srcRect, const SkIPoint& dstPoint)
: INHERITED(ClassID())
, fSrc(src)
, fSrcRect(srcRect)
diff --git a/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp b/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp
index 4826b3e..e2b14c9 100644
--- a/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp
+++ b/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp
@@ -93,6 +93,13 @@
*geometryColor = overrideColor;
}
} else {
+ if (clip) {
+ for (int i = 0; i < clip->numClipCoverageFragmentProcessors(); ++i) {
+ const GrFragmentProcessor* clipFP = clip->clipCoverageFragmentProcessor(i);
+ clipFP->markPendingExecution();
+ }
+ }
+
analysis = GrProcessorSet::EmptySetAnalysis();
}
fUsesLocalCoords = analysis.usesLocalCoords();
diff --git a/tests/ResourceAllocatorTest.cpp b/tests/ResourceAllocatorTest.cpp
index e3c2564..b40107a 100644
--- a/tests/ResourceAllocatorTest.cpp
+++ b/tests/ResourceAllocatorTest.cpp
@@ -95,12 +95,14 @@
GrDeinstantiateProxyTracker deinstantiateTracker(resourceCache);
GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
- alloc.addInterval(p1, 0, 4);
+ alloc.addInterval(p1, 0, 4, GrResourceAllocator::ActualUse::kYes);
alloc.incOps();
- alloc.addInterval(p2, 1, 2);
+ alloc.addInterval(p2, 1, 2, GrResourceAllocator::ActualUse::kYes);
alloc.incOps();
alloc.markEndOfOpList(0);
+ alloc.determineRecyclability();
+
int startIndex, stopIndex;
GrResourceAllocator::AssignError error;
alloc.assign(&startIndex, &stopIndex, &error);
@@ -127,10 +129,12 @@
alloc.incOps();
alloc.incOps();
- alloc.addInterval(p1, 0, 2);
- alloc.addInterval(p2, 3, 5);
+ alloc.addInterval(p1, 0, 2, GrResourceAllocator::ActualUse::kYes);
+ alloc.addInterval(p2, 3, 5, GrResourceAllocator::ActualUse::kYes);
alloc.markEndOfOpList(0);
+ alloc.determineRecyclability();
+
int startIndex, stopIndex;
GrResourceAllocator::AssignError error;
alloc.assign(&startIndex, &stopIndex, &error);
@@ -345,12 +349,15 @@
GrDeinstantiateProxyTracker deinstantiateTracker(resourceCache);
{
GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
- alloc.addInterval(p0.get(), 0, 1);
- alloc.addInterval(p1.get(), 0, 1);
- alloc.addInterval(p2.get(), 0, 1);
- alloc.addInterval(p3.get(), 0, 1);
+ alloc.addInterval(p0.get(), 0, 1, GrResourceAllocator::ActualUse::kNo);
+ alloc.addInterval(p1.get(), 0, 1, GrResourceAllocator::ActualUse::kNo);
+ alloc.addInterval(p2.get(), 0, 1, GrResourceAllocator::ActualUse::kNo);
+ alloc.addInterval(p3.get(), 0, 1, GrResourceAllocator::ActualUse::kNo);
alloc.incOps();
alloc.markEndOfOpList(0);
+
+ alloc.determineRecyclability();
+
int startIndex, stopIndex;
GrResourceAllocator::AssignError error;
alloc.assign(&startIndex, &stopIndex, &error);
@@ -392,21 +399,23 @@
GrDeinstantiateProxyTracker deinstantiateTracker(resourceCache);
GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 2));
- alloc.addInterval(p1, 0, 0);
+ alloc.addInterval(p1, 0, 0, GrResourceAllocator::ActualUse::kYes);
alloc.incOps();
- alloc.addInterval(p2, 1, 1);
+ alloc.addInterval(p2, 1, 1, GrResourceAllocator::ActualUse::kYes);
alloc.incOps();
alloc.markEndOfOpList(0);
- alloc.addInterval(p3, 2, 2);
+ alloc.addInterval(p3, 2, 2, GrResourceAllocator::ActualUse::kYes);
alloc.incOps();
- alloc.addInterval(p4, 3, 3);
+ alloc.addInterval(p4, 3, 3, GrResourceAllocator::ActualUse::kYes);
alloc.incOps();
alloc.markEndOfOpList(1);
int startIndex, stopIndex;
GrResourceAllocator::AssignError error;
+ alloc.determineRecyclability();
+
alloc.assign(&startIndex, &stopIndex, &error);
REPORTER_ASSERT(reporter, GrResourceAllocator::AssignError::kNoError == error);
REPORTER_ASSERT(reporter, 0 == startIndex && 1 == stopIndex);