ccpr: Polyfill buffer mapping when not supported
Change-Id: I62880a83d9b59d42c6491125e2a62338d2ce757f
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/279200
Reviewed-by: Brian Osman <brianosman@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
diff --git a/gn/gpu.gni b/gn/gpu.gni
index 1d91c73..1d2fe87 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -575,6 +575,7 @@
skia_ccpr_sources = [
# coverage counting path renderer
+ "$_src/gpu/ccpr/GrAutoMapVertexBuffer.h",
"$_src/gpu/ccpr/GrCCAtlas.cpp",
"$_src/gpu/ccpr/GrCCAtlas.h",
"$_src/gpu/ccpr/GrCCClipPath.cpp",
diff --git a/src/gpu/ccpr/GrAutoMapVertexBuffer.h b/src/gpu/ccpr/GrAutoMapVertexBuffer.h
new file mode 100644
index 0000000..5121e4d
--- /dev/null
+++ b/src/gpu/ccpr/GrAutoMapVertexBuffer.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAutoMapVertexBuffer_DEFINED
+#define GrAutoMapVertexBuffer_DEFINED
+
+#include "include/private/SkNoncopyable.h"
+#include "src/gpu/GrGpuBuffer.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+
+// This class automatically allocates and maps a GPU vertex buffer, and polyfills the mapping
+// functionality with a mirror buffer on CPU if it is not supported.
+class GrAutoMapVertexBuffer : SkNoncopyable {
+public:
+ ~GrAutoMapVertexBuffer() { // Ensures an outstanding mapping is flushed/released before destruction.
+ if (this->isMapped()) {
+ this->unmapBuffer();
+ }
+ }
+
+ const GrGpuBuffer* gpuBuffer() const { return fGpuBuffer.get(); } // Null until resetAndMapBuffer() succeeds.
+ bool isMapped() const { return SkToBool(fData); } // True between a successful resetAndMapBuffer() and unmapBuffer().
+ void* data() const { SkASSERT(this->isMapped()); return fData; } // Writable storage: GPU-mapped memory or the CPU mirror.
+
+ void resetAndMapBuffer(GrOnFlushResourceProvider* onFlushRP, size_t sizeInBytes) { // Allocates a fresh vertex buffer and maps it for writing.
+ if (this->isMapped()) {
+ this->unmapBuffer();
+ }
+ fGpuBuffer = onFlushRP->makeBuffer(GrGpuBufferType::kVertex, sizeInBytes);
+ if (!fGpuBuffer) { // Allocation failed; leave the object in the unmapped, empty state.
+ fSizeInBytes = 0;
+ fData = nullptr;
+ return;
+ }
+ fSizeInBytes = sizeInBytes;
+ fData = fGpuBuffer->map();
+ if (!fData) {
+ // map() failed or is unsupported; polyfill with a CPU mirror that unmapBuffer() will upload.
+ fData = sk_malloc_throw(fSizeInBytes);
+ }
+ }
+
+ void unmapBuffer() { // Publishes fData to the GPU: unmap() if truly mapped, else updateData() upload.
+ SkASSERT(this->isMapped());
+ if (fGpuBuffer->isMapped()) {
+ fGpuBuffer->unmap();
+ } else {
+ // fData is the CPU mirror; upload its contents to the GPU buffer, then free it.
+ fGpuBuffer->updateData(fData, fSizeInBytes);
+ sk_free(fData);
+ }
+ fData = nullptr;
+ }
+
+protected:
+ sk_sp<GrGpuBuffer> fGpuBuffer;
+ size_t fSizeInBytes = 0; // Size of the current buffer; 0 if allocation failed.
+ void* fData = nullptr; // Non-null only while mapped (GPU pointer or CPU mirror).
+};
+
+template<typename T> class GrTAutoMapVertexBuffer : public GrAutoMapVertexBuffer { // Typed view of the mapped buffer as an array of T.
+public:
+ T& operator[](int idx) { // Element access; bounds are verified by SkASSERT in debug builds only.
+ SkASSERT(this->isMapped());
+ SkASSERT(idx >= 0 && (size_t)idx < fSizeInBytes / sizeof(T));
+ return ((T*)fData)[idx];
+ }
+};
+
+#endif
diff --git a/src/gpu/ccpr/GrCCFiller.cpp b/src/gpu/ccpr/GrCCFiller.cpp
index 5e3e305..3816fb6 100644
--- a/src/gpu/ccpr/GrCCFiller.cpp
+++ b/src/gpu/ccpr/GrCCFiller.cpp
@@ -37,7 +37,7 @@
void GrCCFiller::parseDeviceSpaceFill(const SkPath& path, const SkPoint* deviceSpacePts,
GrScissorTest scissorTest, const SkIRect& clippedDevIBounds,
const SkIVector& devToAtlasOffset) {
- SkASSERT(!fInstanceBuffer); // Can't call after prepareToDraw().
+ SkASSERT(!fInstanceBuffer.gpuBuffer()); // Can't call after prepareToDraw().
SkASSERT(!path.isEmpty());
int currPathPointsIdx = fGeometry.points().count();
@@ -206,7 +206,7 @@
}
GrCCFiller::BatchID GrCCFiller::closeCurrentBatch() {
- SkASSERT(!fInstanceBuffer);
+ SkASSERT(!fInstanceBuffer.gpuBuffer());
SkASSERT(!fBatches.empty());
const auto& lastBatch = fBatches.back();
@@ -294,7 +294,7 @@
bool GrCCFiller::prepareToDraw(GrOnFlushResourceProvider* onFlushRP) {
using Verb = GrCCFillGeometry::Verb;
- SkASSERT(!fInstanceBuffer);
+ SkASSERT(!fInstanceBuffer.gpuBuffer());
SkASSERT(fBatches.back().fEndNonScissorIndices == // Call closeCurrentBatch().
fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled]);
SkASSERT(fBatches.back().fEndScissorSubBatchIdx == fScissorSubBatches.count());
@@ -340,16 +340,14 @@
fBaseInstances[1].fConics = fBaseInstances[0].fConics + fTotalPrimitiveCounts[0].fConics;
int quadEndIdx = fBaseInstances[1].fConics + fTotalPrimitiveCounts[1].fConics;
- fInstanceBuffer =
- onFlushRP->makeBuffer(GrGpuBufferType::kVertex, quadEndIdx * sizeof(QuadPointInstance));
- if (!fInstanceBuffer) {
+ fInstanceBuffer.resetAndMapBuffer(onFlushRP, quadEndIdx * sizeof(QuadPointInstance));
+ if (!fInstanceBuffer.gpuBuffer()) {
SkDebugf("WARNING: failed to allocate CCPR fill instance buffer.\n");
return false;
}
- TriPointInstance* triPointInstanceData = static_cast<TriPointInstance*>(fInstanceBuffer->map());
- QuadPointInstance* quadPointInstanceData =
- reinterpret_cast<QuadPointInstance*>(triPointInstanceData);
+ auto triPointInstanceData = reinterpret_cast<TriPointInstance*>(fInstanceBuffer.data());
+ auto quadPointInstanceData = reinterpret_cast<QuadPointInstance*>(fInstanceBuffer.data());
SkASSERT(quadPointInstanceData);
PathInfo* nextPathInfo = fPathInfos.begin();
@@ -453,7 +451,7 @@
}
}
- fInstanceBuffer->unmap();
+ fInstanceBuffer.unmapBuffer();
SkASSERT(nextPathInfo == fPathInfos.end());
SkASSERT(ptsIdx == pts.count() - 1);
@@ -476,7 +474,7 @@
BatchID batchID, const SkIRect& drawBounds) const {
using PrimitiveType = GrCCCoverageProcessor::PrimitiveType;
- SkASSERT(fInstanceBuffer);
+ SkASSERT(fInstanceBuffer.gpuBuffer());
GrResourceProvider* rp = flushState->resourceProvider();
const PrimitiveTallies& batchTotalCounts = fBatches[batchID].fTotalPrimitiveCounts;
@@ -532,7 +530,7 @@
GrOpsRenderPass* renderPass = flushState->opsRenderPass();
proc.bindPipeline(flushState, pipeline, SkRect::Make(drawBounds));
- proc.bindBuffers(renderPass, fInstanceBuffer.get());
+ proc.bindBuffers(renderPass, fInstanceBuffer.gpuBuffer());
SkASSERT(batchID > 0);
SkASSERT(batchID < fBatches.count());
diff --git a/src/gpu/ccpr/GrCCFiller.h b/src/gpu/ccpr/GrCCFiller.h
index 52c3ee4..e69ce20 100644
--- a/src/gpu/ccpr/GrCCFiller.h
+++ b/src/gpu/ccpr/GrCCFiller.h
@@ -12,6 +12,7 @@
#include "include/core/SkRefCnt.h"
#include "src/core/SkPathPriv.h"
#include "src/gpu/GrTessellator.h"
+#include "src/gpu/ccpr/GrAutoMapVertexBuffer.h"
#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
#include "src/gpu/ccpr/GrCCFillGeometry.h"
#include "src/gpu/ops/GrDrawOp.h"
@@ -117,7 +118,7 @@
PrimitiveTallies fTotalPrimitiveCounts[kNumScissorModes];
int fMaxMeshesPerDraw = 0;
- sk_sp<GrGpuBuffer> fInstanceBuffer;
+ GrAutoMapVertexBuffer fInstanceBuffer;
PrimitiveTallies fBaseInstances[kNumScissorModes];
};
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.cpp b/src/gpu/ccpr/GrCCPerFlushResources.cpp
index 7fd5d5c..06dd3ff 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.cpp
+++ b/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -193,8 +193,6 @@
, fRenderedAtlasStack(coverageType, specs.fRenderedAtlasSpecs, onFlushRP->caps())
, fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
, fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
- , fInstanceBuffer(onFlushRP->makeBuffer(GrGpuBufferType::kVertex,
- inst_buffer_count(specs) * sizeof(PathInstance)))
, fNextCopyInstanceIdx(0)
, fNextPathInstanceIdx(
specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) {
@@ -206,23 +204,24 @@
SkDebugf("WARNING: failed to allocate CCPR vertex buffer. No paths will be drawn.\n");
return;
}
- if (!fInstanceBuffer) {
+ fPathInstanceBuffer.resetAndMapBuffer(onFlushRP,
+ inst_buffer_count(specs) * sizeof(PathInstance));
+ if (!fPathInstanceBuffer.gpuBuffer()) {
SkDebugf("WARNING: failed to allocate CCPR instance buffer. No paths will be drawn.\n");
return;
}
- fPathInstanceData = static_cast<PathInstance*>(fInstanceBuffer->map());
- SkASSERT(fPathInstanceData);
if (CoverageType::kA8_Multisample == coverageType) {
int numRenderedPaths =
specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx] +
specs.fNumClipPaths;
- fStencilResolveBuffer = onFlushRP->makeBuffer(
- GrGpuBufferType::kVertex,
- numRenderedPaths * sizeof(GrStencilAtlasOp::ResolveRectInstance));
- fStencilResolveInstanceData = static_cast<GrStencilAtlasOp::ResolveRectInstance*>(
- fStencilResolveBuffer->map());
- SkASSERT(fStencilResolveInstanceData);
+ fStencilResolveBuffer.resetAndMapBuffer(
+ onFlushRP, numRenderedPaths * sizeof(GrStencilAtlasOp::ResolveRectInstance));
+ if (!fStencilResolveBuffer.gpuBuffer()) {
+ SkDebugf("WARNING: failed to allocate CCPR stencil resolve buffer. "
+ "No paths will be drawn.\n");
+ return;
+ }
SkDEBUGCODE(fEndStencilResolveInstance = numRenderedPaths);
}
@@ -284,7 +283,7 @@
(((uint64_t) SK_Half1) << 16) |
(((uint64_t) SK_Half1) << 32) |
(((uint64_t) SK_Half1) << 48);
- fPathInstanceData[currentInstanceIdx].set(entry, newAtlasOffset, kWhite, fillRule);
+ fPathInstanceBuffer[currentInstanceIdx].set(entry, newAtlasOffset, kWhite, fillRule);
// Percolate the instance forward until it's contiguous with other instances that share the same
// proxy.
@@ -294,7 +293,8 @@
return;
}
int rangeFirstInstanceIdx = currentInstanceIdx - fCopyPathRanges[i].fCount;
- std::swap(fPathInstanceData[rangeFirstInstanceIdx], fPathInstanceData[currentInstanceIdx]);
+ std::swap(fPathInstanceBuffer[rangeFirstInstanceIdx],
+ fPathInstanceBuffer[currentInstanceIdx]);
currentInstanceIdx = rangeFirstInstanceIdx;
}
@@ -492,7 +492,7 @@
// "nonzero" settings in front and "even/odd" settings in back.
std::swap(atlasIBounds.fLeft, atlasIBounds.fRight);
}
- fStencilResolveInstanceData[fNextStencilResolveInstanceIdx++] = {
+ fStencilResolveBuffer[fNextStencilResolveInstanceIdx++] = {
(int16_t)atlasIBounds.left(), (int16_t)atlasIBounds.top(),
(int16_t)atlasIBounds.right(), (int16_t)atlasIBounds.bottom()};
}
@@ -504,12 +504,10 @@
SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
fNextStencilResolveInstanceIdx == fEndStencilResolveInstance);
- fInstanceBuffer->unmap();
- fPathInstanceData = nullptr;
+ fPathInstanceBuffer.unmapBuffer();
- if (fStencilResolveBuffer) {
- fStencilResolveBuffer->unmap();
- fStencilResolveInstanceData = nullptr;
+ if (fStencilResolveBuffer.gpuBuffer()) {
+ fStencilResolveBuffer.unmapBuffer();
}
if (!fCopyAtlasStack.empty()) {
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.h b/src/gpu/ccpr/GrCCPerFlushResources.h
index 86f918d..c094a9f 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.h
+++ b/src/gpu/ccpr/GrCCPerFlushResources.h
@@ -9,6 +9,7 @@
#define GrCCPerFlushResources_DEFINED
#include "src/gpu/GrNonAtomicRef.h"
+#include "src/gpu/ccpr/GrAutoMapVertexBuffer.h"
#include "src/gpu/ccpr/GrCCAtlas.h"
#include "src/gpu/ccpr/GrCCFiller.h"
#include "src/gpu/ccpr/GrCCPathProcessor.h"
@@ -70,7 +71,7 @@
GrCCPerFlushResources(
GrOnFlushResourceProvider*, GrCCAtlas::CoverageType,const GrCCPerFlushResourceSpecs&);
- bool isMapped() const { return SkToBool(fPathInstanceData); }
+ bool isMapped() const { return fPathInstanceBuffer.isMapped(); }
GrCCAtlas::CoverageType renderedPathCoverageType() const {
return fRenderedAtlasStack.coverageType();
@@ -103,7 +104,7 @@
GrCCPathProcessor::Instance& appendDrawPathInstance() {
SkASSERT(this->isMapped());
SkASSERT(fNextPathInstanceIdx < fEndPathInstance);
- return fPathInstanceData[fNextPathInstanceIdx++];
+ return fPathInstanceBuffer[fNextPathInstanceIdx++];
}
// Finishes off the GPU buffers and renders the atlas(es).
@@ -118,7 +119,7 @@
}
const GrGpuBuffer* instanceBuffer() const {
SkASSERT(!this->isMapped());
- return fInstanceBuffer.get();
+ return fPathInstanceBuffer.gpuBuffer();
}
const GrGpuBuffer* vertexBuffer() const {
SkASSERT(!this->isMapped());
@@ -126,7 +127,7 @@
}
const GrGpuBuffer* stencilResolveBuffer() const {
SkASSERT(!this->isMapped());
- return fStencilResolveBuffer.get();
+ return fStencilResolveBuffer.gpuBuffer();
}
private:
@@ -148,9 +149,8 @@
const sk_sp<const GrGpuBuffer> fIndexBuffer;
const sk_sp<const GrGpuBuffer> fVertexBuffer;
- const sk_sp<GrGpuBuffer> fInstanceBuffer;
- GrCCPathProcessor::Instance* fPathInstanceData = nullptr;
+ GrTAutoMapVertexBuffer<GrCCPathProcessor::Instance> fPathInstanceBuffer;
int fNextCopyInstanceIdx;
SkDEBUGCODE(int fEndCopyInstance);
int fNextPathInstanceIdx;
@@ -175,8 +175,7 @@
SkSTArray<2, sk_sp<GrTexture>> fRecyclableAtlasTextures;
// Used in MSAA mode make an intermediate draw that resolves stencil winding values to coverage.
- sk_sp<GrGpuBuffer> fStencilResolveBuffer;
- GrStencilAtlasOp::ResolveRectInstance* fStencilResolveInstanceData = nullptr;
+ GrTAutoMapVertexBuffer<GrStencilAtlasOp::ResolveRectInstance> fStencilResolveBuffer;
int fNextStencilResolveInstanceIdx = 0;
SkDEBUGCODE(int fEndStencilResolveInstance);
diff --git a/src/gpu/ccpr/GrCCStroker.cpp b/src/gpu/ccpr/GrCCStroker.cpp
index bbf1486..4b5ef14 100644
--- a/src/gpu/ccpr/GrCCStroker.cpp
+++ b/src/gpu/ccpr/GrCCStroker.cpp
@@ -12,6 +12,7 @@
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrOpsRenderPass.h"
#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/ccpr/GrAutoMapVertexBuffer.h"
#include "src/gpu/ccpr/GrCCCoverageProcessor.h"
#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
@@ -375,16 +376,14 @@
int endConicsIdx = stroker->fBaseInstances[1].fConics +
stroker->fInstanceCounts[1]->fConics;
- fInstanceBuffer = onFlushRP->makeBuffer(GrGpuBufferType::kVertex,
- endConicsIdx * sizeof(ConicInstance));
- if (!fInstanceBuffer) {
+ fInstanceBuffer.resetAndMapBuffer(onFlushRP, endConicsIdx * sizeof(ConicInstance));
+ if (!fInstanceBuffer.gpuBuffer()) {
SkDebugf("WARNING: failed to allocate CCPR stroke instance buffer.\n");
return;
}
- fInstanceBufferData = fInstanceBuffer->map();
}
- bool isMapped() const { return SkToBool(fInstanceBufferData); }
+ bool isMapped() const { return fInstanceBuffer.isMapped(); }
void updateCurrentInfo(const PathInfo& pathInfo) {
SkASSERT(this->isMapped());
@@ -514,10 +513,9 @@
sk_sp<GrGpuBuffer> finish() {
SkASSERT(this->isMapped());
SkASSERT(!memcmp(fNextInstances, fEndInstances, sizeof(fNextInstances)));
- fInstanceBuffer->unmap();
- fInstanceBufferData = nullptr;
+ fInstanceBuffer.unmapBuffer();
SkASSERT(!this->isMapped());
- return std::move(fInstanceBuffer);
+ return sk_ref_sp(fInstanceBuffer.gpuBuffer());
}
private:
@@ -525,7 +523,7 @@
int instanceIdx = fCurrNextInstances->fStrokes[0]++;
SkASSERT(instanceIdx < fCurrEndInstances->fStrokes[0]);
- return reinterpret_cast<LinearStrokeInstance*>(fInstanceBufferData)[instanceIdx];
+ return reinterpret_cast<LinearStrokeInstance*>(fInstanceBuffer.data())[instanceIdx];
}
CubicStrokeInstance& appendCubicStrokeInstance(int numLinearSegmentsLog2) {
@@ -535,21 +533,21 @@
int instanceIdx = fCurrNextInstances->fStrokes[numLinearSegmentsLog2]++;
SkASSERT(instanceIdx < fCurrEndInstances->fStrokes[numLinearSegmentsLog2]);
- return reinterpret_cast<CubicStrokeInstance*>(fInstanceBufferData)[instanceIdx];
+ return reinterpret_cast<CubicStrokeInstance*>(fInstanceBuffer.data())[instanceIdx];
}
TriangleInstance& appendTriangleInstance() {
int instanceIdx = fCurrNextInstances->fTriangles++;
SkASSERT(instanceIdx < fCurrEndInstances->fTriangles);
- return reinterpret_cast<TriangleInstance*>(fInstanceBufferData)[instanceIdx];
+ return reinterpret_cast<TriangleInstance*>(fInstanceBuffer.data())[instanceIdx];
}
ConicInstance& appendConicInstance() {
int instanceIdx = fCurrNextInstances->fConics++;
SkASSERT(instanceIdx < fCurrEndInstances->fConics);
- return reinterpret_cast<ConicInstance*>(fInstanceBufferData)[instanceIdx];
+ return reinterpret_cast<ConicInstance*>(fInstanceBuffer.data())[instanceIdx];
}
float fCurrDX, fCurrDY;
@@ -557,8 +555,7 @@
InstanceTallies* fCurrNextInstances;
SkDEBUGCODE(const InstanceTallies* fCurrEndInstances);
- sk_sp<GrGpuBuffer> fInstanceBuffer;
- void* fInstanceBufferData = nullptr;
+ GrAutoMapVertexBuffer fInstanceBuffer;
InstanceTallies fNextInstances[2];
SkDEBUGCODE(InstanceTallies fEndInstances[2]);
};
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index bede7b6..c6c60f6 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -23,7 +23,6 @@
GrRenderable::kYes);
if (caps.driverBlacklistCCPR() || !shaderCaps.integerSupport() ||
!caps.instanceAttribSupport() || !shaderCaps.floatIs32Bits() ||
- GrCaps::kNone_MapFlags == caps.mapBufferFlags() ||
!defaultA8Format.isValid() || // This checks both texturable and renderable
!caps.halfFloatVertexAttributeSupport()) {
return false;