Cache GLSL shader source in the persistent cache when program binaries can't be cached
Bug: skia:
Change-Id: I56552049c141cbd69fa6b55653e38820d541bf2f
Reviewed-on: https://skia-review.googlesource.com/c/176976
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Ethan Nicholas <ethannicholas@google.com>
diff --git a/src/gpu/gl/builders/GrGLProgramBuilder.cpp b/src/gpu/gl/builders/GrGLProgramBuilder.cpp
index c98d47a..73f4a36 100644
--- a/src/gpu/gl/builders/GrGLProgramBuilder.cpp
+++ b/src/gpu/gl/builders/GrGLProgramBuilder.cpp
@@ -44,7 +44,7 @@
GrGLProgramBuilder builder(gpu, pipeline, primProc, primProcProxies, desc);
auto persistentCache = gpu->getContext()->contextPriv().getPersistentCache();
- if (persistentCache && gpu->glCaps().programBinarySupport()) {
+ if (persistentCache) {
sk_sp<SkData> key = SkData::MakeWithoutCopy(desc->asKey(), desc->keyLength());
builder.fCached = persistentCache->load(*key);
// the eventual end goal is to completely skip emitAndInstallProcs on a cache hit, but it's
@@ -169,6 +169,44 @@
}
}
+void GrGLProgramBuilder::storeShaderInCache(const SkSL::Program::Inputs& inputs, GrGLuint programID,
+ const SkSL::String& glsl) {
+ if (!this->gpu()->getContext()->contextPriv().getPersistentCache()) {
+ return;
+ }
+ sk_sp<SkData> key = SkData::MakeWithoutCopy(desc()->asKey(), desc()->keyLength());
+ if (fGpu->glCaps().programBinarySupport()) {
+ // binary cache
+ GrGLsizei length = 0;
+ GL_CALL(GetProgramiv(programID, GL_PROGRAM_BINARY_LENGTH, &length));
+ if (length > 0) {
+ GrGLenum binaryFormat;
+ std::unique_ptr<char[]> binary(new char[length]);
+ GL_CALL(GetProgramBinary(programID, length, &length, &binaryFormat, binary.get()));
+ size_t dataLength = sizeof(inputs) + sizeof(binaryFormat) + length;
+ std::unique_ptr<uint8_t[]> data(new uint8_t[dataLength]);
+ size_t offset = 0;
+ memcpy(data.get() + offset, &inputs, sizeof(inputs));
+ offset += sizeof(inputs);
+ memcpy(data.get() + offset, &binaryFormat, sizeof(binaryFormat));
+ offset += sizeof(binaryFormat);
+ memcpy(data.get() + offset, binary.get(), length);
+ this->gpu()->getContext()->contextPriv().getPersistentCache()->store(
+ *key, *SkData::MakeWithoutCopy(data.get(), dataLength));
+ }
+ } else {
+ // source cache
+ size_t dataLength = sizeof(inputs) + glsl.length();
+ std::unique_ptr<uint8_t[]> data(new uint8_t[dataLength]);
+ size_t offset = 0;
+ memcpy(data.get() + offset, &inputs, sizeof(inputs));
+ offset += sizeof(inputs);
+ memcpy(data.get() + offset, glsl.data(), glsl.length());
+ this->gpu()->getContext()->contextPriv().getPersistentCache()->store(
+ *key, *SkData::MakeWithoutCopy(data.get(), dataLength));
+ }
+}
+
GrGLProgram* GrGLProgramBuilder::finalize() {
TRACE_EVENT0("skia", TRACE_FUNC);
@@ -196,49 +234,61 @@
SkSL::Program::Inputs inputs;
SkTDArray<GrGLuint> shadersToDelete;
- bool cached = fGpu->glCaps().programBinarySupport() && nullptr != fCached.get();
+ SkSL::String glsl;
+ bool cached = fCached.get() != nullptr;
if (cached) {
- this->bindProgramResourceLocations(programID);
- // cache hit, just hand the binary to GL
const uint8_t* bytes = fCached->bytes();
size_t offset = 0;
memcpy(&inputs, bytes + offset, sizeof(inputs));
offset += sizeof(inputs);
- int binaryFormat;
- memcpy(&binaryFormat, bytes + offset, sizeof(binaryFormat));
- offset += sizeof(binaryFormat);
- GrGLClearErr(this->gpu()->glInterface());
- GR_GL_CALL_NOERRCHECK(this->gpu()->glInterface(),
- ProgramBinary(programID, binaryFormat, (void*) (bytes + offset),
- fCached->size() - offset));
- if (GR_GL_GET_ERROR(this->gpu()->glInterface()) == GR_GL_NO_ERROR) {
- cached = this->checkLinkStatus(programID);
- if (cached) {
- this->addInputVars(inputs);
- this->computeCountsAndStrides(programID, primProc, false);
+ if (fGpu->glCaps().programBinarySupport()) {
+ // binary cache hit, just hand the binary to GL
+ this->bindProgramResourceLocations(programID);
+ int binaryFormat;
+ memcpy(&binaryFormat, bytes + offset, sizeof(binaryFormat));
+ offset += sizeof(binaryFormat);
+ GrGLClearErr(this->gpu()->glInterface());
+ GR_GL_CALL_NOERRCHECK(this->gpu()->glInterface(),
+ ProgramBinary(programID, binaryFormat, (void*) (bytes + offset),
+ fCached->size() - offset));
+ if (GR_GL_GET_ERROR(this->gpu()->glInterface()) == GR_GL_NO_ERROR) {
+ cached = this->checkLinkStatus(programID);
+ if (cached) {
+ this->addInputVars(inputs);
+ this->computeCountsAndStrides(programID, primProc, false);
+ }
+ } else {
+ cached = false;
}
} else {
- cached = false;
+ // source cache hit, we don't need to compile the SkSL->GLSL
+ glsl = SkSL::String(((const char*) bytes) + offset, fCached->size() - offset);
}
}
- if (!cached) {
- // cache miss, compile shaders
- if (fFS.fForceHighPrecision) {
- settings.fForceHighPrecision = true;
+ if (!cached || !fGpu->glCaps().programBinarySupport()) {
+ // either a cache miss, or we can't store binaries in the cache
+        if (glsl == "" || true) {  // TODO: drop "|| true" once the cached-GLSL path is ready; it currently forces recompilation and leaves the else branch below unreachable
+ // don't have cached GLSL, need to compile SkSL->GLSL
+ if (fFS.fForceHighPrecision) {
+ settings.fForceHighPrecision = true;
+ }
+ std::unique_ptr<SkSL::Program> fs = GrSkSLtoGLSL(gpu()->glContext(),
+ GR_GL_FRAGMENT_SHADER,
+ fFS.fCompilerStrings.begin(),
+ fFS.fCompilerStringLengths.begin(),
+ fFS.fCompilerStrings.count(),
+ settings,
+ &glsl);
+ if (!fs) {
+ this->cleanupProgram(programID, shadersToDelete);
+ return nullptr;
+ }
+ inputs = fs->fInputs;
+ } else {
+ // we've pulled GLSL and inputs from the cache, but still need to do some setup
+ this->addInputVars(inputs);
+ this->computeCountsAndStrides(programID, primProc, false);
}
- SkSL::String glsl;
- std::unique_ptr<SkSL::Program> fs = GrSkSLtoGLSL(gpu()->glContext(),
- GR_GL_FRAGMENT_SHADER,
- fFS.fCompilerStrings.begin(),
- fFS.fCompilerStringLengths.begin(),
- fFS.fCompilerStrings.count(),
- settings,
- &glsl);
- if (!fs) {
- this->cleanupProgram(programID, shadersToDelete);
- return nullptr;
- }
- inputs = fs->fInputs;
this->addInputVars(inputs);
if (!this->compileAndAttachShaders(glsl.c_str(), glsl.size(), programID,
GR_GL_FRAGMENT_SHADER, &shadersToDelete, settings,
@@ -314,27 +364,8 @@
this->resolveProgramResourceLocations(programID);
this->cleanupShaders(shadersToDelete);
- if (!cached && this->gpu()->getContext()->contextPriv().getPersistentCache() &&
- fGpu->glCaps().programBinarySupport()) {
- GrGLsizei length = 0;
- GL_CALL(GetProgramiv(programID, GL_PROGRAM_BINARY_LENGTH, &length));
- if (length > 0) {
- // store shader in cache
- sk_sp<SkData> key = SkData::MakeWithoutCopy(desc()->asKey(), desc()->keyLength());
- GrGLenum binaryFormat;
- std::unique_ptr<char[]> binary(new char[length]);
- GL_CALL(GetProgramBinary(programID, length, &length, &binaryFormat, binary.get()));
- size_t dataLength = sizeof(inputs) + sizeof(binaryFormat) + length;
- std::unique_ptr<uint8_t[]> data(new uint8_t[dataLength]);
- size_t offset = 0;
- memcpy(data.get() + offset, &inputs, sizeof(inputs));
- offset += sizeof(inputs);
- memcpy(data.get() + offset, &binaryFormat, sizeof(binaryFormat));
- offset += sizeof(binaryFormat);
- memcpy(data.get() + offset, binary.get(), length);
- this->gpu()->getContext()->contextPriv().getPersistentCache()->store(
- *key, *SkData::MakeWithoutCopy(data.get(), dataLength));
- }
+ if (!cached) {
+ this->storeShaderInCache(inputs, programID, glsl);
}
return this->createProgram(programID);
}
diff --git a/src/gpu/gl/builders/GrGLProgramBuilder.h b/src/gpu/gl/builders/GrGLProgramBuilder.h
index 0ba7d49..9c090b7 100644
--- a/src/gpu/gl/builders/GrGLProgramBuilder.h
+++ b/src/gpu/gl/builders/GrGLProgramBuilder.h
@@ -66,6 +66,8 @@
SkSL::Program::Inputs* outInputs);
void computeCountsAndStrides(GrGLuint programID, const GrPrimitiveProcessor& primProc,
bool bindAttribLocations);
+ void storeShaderInCache(const SkSL::Program::Inputs& inputs, GrGLuint programID,
+ const SkSL::String& glsl);
GrGLProgram* finalize();
void bindProgramResourceLocations(GrGLuint programID);
bool checkLinkStatus(GrGLuint programID);