/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/graphite/dawn/DawnUtils.h"
#include "src/gpu/graphite/dawn/DawnGraphiteUtilsPriv.h"

#include "include/gpu/ShaderErrorHandler.h"
#include "include/gpu/graphite/Context.h"
#include "include/gpu/graphite/dawn/DawnBackendContext.h"
#include "src/gpu/graphite/ContextPriv.h"
#include "src/gpu/graphite/dawn/DawnQueueManager.h"
#include "src/gpu/graphite/dawn/DawnSharedContext.h"
namespace skgpu::graphite {

namespace ContextFactory {
std::unique_ptr<Context> MakeDawn(const DawnBackendContext& backendContext,
                                  const ContextOptions& options) {
    sk_sp<SharedContext> sharedContext = DawnSharedContext::Make(backendContext, options);
    if (!sharedContext) {
        return nullptr;
    }

    auto queueManager =
            std::make_unique<DawnQueueManager>(backendContext.fQueue, sharedContext.get());
    if (!queueManager) {
        return nullptr;
    }

    auto context = ContextCtorAccessor::MakeContext(std::move(sharedContext),
                                                    std::move(queueManager),
                                                    options);
    SkASSERT(context);
    return context;
}
} // namespace ContextFactory

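// Example (illustrative sketch, not part of this file's API): a client that has already
// created a wgpu::Device could build a Graphite Context roughly as follows. The `device`
// variable is assumed to exist; error handling beyond the null check is omitted.
//
//     DawnBackendContext backendContext;
//     backendContext.fDevice = device;
//     backendContext.fQueue = device.GetQueue();
//     std::unique_ptr<Context> context =
//             ContextFactory::MakeDawn(backendContext, ContextOptions{});
//     if (!context) {
//         // Creation failed, e.g. the DawnSharedContext could not be made.
//     }
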
bool DawnFormatIsDepthOrStencil(wgpu::TextureFormat format) {
    switch (format) {
        case wgpu::TextureFormat::Stencil8: // fallthrough
        case wgpu::TextureFormat::Depth32Float:
        case wgpu::TextureFormat::Depth24PlusStencil8:
        case wgpu::TextureFormat::Depth32FloatStencil8:
            return true;
        default:
            return false;
    }
}

bool DawnFormatIsDepth(wgpu::TextureFormat format) {
    switch (format) {
        case wgpu::TextureFormat::Depth32Float:
        case wgpu::TextureFormat::Depth24PlusStencil8:
        case wgpu::TextureFormat::Depth32FloatStencil8:
            return true;
        default:
            return false;
    }
}

bool DawnFormatIsStencil(wgpu::TextureFormat format) {
    switch (format) {
        case wgpu::TextureFormat::Stencil8: // fallthrough
        case wgpu::TextureFormat::Depth24PlusStencil8:
        case wgpu::TextureFormat::Depth32FloatStencil8:
            return true;
        default:
            return false;
    }
}

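// Note that DawnFormatIsDepthOrStencil(f) is equivalent to
// DawnFormatIsDepth(f) || DawnFormatIsStencil(f); the single-aspect helpers let callers query
// one aspect at a time. For example (illustrative):
//
//     wgpu::TextureFormat f = wgpu::TextureFormat::Depth24PlusStencil8;
//     SkASSERT(DawnFormatIsDepth(f) && DawnFormatIsStencil(f) && DawnFormatIsDepthOrStencil(f));
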
wgpu::TextureFormat DawnDepthStencilFlagsToFormat(SkEnumBitMask<DepthStencilFlags> mask) {
    // TODO: Decide if we want to change this to always return a combined depth and stencil
    // format to allow more sharing of depth-stencil allocations.
    if (mask == DepthStencilFlags::kDepth) {
        // wgpu::TextureFormat::Depth16Unorm is also a universally supported option here.
        return wgpu::TextureFormat::Depth32Float;
    } else if (mask == DepthStencilFlags::kStencil) {
        return wgpu::TextureFormat::Stencil8;
    } else if (mask == DepthStencilFlags::kDepthStencil) {
        return wgpu::TextureFormat::Depth24PlusStencil8;
    }
    SkASSERT(false);
    return wgpu::TextureFormat::Undefined;
}

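// Example (illustrative): mapping requested attachment aspects to a Dawn texture format.
//
//     wgpu::TextureFormat depthOnly =
//             DawnDepthStencilFlagsToFormat(DepthStencilFlags::kDepth);        // Depth32Float
//     wgpu::TextureFormat combined =
//             DawnDepthStencilFlagsToFormat(DepthStencilFlags::kDepthStencil); // Depth24PlusStencil8
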
static bool check_shader_module(wgpu::ShaderModule* module,
                                const char* shaderText,
                                ShaderErrorHandler* errorHandler) {
    struct Handler {
        static void Fn(WGPUCompilationInfoRequestStatus status,
                       const WGPUCompilationInfo* info,
                       void* userdata) {
            Handler* self = reinterpret_cast<Handler*>(userdata);
            SkASSERT(status == WGPUCompilationInfoRequestStatus_Success);

            // Walk the message list and check for hard errors.
            self->fSuccess = true;
            for (size_t index = 0; index < info->messageCount; ++index) {
                const WGPUCompilationMessage& entry = info->messages[index];
                if (entry.type == WGPUCompilationMessageType_Error) {
                    self->fSuccess = false;
                    break;
                }
            }

            // If we found a hard error, report the compilation messages to the error handler.
            if (!self->fSuccess) {
                std::string errors;
                for (size_t index = 0; index < info->messageCount; ++index) {
                    const WGPUCompilationMessage& entry = info->messages[index];
                    errors += "line " +
                              std::to_string(entry.lineNum) + ':' +
                              std::to_string(entry.linePos) + ' ' +
                              entry.message + '\n';
                }
                self->fErrorHandler->compileError(self->fShaderText, errors.c_str());
            }
        }

        const char* fShaderText;
        ShaderErrorHandler* fErrorHandler;
        bool fSuccess = false;
    };

    Handler handler;
    handler.fShaderText = shaderText;
    handler.fErrorHandler = errorHandler;
    module->GetCompilationInfo(&Handler::Fn, &handler);

    return handler.fSuccess;
}

bool DawnCompileSPIRVShaderModule(const DawnSharedContext* sharedContext,
                                  const std::string& spirv,
                                  wgpu::ShaderModule* module,
                                  ShaderErrorHandler* errorHandler) {
    // The SPIR-V binary is carried in a std::string of bytes; codeSize is in 32-bit words.
    wgpu::ShaderModuleSPIRVDescriptor spirvDesc;
    spirvDesc.codeSize = spirv.size() / 4;
    spirvDesc.code = reinterpret_cast<const uint32_t*>(spirv.data());

    // Skia often generates shaders that select a texture/sampler conditionally based on an
    // attribute (specifically in the case of texture atlas indexing). We disable derivative
    // uniformity warnings as we expect Skia's behavior to result in well-defined values.
    wgpu::DawnShaderModuleSPIRVOptionsDescriptor dawnSpirvOptions;
    dawnSpirvOptions.allowNonUniformDerivatives = true;

    // Chain the SPIR-V source and the Dawn-specific options onto the base descriptor.
    wgpu::ShaderModuleDescriptor desc;
    desc.nextInChain = &spirvDesc;
    spirvDesc.nextInChain = &dawnSpirvOptions;

    *module = sharedContext->device().CreateShaderModule(&desc);

    return check_shader_module(module, "[SPIR-V omitted]", errorHandler);
}

bool DawnCompileWGSLShaderModule(const DawnSharedContext* sharedContext,
                                 const std::string& wgsl,
                                 wgpu::ShaderModule* module,
                                 ShaderErrorHandler* errorHandler) {
    wgpu::ShaderModuleWGSLDescriptor wgslDesc;
    wgslDesc.code = wgsl.c_str();

    wgpu::ShaderModuleDescriptor desc;
    desc.nextInChain = &wgslDesc;

    *module = sharedContext->device().CreateShaderModule(&desc);

    return check_shader_module(module, wgsl.c_str(), errorHandler);
}

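// Example (illustrative sketch): compiling a WGSL module and reporting failures through a
// ShaderErrorHandler. `sharedContext` and `errorHandler` are assumed to come from an existing
// Graphite context; the WGSL source is a trivial placeholder.
//
//     wgpu::ShaderModule module;
//     std::string wgsl = "@compute @workgroup_size(1) fn main() {}";
//     if (!DawnCompileWGSLShaderModule(sharedContext, wgsl, &module, errorHandler)) {
//         // Compilation failed; errorHandler->compileError() has already been called with
//         // the WGSL source and the error messages.
//     }
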
} // namespace skgpu::graphite