| /* |
| * Copyright 2011 Google Inc. |
| * |
| * Use of this source code is governed by a BSD-style license that can be |
| * found in the LICENSE file. |
| */ |
| |
| #include "GrGLGpu.h" |
| #include "GrBackendSemaphore.h" |
| #include "GrBackendSurface.h" |
| #include "GrFixedClip.h" |
| #include "GrGLBuffer.h" |
| #include "GrGLGpuCommandBuffer.h" |
| #include "GrGLSemaphore.h" |
| #include "GrGLStencilAttachment.h" |
| #include "GrGLTextureRenderTarget.h" |
| #include "GrGpuResourcePriv.h" |
| #include "GrMesh.h" |
| #include "GrPipeline.h" |
| #include "GrRenderTargetPriv.h" |
| #include "GrShaderCaps.h" |
| #include "GrSurfaceProxyPriv.h" |
| #include "GrTexturePriv.h" |
| #include "GrTypes.h" |
| #include "SkAutoMalloc.h" |
| #include "SkConvertPixels.h" |
| #include "SkHalf.h" |
| #include "SkJSONWriter.h" |
| #include "SkMakeUnique.h" |
| #include "SkMipMap.h" |
| #include "SkPixmap.h" |
| #include "SkSLCompiler.h" |
| #include "SkStrokeRec.h" |
| #include "SkTemplates.h" |
| #include "SkTo.h" |
| #include "SkTraceEvent.h" |
| #include "SkTypes.h" |
| #include "builders/GrGLShaderStringBuilder.h" |
| |
| #include <cmath> |
| |
| #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X) |
| #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X) |
| |
| #define SKIP_CACHE_CHECK true |
| |
| #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR |
| #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface) |
| #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call) |
| #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface) |
| #else |
| #define CLEAR_ERROR_BEFORE_ALLOC(iface) |
| #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) |
| #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR |
| #endif |
| |
| //#define USE_NSIGHT |
| |
| /////////////////////////////////////////////////////////////////////////////// |
| |
// Maps GrBlendEquation values to GL blend-equation enums. The table is indexed
// directly by GrBlendEquation, so its ordering must track the enum exactly; the
// GR_STATIC_ASSERTs below pin that correspondence at compile time.
static const GrGLenum gXfermodeEquation2Blend[] = {
    // Basic OpenGL blend equations.
    GR_GL_FUNC_ADD,
    GR_GL_FUNC_SUBTRACT,
    GR_GL_FUNC_REVERSE_SUBTRACT,

    // GL_KHR_blend_equation_advanced.
    GR_GL_SCREEN,
    GR_GL_OVERLAY,
    GR_GL_DARKEN,
    GR_GL_LIGHTEN,
    GR_GL_COLORDODGE,
    GR_GL_COLORBURN,
    GR_GL_HARDLIGHT,
    GR_GL_SOFTLIGHT,
    GR_GL_DIFFERENCE,
    GR_GL_EXCLUSION,
    GR_GL_MULTIPLY,
    GR_GL_HSL_HUE,
    GR_GL_HSL_SATURATION,
    GR_GL_HSL_COLOR,
    GR_GL_HSL_LUMINOSITY
};
// Verify the table entries line up with the GrBlendEquation enum values.
GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
GR_STATIC_ASSERT(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);
| |
// Maps GrBlendCoeff values to GL blend-factor enums, indexed directly by
// GrBlendCoeff. The ordering asserts live in GrGLGpu::BlendCoeffReferencesConstant
// because they reference GrGLGpu-scoped names.
static const GrGLenum gXfermodeCoeff2Blend[] = {
    GR_GL_ZERO,
    GR_GL_ONE,
    GR_GL_SRC_COLOR,
    GR_GL_ONE_MINUS_SRC_COLOR,
    GR_GL_DST_COLOR,
    GR_GL_ONE_MINUS_DST_COLOR,
    GR_GL_SRC_ALPHA,
    GR_GL_ONE_MINUS_SRC_ALPHA,
    GR_GL_DST_ALPHA,
    GR_GL_ONE_MINUS_DST_ALPHA,
    GR_GL_CONSTANT_COLOR,
    GR_GL_ONE_MINUS_CONSTANT_COLOR,
    GR_GL_CONSTANT_ALPHA,
    GR_GL_ONE_MINUS_CONSTANT_ALPHA,

    // extended blend coeffs (GL dual-source blending)
    GR_GL_SRC1_COLOR,
    GR_GL_ONE_MINUS_SRC1_COLOR,
    GR_GL_SRC1_ALPHA,
    GR_GL_ONE_MINUS_SRC1_ALPHA,
};
| |
| bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) { |
| static const bool gCoeffReferencesBlendConst[] = { |
| false, |
| false, |
| false, |
| false, |
| false, |
| false, |
| false, |
| false, |
| false, |
| false, |
| true, |
| true, |
| true, |
| true, |
| |
| // extended blend coeffs |
| false, |
| false, |
| false, |
| false, |
| }; |
| return gCoeffReferencesBlendConst[coeff]; |
| GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst)); |
| |
| GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff); |
| GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff); |
| GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff); |
| GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff); |
| GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff); |
| GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff); |
| GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff); |
| GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff); |
| GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff); |
| GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff); |
| GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff); |
| GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff); |
| GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff); |
| GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff); |
| |
| GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff); |
| GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff); |
| GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff); |
| GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff); |
| |
| // assertion for gXfermodeCoeff2Blend have to be in GrGpu scope |
| GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gXfermodeCoeff2Blend)); |
| } |
| |
| /////////////////////////////////////////////////////////////////////////////// |
| |
// Factory entry point for the GL backend. If the caller supplies no interface, attempts to create
// a native one; returns nullptr if no usable interface or GL context can be made.
sk_sp<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface, const GrContextOptions& options,
                           GrContext* context) {
    if (!interface) {
        interface = GrGLMakeNativeInterface();
        // For clients that have written their own GrGLCreateNativeInterface and haven't yet updated
        // to GrGLMakeNativeInterface.
        if (!interface) {
            interface = sk_ref_sp(GrGLCreateNativeInterface());
        }
        if (!interface) {
            return nullptr;
        }
    }
#ifdef USE_NSIGHT
    // NOTE(review): casts away const on the caller's options; only active in NSight debugging
    // builds, where path rendering must be suppressed.
    const_cast<GrContextOptions&>(options).fSuppressPathRendering = true;
#endif
    auto glContext = GrGLContext::Make(std::move(interface), options);
    if (!glContext) {
        return nullptr;
    }
    return sk_sp<GrGpu>(new GrGLGpu(std::move(glContext), context));
}
| |
// Constructor: takes ownership of the GL context wrapper and initializes the cached-HW-state
// tracking (texture bindings, buffer bindings, path rendering) used to avoid redundant GL calls.
GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrContext* context)
        : GrGpu(context)
        , fGLContext(std::move(ctx))
        , fProgramCache(new ProgramCache(this))
        , fHWProgramID(0)
        , fTempSrcFBOID(0)
        , fTempDstFBOID(0)
        , fStencilClearFBOID(0)
        , fHWMaxUsedBufferTextureUnit(-1)
        , fHWMinSampleShading(0.0) {
    SkASSERT(fGLContext);
    fCaps = sk_ref_sp(fGLContext->caps());

    // One cached binding slot per combined sampler the shader stage can use.
    fHWBoundTextureUniqueIDs.reset(this->caps()->shaderCaps()->maxCombinedSamplers());

    // Map each GrBufferType to its GL bind target. Chromium exposes dedicated transfer-buffer
    // targets; elsewhere the standard pixel pack/unpack targets are used.
    fHWBufferState[kVertex_GrBufferType].fGLTarget = GR_GL_ARRAY_BUFFER;
    fHWBufferState[kIndex_GrBufferType].fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
    fHWBufferState[kTexel_GrBufferType].fGLTarget = GR_GL_TEXTURE_BUFFER;
    fHWBufferState[kDrawIndirect_GrBufferType].fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
    if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
        fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget =
                GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
        fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget =
                GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
    } else {
        fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
        fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
    }
    // Start with all cached buffer bindings unknown.
    for (int i = 0; i < kGrBufferTypeCount; ++i) {
        fHWBufferState[i].invalidate();
    }
    GR_STATIC_ASSERT(6 == SK_ARRAY_COUNT(fHWBufferState));

    if (this->caps()->shaderCaps()->texelBufferSupport()) {
        fHWBufferTextures.reset(this->caps()->shaderCaps()->maxCombinedSamplers());
    }

    if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
        fPathRendering.reset(new GrGLPathRendering(this));
    }

    // Clear any GL error state left behind by the client before we start issuing calls.
    GrGLClearErr(this->glInterface());
}
| |
| GrGLGpu::~GrGLGpu() { |
| // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu |
| // to release the resources held by the objects themselves. |
| fPathRendering.reset(); |
| fCopyProgramArrayBuffer.reset(); |
| fMipmapProgramArrayBuffer.reset(); |
| fStencilClipClearArrayBuffer.reset(); |
| |
| fHWProgram.reset(); |
| if (fHWProgramID) { |
| // detach the current program so there is no confusion on OpenGL's part |
| // that we want it to be deleted |
| GL_CALL(UseProgram(0)); |
| } |
| |
| if (fTempSrcFBOID) { |
| this->deleteFramebuffer(fTempSrcFBOID); |
| } |
| if (fTempDstFBOID) { |
| this->deleteFramebuffer(fTempDstFBOID); |
| } |
| if (fStencilClearFBOID) { |
| this->deleteFramebuffer(fStencilClearFBOID); |
| } |
| |
| for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) { |
| if (0 != fCopyPrograms[i].fProgram) { |
| GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram)); |
| } |
| } |
| |
| for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) { |
| if (0 != fMipmapPrograms[i].fProgram) { |
| GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram)); |
| } |
| } |
| |
| if (fStencilClipClearProgram) { |
| GL_CALL(DeleteProgram(fStencilClipClearProgram)); |
| } |
| |
| if (fClearColorProgram.fProgram) { |
| GL_CALL(DeleteProgram(fClearColorProgram.fProgram)); |
| } |
| |
| delete fProgramCache; |
| } |
| |
// Called when the owning GrContext is released or abandoned. For kCleanup the GL context is still
// current, so GL objects are actually deleted; otherwise only the bookkeeping is dropped (the
// context is gone and issuing GL calls would be invalid).
void GrGLGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (DisconnectType::kCleanup == type) {
        if (fHWProgramID) {
            GL_CALL(UseProgram(0));
        }
        if (fTempSrcFBOID) {
            this->deleteFramebuffer(fTempSrcFBOID);
        }
        if (fTempDstFBOID) {
            this->deleteFramebuffer(fTempDstFBOID);
        }
        if (fStencilClearFBOID) {
            this->deleteFramebuffer(fStencilClearFBOID);
        }
        for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
            if (fCopyPrograms[i].fProgram) {
                GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
            }
        }
        for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
            if (fMipmapPrograms[i].fProgram) {
                GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
            }
        }
        if (fStencilClipClearProgram) {
            GL_CALL(DeleteProgram(fStencilClipClearProgram));
        }

        if (fClearColorProgram.fProgram) {
            GL_CALL(DeleteProgram(fClearColorProgram.fProgram));
        }
    } else {
        // Abandoned: tell the program cache not to free GL objects it no longer owns.
        if (fProgramCache) {
            fProgramCache->abandon();
        }
    }

    // From here on, zero out all cached handles regardless of disconnect type so nothing is
    // accidentally reused.
    fHWProgram.reset();
    delete fProgramCache;
    fProgramCache = nullptr;

    fHWProgramID = 0;
    fTempSrcFBOID = 0;
    fTempDstFBOID = 0;
    fStencilClearFBOID = 0;
    fCopyProgramArrayBuffer.reset();
    for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
        fCopyPrograms[i].fProgram = 0;
    }
    fMipmapProgramArrayBuffer.reset();
    for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
        fMipmapPrograms[i].fProgram = 0;
    }
    fStencilClipClearProgram = 0;
    fStencilClipClearArrayBuffer.reset();
    fClearColorProgram.fProgram = 0;

    if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
        this->glPathRendering()->disconnect(type);
    }
}
| |
| /////////////////////////////////////////////////////////////////////////////// |
| |
// Re-establishes our assumed GL state after third-party code may have modified the context.
// resetBits is a mask of GrGLBackendState categories; only the flagged categories are reset,
// either by re-issuing GL calls or by invalidating the corresponding cached-HW-state tracking.
void GrGLGpu::onResetContext(uint32_t resetBits) {
    if (resetBits & kMisc_GrGLBackendState) {
        // we don't use the zb at all
        GL_CALL(Disable(GR_GL_DEPTH_TEST));
        GL_CALL(DepthMask(GR_GL_FALSE));

        // We don't use face culling.
        GL_CALL(Disable(GR_GL_CULL_FACE));
        // We do use separate stencil. Our algorithms don't care which face is front vs. back so
        // just set this to the default for self-consistency.
        GL_CALL(FrontFace(GR_GL_CCW));

        fHWBufferState[kTexel_GrBufferType].invalidate();
        fHWBufferState[kDrawIndirect_GrBufferType].invalidate();
        fHWBufferState[kXferCpuToGpu_GrBufferType].invalidate();
        fHWBufferState[kXferGpuToCpu_GrBufferType].invalidate();

        if (kGL_GrGLStandard == this->glStandard()) {
#ifndef USE_NSIGHT
            // Desktop-only state that we never change
            if (!this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
            }
            // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
            // core profile. This seems like a bug since the core spec removes any mention of
            // GL_ARB_imaging.
            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_COLOR_TABLE));
            }
            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));

            if (this->caps()->wireframeMode()) {
                GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE));
            } else {
                GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL));
            }
#endif
            // Since ES doesn't support glPointSize at all we always use the VS to
            // set the point size
            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));

        }

        if (kGLES_GrGLStandard == this->glStandard() &&
            this->hasExtension("GL_ARM_shader_framebuffer_fetch")) {
            // The arm extension requires specifically enabling MSAA fetching per sample.
            // On some devices this may have a perf hit. Also multiple render targets are disabled
            GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM));
        }
        fHWWriteToColor = kUnknown_TriState;
        // we only ever use lines in hairline mode
        GL_CALL(LineWidth(1));
        GL_CALL(Disable(GR_GL_DITHER));
    }

    if (resetBits & kMSAAEnable_GrGLBackendState) {
        fMSAAEnabled = kUnknown_TriState;

        if (this->caps()->usesMixedSamples()) {
            if (0 != this->caps()->maxRasterSamples()) {
                fHWRasterMultisampleEnabled = kUnknown_TriState;
                fHWNumRasterSamples = 0;
            }

            // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage
            // modulation. This state has no effect when not rendering to a mixed sampled target.
            GL_CALL(CoverageModulation(GR_GL_RGBA));
        }
    }

    // These two are reset unconditionally: they're cheap to re-establish and easy to get wrong.
    fHWActiveTextureUnitIdx = -1; // invalid
    fLastPrimitiveType = static_cast<GrPrimitiveType>(-1);

    if (resetBits & kTextureBinding_GrGLBackendState) {
        for (int s = 0; s < fHWBoundTextureUniqueIDs.count(); ++s) {
            fHWBoundTextureUniqueIDs[s].makeInvalid();
        }
        for (int b = 0; b < fHWBufferTextures.count(); ++b) {
            SkASSERT(this->caps()->shaderCaps()->texelBufferSupport());
            fHWBufferTextures[b].fKnownBound = false;
        }
    }

    if (resetBits & kBlend_GrGLBackendState) {
        fHWBlendState.invalidate();
    }

    if (resetBits & kView_GrGLBackendState) {
        fHWScissorSettings.invalidate();
        fHWWindowRectsState.invalidate();
        fHWViewport.invalidate();
    }

    if (resetBits & kStencil_GrGLBackendState) {
        fHWStencilSettings.invalidate();
        fHWStencilTestEnabled = kUnknown_TriState;
    }

    // Vertex
    if (resetBits & kVertex_GrGLBackendState) {
        fHWVertexArrayState.invalidate();
        fHWBufferState[kVertex_GrBufferType].invalidate();
        fHWBufferState[kIndex_GrBufferType].invalidate();
        if (this->glCaps().requiresFlushBetweenNonAndInstancedDraws()) {
            fRequiresFlushBeforeNextInstancedDraw = true;
        }
    }

    if (resetBits & kRenderTarget_GrGLBackendState) {
        fHWBoundRenderTargetUniqueID.makeInvalid();
        fHWSRGBFramebuffer = kUnknown_TriState;
    }

    if (resetBits & kPathRendering_GrGLBackendState) {
        if (this->caps()->shaderCaps()->pathRenderingSupport()) {
            this->glPathRendering()->resetContext();
        }
    }

    // we assume these values
    if (resetBits & kPixelStore_GrGLBackendState) {
        if (this->glCaps().unpackRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().unpackFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
        }
        if (this->glCaps().packFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
        }
    }

    if (resetBits & kProgram_GrGLBackendState) {
        fHWProgramID = 0;
        fHWProgram.reset();
    }
}
| |
| static bool check_backend_texture(const GrBackendTexture& backendTex, const GrGLCaps& caps, |
| GrGLTexture::IDDesc* idDesc) { |
| GrGLTextureInfo info; |
| if (!backendTex.getGLTextureInfo(&info) || !info.fID) { |
| return false; |
| } |
| |
| idDesc->fInfo = info; |
| |
| if (GR_GL_TEXTURE_EXTERNAL == idDesc->fInfo.fTarget) { |
| if (!caps.shaderCaps()->externalTextureSupport()) { |
| return false; |
| } |
| } else if (GR_GL_TEXTURE_RECTANGLE == idDesc->fInfo.fTarget) { |
| if (!caps.rectangleTextureSupport()) { |
| return false; |
| } |
| } else if (GR_GL_TEXTURE_2D != idDesc->fInfo.fTarget) { |
| return false; |
| } |
| return true; |
| } |
| |
| sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex, |
| GrWrapOwnership ownership) { |
| GrGLTexture::IDDesc idDesc; |
| if (!check_backend_texture(backendTex, this->glCaps(), &idDesc)) { |
| return nullptr; |
| } |
| if (!idDesc.fInfo.fFormat) { |
| idDesc.fInfo.fFormat = this->glCaps().configSizedInternalFormat(backendTex.config()); |
| } |
| if (kBorrow_GrWrapOwnership == ownership) { |
| idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed; |
| } else { |
| idDesc.fOwnership = GrBackendObjectOwnership::kOwned; |
| } |
| |
| GrSurfaceDesc surfDesc; |
| surfDesc.fFlags = kNone_GrSurfaceFlags; |
| surfDesc.fWidth = backendTex.width(); |
| surfDesc.fHeight = backendTex.height(); |
| surfDesc.fConfig = backendTex.config(); |
| surfDesc.fSampleCnt = 1; |
| |
| GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kValid |
| : GrMipMapsStatus::kNotAllocated; |
| |
| return GrGLTexture::MakeWrapped(this, surfDesc, mipMapsStatus, idDesc); |
| } |
| |
| sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex, |
| int sampleCnt, |
| GrWrapOwnership ownership) { |
| GrGLTexture::IDDesc idDesc; |
| if (!check_backend_texture(backendTex, this->glCaps(), &idDesc)) { |
| return nullptr; |
| } |
| if (!idDesc.fInfo.fFormat) { |
| idDesc.fInfo.fFormat = this->glCaps().configSizedInternalFormat(backendTex.config()); |
| } |
| |
| // We don't support rendering to a EXTERNAL texture. |
| if (GR_GL_TEXTURE_EXTERNAL == idDesc.fInfo.fTarget) { |
| return nullptr; |
| } |
| |
| if (kBorrow_GrWrapOwnership == ownership) { |
| idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed; |
| } else { |
| idDesc.fOwnership = GrBackendObjectOwnership::kOwned; |
| } |
| |
| GrSurfaceDesc surfDesc; |
| surfDesc.fFlags = kRenderTarget_GrSurfaceFlag; |
| surfDesc.fWidth = backendTex.width(); |
| surfDesc.fHeight = backendTex.height(); |
| surfDesc.fConfig = backendTex.config(); |
| surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, backendTex.config()); |
| if (surfDesc.fSampleCnt < 1) { |
| return nullptr; |
| } |
| |
| GrGLRenderTarget::IDDesc rtIDDesc; |
| if (!this->createRenderTargetObjects(surfDesc, idDesc.fInfo, &rtIDDesc)) { |
| return nullptr; |
| } |
| |
| GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kDirty |
| : GrMipMapsStatus::kNotAllocated; |
| |
| sk_sp<GrGLTextureRenderTarget> texRT( |
| GrGLTextureRenderTarget::MakeWrapped(this, surfDesc, idDesc, rtIDDesc, mipMapsStatus)); |
| texRT->baseLevelWasBoundToFBO(); |
| return std::move(texRT); |
| } |
| |
| sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) { |
| GrGLFramebufferInfo info; |
| if (!backendRT.getGLFramebufferInfo(&info)) { |
| return nullptr; |
| } |
| |
| GrGLRenderTarget::IDDesc idDesc; |
| idDesc.fRTFBOID = info.fFBOID; |
| idDesc.fMSColorRenderbufferID = 0; |
| idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID; |
| idDesc.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed; |
| idDesc.fIsMixedSampled = false; |
| |
| GrSurfaceDesc desc; |
| desc.fFlags = kRenderTarget_GrSurfaceFlag; |
| desc.fWidth = backendRT.width(); |
| desc.fHeight = backendRT.height(); |
| desc.fConfig = backendRT.config(); |
| desc.fSampleCnt = |
| this->caps()->getRenderTargetSampleCount(backendRT.sampleCnt(), backendRT.config()); |
| |
| return GrGLRenderTarget::MakeWrapped(this, desc, idDesc, backendRT.stencilBits()); |
| } |
| |
| sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex, |
| int sampleCnt) { |
| GrGLTextureInfo info; |
| if (!tex.getGLTextureInfo(&info) || !info.fID) { |
| return nullptr; |
| } |
| |
| if (GR_GL_TEXTURE_RECTANGLE != info.fTarget && |
| GR_GL_TEXTURE_2D != info.fTarget) { |
| // Only texture rectangle and texture 2d are supported. We do not check whether texture |
| // rectangle is supported by Skia - if the caller provided us with a texture rectangle, |
| // we assume the necessary support exists. |
| return nullptr; |
| } |
| |
| GrSurfaceDesc surfDesc; |
| surfDesc.fFlags = kRenderTarget_GrSurfaceFlag; |
| surfDesc.fWidth = tex.width(); |
| surfDesc.fHeight = tex.height(); |
| surfDesc.fConfig = tex.config(); |
| surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.config()); |
| |
| GrGLRenderTarget::IDDesc rtIDDesc; |
| if (!this->createRenderTargetObjects(surfDesc, info, &rtIDDesc)) { |
| return nullptr; |
| } |
| return GrGLRenderTarget::MakeWrapped(this, surfDesc, rtIDDesc, 0); |
| } |
| |
| static bool check_write_and_transfer_input(GrGLTexture* glTex) { |
| if (!glTex) { |
| return false; |
| } |
| |
| // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures |
| if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) { |
| return false; |
| } |
| |
| return true; |
| } |
| |
| bool GrGLGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height, |
| GrColorType srcColorType, const GrMipLevel texels[], |
| int mipLevelCount) { |
| auto glTex = static_cast<GrGLTexture*>(surface->asTexture()); |
| |
| if (!check_write_and_transfer_input(glTex)) { |
| return false; |
| } |
| |
| this->setScratchTextureUnit(); |
| GL_CALL(BindTexture(glTex->target(), glTex->textureID())); |
| |
| // No sRGB transformation occurs in uploadTexData. We choose to make the src config match the |
| // srgb-ness of the surface to avoid issues in ES2 where internal/external formats must match. |
| // When we're on ES2 and the dst is GL_SRGB_ALPHA by making the config be kSRGB_8888 we know |
| // that our caps will choose GL_SRGB_ALPHA as the external format, too. On ES3 or regular GL our |
| // caps knows to make the external format be GL_RGBA. |
| auto srgbEncoded = GrPixelConfigIsSRGBEncoded(surface->config()); |
| auto srcAsConfig = GrColorTypeToPixelConfig(srcColorType, srgbEncoded); |
| return this->uploadTexData(glTex->config(), glTex->width(), glTex->height(), glTex->target(), |
| kWrite_UploadType, left, top, width, height, srcAsConfig, texels, |
| mipLevelCount); |
| } |
| |
| // For GL_[UN]PACK_ALIGNMENT. TODO: This really wants to be GrColorType. |
| static inline GrGLint config_alignment(GrPixelConfig config) { |
| switch (config) { |
| case kAlpha_8_GrPixelConfig: |
| case kAlpha_8_as_Alpha_GrPixelConfig: |
| case kAlpha_8_as_Red_GrPixelConfig: |
| case kGray_8_GrPixelConfig: |
| case kGray_8_as_Lum_GrPixelConfig: |
| case kGray_8_as_Red_GrPixelConfig: |
| return 1; |
| case kRGB_565_GrPixelConfig: |
| case kRGBA_4444_GrPixelConfig: |
| case kAlpha_half_GrPixelConfig: |
| case kAlpha_half_as_Red_GrPixelConfig: |
| case kRGBA_half_GrPixelConfig: |
| return 2; |
| case kRGBA_8888_GrPixelConfig: |
| case kRGB_888_GrPixelConfig: // We're really talking about GrColorType::kRGB_888x here. |
| case kBGRA_8888_GrPixelConfig: |
| case kSRGBA_8888_GrPixelConfig: |
| case kSBGRA_8888_GrPixelConfig: |
| case kRGBA_1010102_GrPixelConfig: |
| case kRGBA_float_GrPixelConfig: |
| case kRG_float_GrPixelConfig: |
| return 4; |
| case kUnknown_GrPixelConfig: |
| return 0; |
| } |
| SK_ABORT("Invalid pixel config"); |
| return 0; |
| } |
| |
| bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height, |
| GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset, |
| size_t rowBytes) { |
| GrGLTexture* glTex = static_cast<GrGLTexture*>(texture); |
| GrPixelConfig texConfig = glTex->config(); |
| SkASSERT(this->caps()->isConfigTexturable(texConfig)); |
| |
| if (!check_write_and_transfer_input(glTex)) { |
| return false; |
| } |
| |
| static_assert(sizeof(int) == sizeof(int32_t), ""); |
| if (width <= 0 || height <= 0) { |
| return false; |
| } |
| |
| this->setScratchTextureUnit(); |
| GL_CALL(BindTexture(glTex->target(), glTex->textureID())); |
| |
| SkASSERT(!transferBuffer->isMapped()); |
| SkASSERT(!transferBuffer->isCPUBacked()); |
| const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer); |
| this->bindBuffer(kXferCpuToGpu_GrBufferType, glBuffer); |
| |
| SkDEBUGCODE( |
| SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height); |
| SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height()); |
| SkASSERT(bounds.contains(subRect)); |
| ) |
| |
| int bpp = GrColorTypeBytesPerPixel(bufferColorType); |
| const size_t trimRowBytes = width * bpp; |
| if (!rowBytes) { |
| rowBytes = trimRowBytes; |
| } |
| const void* pixels = (void*)offset; |
| if (width < 0 || height < 0) { |
| return false; |
| } |
| |
| bool restoreGLRowLength = false; |
| if (trimRowBytes != rowBytes) { |
| // we should have checked for this support already |
| SkASSERT(this->glCaps().unpackRowLengthSupport()); |
| GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp)); |
| restoreGLRowLength = true; |
| } |
| |
| // Internal format comes from the texture desc. |
| GrGLenum internalFormat; |
| // External format and type come from the upload data. |
| GrGLenum externalFormat; |
| GrGLenum externalType; |
| auto bufferAsConfig = GrColorTypeToPixelConfig(bufferColorType, GrSRGBEncoded::kNo); |
| if (!this->glCaps().getTexImageFormats(texConfig, bufferAsConfig, &internalFormat, |
| &externalFormat, &externalType)) { |
| return false; |
| } |
| |
| GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(texConfig))); |
| GL_CALL(TexSubImage2D(glTex->target(), |
| 0, |
| left, top, |
| width, |
| height, |
| externalFormat, externalType, |
| pixels)); |
| |
| if (restoreGLRowLength) { |
| GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); |
| } |
| |
| return true; |
| } |
| |
| /** |
| * Creates storage space for the texture and fills it with texels. |
| * |
| * @param config Pixel config of the texture. |
| * @param interface The GL interface in use. |
| * @param caps The capabilities of the GL device. |
| * @param internalFormat The data format used for the internal storage of the texture. May be sized. |
| * @param internalFormatForTexStorage The data format used for the TexStorage API. Must be sized. |
| * @param externalFormat The data format used for the external storage of the texture. |
| * @param externalType The type of the data used for the external storage of the texture. |
| * @param texels The texel data of the texture being created. |
| * @param baseWidth The width of the texture's base mipmap level |
| * @param baseHeight The height of the texture's base mipmap level |
| */ |
| static bool allocate_and_populate_texture(GrPixelConfig config, |
| const GrGLInterface& interface, |
| const GrGLCaps& caps, |
| GrGLenum target, |
| GrGLenum internalFormat, |
| GrGLenum internalFormatForTexStorage, |
| GrGLenum externalFormat, |
| GrGLenum externalType, |
| const GrMipLevel texels[], int mipLevelCount, |
| int baseWidth, int baseHeight) { |
| CLEAR_ERROR_BEFORE_ALLOC(&interface); |
| |
| bool useTexStorage = caps.isConfigTexSupportEnabled(config); |
| // We can only use TexStorage if we know we will not later change the storage requirements. |
| // This means if we may later want to add mipmaps, we cannot use TexStorage. |
| // Right now, we cannot know if we will later add mipmaps or not. |
| // The only time we can use TexStorage is when we already have the |
| // mipmaps. |
| useTexStorage &= mipLevelCount > 1; |
| |
| if (useTexStorage) { |
| // We never resize or change formats of textures. |
| GL_ALLOC_CALL(&interface, |
| TexStorage2D(target, SkTMax(mipLevelCount, 1), internalFormatForTexStorage, |
| baseWidth, baseHeight)); |
| GrGLenum error = CHECK_ALLOC_ERROR(&interface); |
| if (error != GR_GL_NO_ERROR) { |
| return false; |
| } else { |
| for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) { |
| const void* currentMipData = texels[currentMipLevel].fPixels; |
| if (currentMipData == nullptr) { |
| continue; |
| } |
| int twoToTheMipLevel = 1 << currentMipLevel; |
| int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel); |
| int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel); |
| |
| GR_GL_CALL(&interface, |
| TexSubImage2D(target, |
| currentMipLevel, |
| 0, // left |
| 0, // top |
| currentWidth, |
| currentHeight, |
| externalFormat, externalType, |
| currentMipData)); |
| } |
| return true; |
| } |
| } else { |
| if (!mipLevelCount) { |
| GL_ALLOC_CALL(&interface, |
| TexImage2D(target, |
| 0, |
| internalFormat, |
| baseWidth, |
| baseHeight, |
| 0, // border |
| externalFormat, externalType, |
| nullptr)); |
| GrGLenum error = CHECK_ALLOC_ERROR(&interface); |
| if (error != GR_GL_NO_ERROR) { |
| return false; |
| } |
| } else { |
| for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) { |
| int twoToTheMipLevel = 1 << currentMipLevel; |
| int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel); |
| int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel); |
| const void* currentMipData = texels[currentMipLevel].fPixels; |
| // Even if curremtMipData is nullptr, continue to call TexImage2D. |
| // This will allocate texture memory which we can later populate. |
| GL_ALLOC_CALL(&interface, |
| TexImage2D(target, |
| currentMipLevel, |
| internalFormat, |
| currentWidth, |
| currentHeight, |
| 0, // border |
| externalFormat, externalType, |
| currentMipData)); |
| GrGLenum error = CHECK_ALLOC_ERROR(&interface); |
| if (error != GR_GL_NO_ERROR) { |
| return false; |
| } |
| } |
| } |
| } |
| return true; |
| } |
| |
| /** |
| * After a texture is created, any state which was altered during its creation |
| * needs to be restored. |
| * |
| * @param interface The GL interface to use. |
| * @param caps The capabilities of the GL device. |
| * @param restoreGLRowLength Should the row length unpacking be restored? |
| * @param glFlipY Did GL flip the texture vertically? |
| */ |
| static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps, |
| bool restoreGLRowLength) { |
| if (restoreGLRowLength) { |
| SkASSERT(caps.unpackRowLengthSupport()); |
| GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); |
| } |
| } |
| |
| void GrGLGpu::unbindCpuToGpuXferBuffer() { |
| auto& xferBufferState = fHWBufferState[kXferCpuToGpu_GrBufferType]; |
| if (!xferBufferState.fBoundBufferUniqueID.isInvalid()) { |
| GL_CALL(BindBuffer(xferBufferState.fGLTarget, 0)); |
| xferBufferState.invalidate(); |
| } |
| |
| } |
| |
| // TODO: Make this take a GrColorType instead of dataConfig. This requires updating GrGLCaps to |
| // convert from GrColorType to externalFormat/externalType GLenum values. |
bool GrGLGpu::uploadTexData(GrPixelConfig texConfig, int texWidth, int texHeight, GrGLenum target,
                            UploadType uploadType, int left, int top, int width, int height,
                            GrPixelConfig dataConfig, const GrMipLevel texels[], int mipLevelCount,
                            GrMipMapsStatus* mipMapsStatus) {
    // Uploads pixel data for one or more mip levels of the texture currently
    // bound to |target|. For kNewTexture_UploadType the whole texture is
    // allocated (and populated where level data is non-null); otherwise the
    // data is written into the existing allocation via TexSubImage2D.
    // Returns false if the upload could not be performed.
    SkASSERT(this->caps()->isConfigTexturable(texConfig));
    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(texWidth, texHeight);
        SkASSERT(bounds.contains(subRect));
    )
    // Partial (sub-rect) uploads are only supported for a single level.
    SkASSERT(1 == mipLevelCount ||
             (0 == left && 0 == top && width == texWidth && height == texHeight));

    // Pixels must come from client memory, not a bound transfer buffer.
    this->unbindCpuToGpuXferBuffer();

    // texels is const.
    // But we may need to flip the texture vertically to prepare it.
    // Rather than flip in place and alter the incoming data,
    // we allocate a new buffer to flip into.
    // This means we need to make a non-const shallow copy of texels.
    SkAutoTMalloc<GrMipLevel> texelsShallowCopy;

    if (mipLevelCount) {
        texelsShallowCopy.reset(mipLevelCount);
        memcpy(texelsShallowCopy.get(), texels, mipLevelCount*sizeof(GrMipLevel));
    }

    const GrGLInterface* interface = this->glInterface();
    const GrGLCaps& caps = this->glCaps();

    size_t bpp = GrBytesPerPixel(dataConfig);

    if (width == 0 || height == 0) {
        return false;
    }

    // Internal format comes from the texture desc.
    GrGLenum internalFormat;
    // External format and type come from the upload data.
    GrGLenum externalFormat;
    GrGLenum externalType;
    if (!this->glCaps().getTexImageFormats(texConfig, dataConfig, &internalFormat, &externalFormat,
                                           &externalType)) {
        return false;
    }
    // TexStorage requires a sized format, and internalFormat may or may not be
    GrGLenum internalFormatForTexStorage = this->glCaps().configSizedInternalFormat(texConfig);

    /*
     *  Check whether to allocate a temporary buffer for flipping y or
     *  because our srcData has extra bytes past each row. If so, we need
     *  to trim those off here, since GL ES may not let us specify
     *  GL_UNPACK_ROW_LENGTH.
     */
    bool restoreGLRowLength = false;

    // in case we need a temporary, trimmed copy of the src pixels
    SkAutoSMalloc<128 * 128> tempStorage;

    // Assume valid until we find a level with missing data.
    if (mipMapsStatus) {
        *mipMapsStatus = GrMipMapsStatus::kValid;
    }

    const bool usesMips = mipLevelCount > 1;

    // find the combined size of all the mip levels and the relative offset of
    // each into the collective buffer
    bool willNeedData = false;
    size_t combinedBufferSize = 0;
    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            int twoToTheMipLevel = 1 << currentMipLevel;
            int currentWidth = SkTMax(1, width / twoToTheMipLevel);
            int currentHeight = SkTMax(1, height / twoToTheMipLevel);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t trimmedSize = trimRowBytes * currentHeight;

            // fRowBytes of 0 means the rows are tightly packed.
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes
                    ? texelsShallowCopy[currentMipLevel].fRowBytes
                    : trimRowBytes;

            // A CPU-side trimmed copy is needed when rows are padded but we
            // can't (or won't, for mips) use GL_UNPACK_ROW_LENGTH.
            if (((!caps.unpackRowLengthSupport() || usesMips) && trimRowBytes != rowBytes)) {
                willNeedData = true;
            }

            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        } else {
            // A level without data leaves the mip chain dirty.
            if (mipMapsStatus) {
                *mipMapsStatus = GrMipMapsStatus::kDirty;
            }
            individualMipOffsets.push_back(0);
        }
    }
    if (mipMapsStatus && mipLevelCount <= 1) {
        *mipMapsStatus = GrMipMapsStatus::kNotAllocated;
    }
    char* buffer = nullptr;
    if (willNeedData) {
        buffer = (char*)tempStorage.reset(combinedBufferSize);
    }

    // Second pass: set unpack state or trim each level into |buffer|.
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (!texelsShallowCopy[currentMipLevel].fPixels) {
            continue;
        }
        int twoToTheMipLevel = 1 << currentMipLevel;
        int currentWidth = SkTMax(1, width / twoToTheMipLevel);
        int currentHeight = SkTMax(1, height / twoToTheMipLevel);
        const size_t trimRowBytes = currentWidth * bpp;

        /*
         *  check whether to allocate a temporary buffer for flipping y or
         *  because our srcData has extra bytes past each row. If so, we need
         *  to trim those off here, since GL ES may not let us specify
         *  GL_UNPACK_ROW_LENGTH.
         */
        restoreGLRowLength = false;

        const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes
                ? texelsShallowCopy[currentMipLevel].fRowBytes
                : trimRowBytes;

        // TODO: This optimization should be enabled with or without mips.
        // For use with mips, we must set GR_GL_UNPACK_ROW_LENGTH once per
        // mip level, before calling glTexImage2D.
        if (caps.unpackRowLengthSupport() && !usesMips) {
            // can't use this for flipping, only non-neg values allowed. :(
            if (rowBytes != trimRowBytes) {
                GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
                GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
                restoreGLRowLength = true;
            }
        } else if (trimRowBytes != rowBytes) {
            // copy data into our new storage, skipping the trailing bytes
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
            // now point data to our copied version
            texelsShallowCopy[currentMipLevel].fPixels = buffer +
                individualMipOffsets[currentMipLevel];
            texelsShallowCopy[currentMipLevel].fRowBytes = trimRowBytes;
        }
    }

    if (mipLevelCount) {
        GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(texConfig)));
    }

    bool succeeded = true;
    if (kNewTexture_UploadType == uploadType) {
        // Full allocation path; sub-rect uploads are invalid for a new texture.
        if (0 == left && 0 == top && texWidth == width && texHeight == height) {
            succeeded = allocate_and_populate_texture(
                    texConfig, *interface, caps, target, internalFormat,
                    internalFormatForTexStorage, externalFormat, externalType,
                    texelsShallowCopy, mipLevelCount, width, height);
        } else {
            succeeded = false;
        }
    } else {
        // Write path: update existing levels via TexSubImage2D.
        for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
            if (!texelsShallowCopy[currentMipLevel].fPixels) {
                continue;
            }
            int twoToTheMipLevel = 1 << currentMipLevel;
            int currentWidth = SkTMax(1, width / twoToTheMipLevel);
            int currentHeight = SkTMax(1, height / twoToTheMipLevel);

            GL_CALL(TexSubImage2D(target,
                                  currentMipLevel,
                                  left, top,
                                  currentWidth,
                                  currentHeight,
                                  externalFormat, externalType,
                                  texelsShallowCopy[currentMipLevel].fPixels));
        }
    }

    restore_pixelstore_state(*interface, caps, restoreGLRowLength);

    return succeeded;
}
| |
| static bool renderbuffer_storage_msaa(const GrGLContext& ctx, |
| int sampleCount, |
| GrGLenum format, |
| int width, int height) { |
| CLEAR_ERROR_BEFORE_ALLOC(ctx.interface()); |
| SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType()); |
| switch (ctx.caps()->msFBOType()) { |
| case GrGLCaps::kStandard_MSFBOType: |
| case GrGLCaps::kMixedSamples_MSFBOType: |
| GL_ALLOC_CALL(ctx.interface(), |
| RenderbufferStorageMultisample(GR_GL_RENDERBUFFER, |
| sampleCount, |
| format, |
| width, height)); |
| break; |
| case GrGLCaps::kES_Apple_MSFBOType: |
| GL_ALLOC_CALL(ctx.interface(), |
| RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER, |
| sampleCount, |
| format, |
| width, height)); |
| break; |
| case GrGLCaps::kES_EXT_MsToTexture_MSFBOType: |
| case GrGLCaps::kES_IMG_MsToTexture_MSFBOType: |
| GL_ALLOC_CALL(ctx.interface(), |
| RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER, |
| sampleCount, |
| format, |
| width, height)); |
| break; |
| case GrGLCaps::kNone_MSFBOType: |
| SK_ABORT("Shouldn't be here if we don't support multisampled renderbuffers."); |
| break; |
| } |
| return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface())); |
| } |
| |
| bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc, |
| const GrGLTextureInfo& texInfo, |
| GrGLRenderTarget::IDDesc* idDesc) { |
| idDesc->fMSColorRenderbufferID = 0; |
| idDesc->fRTFBOID = 0; |
| idDesc->fRTFBOOwnership = GrBackendObjectOwnership::kOwned; |
| idDesc->fTexFBOID = 0; |
| SkASSERT((GrGLCaps::kMixedSamples_MSFBOType == this->glCaps().msFBOType()) == |
| this->caps()->usesMixedSamples()); |
| idDesc->fIsMixedSampled = desc.fSampleCnt > 1 && this->caps()->usesMixedSamples(); |
| |
| GrGLenum status; |
| |
| GrGLenum colorRenderbufferFormat = 0; // suppress warning |
| |
| if (desc.fSampleCnt > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) { |
| goto FAILED; |
| } |
| |
| GL_CALL(GenFramebuffers(1, &idDesc->fTexFBOID)); |
| if (!idDesc->fTexFBOID) { |
| goto FAILED; |
| } |
| |
| // If we are using multisampling we will create two FBOS. We render to one and then resolve to |
| // the texture bound to the other. The exception is the IMG multisample extension. With this |
| // extension the texture is multisampled when rendered to and then auto-resolves it when it is |
| // rendered from. |
| if (desc.fSampleCnt > 1 && this->glCaps().usesMSAARenderBuffers()) { |
| GL_CALL(GenFramebuffers(1, &idDesc->fRTFBOID)); |
| GL_CALL(GenRenderbuffers(1, &idDesc->fMSColorRenderbufferID)); |
| if (!idDesc->fRTFBOID || |
| !idDesc->fMSColorRenderbufferID) { |
| goto FAILED; |
| } |
| if (!this->glCaps().getRenderbufferFormat(desc.fConfig, &colorRenderbufferFormat)) { |
| return false; |
| } |
| } else { |
| idDesc->fRTFBOID = idDesc->fTexFBOID; |
| } |
| |
| // below here we may bind the FBO |
| fHWBoundRenderTargetUniqueID.makeInvalid(); |
| if (idDesc->fRTFBOID != idDesc->fTexFBOID) { |
| SkASSERT(desc.fSampleCnt > 1); |
| GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, idDesc->fMSColorRenderbufferID)); |
| if (!renderbuffer_storage_msaa(*fGLContext, |
| desc.fSampleCnt, |
| colorRenderbufferFormat, |
| desc.fWidth, desc.fHeight)) { |
| goto FAILED; |
| } |
| this->bindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fRTFBOID); |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_COLOR_ATTACHMENT0, |
| GR_GL_RENDERBUFFER, |
| idDesc->fMSColorRenderbufferID)); |
| if (!this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) { |
| GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); |
| if (status != GR_GL_FRAMEBUFFER_COMPLETE) { |
| goto FAILED; |
| } |
| fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig); |
| } |
| } |
| this->bindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fTexFBOID); |
| |
| if (this->glCaps().usesImplicitMSAAResolve() && desc.fSampleCnt > 1) { |
| GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER, |
| GR_GL_COLOR_ATTACHMENT0, |
| texInfo.fTarget, |
| texInfo.fID, 0, desc.fSampleCnt)); |
| } else { |
| GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, |
| GR_GL_COLOR_ATTACHMENT0, |
| texInfo.fTarget, |
| texInfo.fID, 0)); |
| } |
| if (!this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) { |
| GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); |
| if (status != GR_GL_FRAMEBUFFER_COMPLETE) { |
| goto FAILED; |
| } |
| fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig); |
| } |
| |
| return true; |
| |
| FAILED: |
| if (idDesc->fMSColorRenderbufferID) { |
| GL_CALL(DeleteRenderbuffers(1, &idDesc->fMSColorRenderbufferID)); |
| } |
| if (idDesc->fRTFBOID != idDesc->fTexFBOID) { |
| this->deleteFramebuffer(idDesc->fRTFBOID); |
| } |
| if (idDesc->fTexFBOID) { |
| this->deleteFramebuffer(idDesc->fTexFBOID); |
| } |
| return false; |
| } |
| |
| // good to set a break-point here to know when createTexture fails |
| static sk_sp<GrTexture> return_null_texture() { |
| // SkDEBUGFAIL("null texture"); |
| return nullptr; |
| } |
| |
#if 0 && defined(SK_DEBUG)
// Debug-only helper (currently compiled out): widens an int to size_t for
// comparisons against byte counts without sign-compare warnings.
static size_t as_size_t(int x) {
    return x;
}
#endif
| |
| static void set_initial_texture_params(const GrGLInterface* interface, |
| const GrGLTextureInfo& info, |
| GrGLTexture::TexParams* initialTexParams) { |
| // Some drivers like to know filter/wrap before seeing glTexImage2D. Some |
| // drivers have a bug where an FBO won't be complete if it includes a |
| // texture that is not mipmap complete (considering the filter in use). |
| // we only set a subset here so invalidate first |
| initialTexParams->invalidate(); |
| initialTexParams->fMinFilter = GR_GL_NEAREST; |
| initialTexParams->fMagFilter = GR_GL_NEAREST; |
| initialTexParams->fWrapS = GR_GL_CLAMP_TO_EDGE; |
| initialTexParams->fWrapT = GR_GL_CLAMP_TO_EDGE; |
| GR_GL_CALL(interface, TexParameteri(info.fTarget, |
| GR_GL_TEXTURE_MAG_FILTER, |
| initialTexParams->fMagFilter)); |
| GR_GL_CALL(interface, TexParameteri(info.fTarget, |
| GR_GL_TEXTURE_MIN_FILTER, |
| initialTexParams->fMinFilter)); |
| GR_GL_CALL(interface, TexParameteri(info.fTarget, |
| GR_GL_TEXTURE_WRAP_S, |
| initialTexParams->fWrapS)); |
| GR_GL_CALL(interface, TexParameteri(info.fTarget, |
| GR_GL_TEXTURE_WRAP_T, |
| initialTexParams->fWrapT)); |
| } |
| |
sk_sp<GrTexture> GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
                                          SkBudgeted budgeted,
                                          const GrMipLevel texels[],
                                          int mipLevelCount) {
    // Creates a GL texture (optionally renderable) and uploads any provided
    // level data. Returns nullptr on failure.
    // We fail if the MSAA was requested and is not available.
    if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt > 1) {
        //SkDebugf("MSAA RT requested but not supported on this platform.");
        return return_null_texture();
    }

    bool performClear = (desc.fFlags & kPerformInitialClear_GrSurfaceFlag);

    GrMipLevel zeroLevel;
    std::unique_ptr<uint8_t[]> zeros;
    // If we can't clear via glClearTexImage or an FBO clear, fall back to
    // uploading a zeroed base level instead of clearing after creation.
    if (performClear && !this->glCaps().clearTextureSupport() &&
        !this->glCaps().canConfigBeFBOColorAttachment(desc.fConfig)) {
        size_t rowSize = GrBytesPerPixel(desc.fConfig) * desc.fWidth;
        size_t size = rowSize * desc.fHeight;
        zeros.reset(new uint8_t[size]);
        memset(zeros.get(), 0, size);
        zeroLevel.fPixels = zeros.get();
        zeroLevel.fRowBytes = 0;
        texels = &zeroLevel;
        mipLevelCount = 1;
        performClear = false;
    }

    bool isRenderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    GrGLTexture::IDDesc idDesc;
    idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
    GrMipMapsStatus mipMapsStatus;
    GrGLTexture::TexParams initialTexParams;
    if (!this->createTextureImpl(desc, &idDesc.fInfo, isRenderTarget, &initialTexParams, texels,
                                 mipLevelCount, &mipMapsStatus)) {
        return return_null_texture();
    }

    sk_sp<GrGLTexture> tex;
    if (isRenderTarget) {
        // unbind the texture from the texture unit before binding it to the frame buffer
        GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0));
        GrGLRenderTarget::IDDesc rtIDDesc;

        if (!this->createRenderTargetObjects(desc, idDesc.fInfo, &rtIDDesc)) {
            GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
            return return_null_texture();
        }
        tex = sk_make_sp<GrGLTextureRenderTarget>(this, budgeted, desc, idDesc, rtIDDesc,
                                                  mipMapsStatus);
        tex->baseLevelWasBoundToFBO();
    } else {
        tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, idDesc, mipMapsStatus);
    }
    tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
#ifdef TRACE_TEXTURE_CREATION
    SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
             idDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
#endif
    if (tex && performClear) {
        if (this->glCaps().clearTextureSupport()) {
            // Fast path: clear the base level directly.
            static constexpr uint32_t kZero = 0;
            GL_CALL(ClearTexImage(tex->textureID(), 0, GR_GL_RGBA, GR_GL_UNSIGNED_BYTE, &kZero));
        } else {
            // Clear by attaching the texture to a temporary FBO and issuing
            // a full-surface glClear (scissor and window rects disabled).
            GrGLIRect viewport;
            this->bindSurfaceFBOForPixelOps(tex.get(), GR_GL_FRAMEBUFFER, &viewport,
                                            kDst_TempFBOTarget);
            this->disableScissor();
            this->disableWindowRectangles();
            GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
            fHWWriteToColor = kYes_TriState;
            GL_CALL(ClearColor(0, 0, 0, 0));
            GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
            this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, tex.get());
            fHWBoundRenderTargetUniqueID.makeInvalid();
        }
    }
    return std::move(tex);
}
| |
| namespace { |
| |
| const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount; |
| |
| void inline get_stencil_rb_sizes(const GrGLInterface* gl, |
| GrGLStencilAttachment::Format* format) { |
| |
| // we shouldn't ever know one size and not the other |
| SkASSERT((kUnknownBitCount == format->fStencilBits) == |
| (kUnknownBitCount == format->fTotalBits)); |
| if (kUnknownBitCount == format->fStencilBits) { |
| GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER, |
| GR_GL_RENDERBUFFER_STENCIL_SIZE, |
| (GrGLint*)&format->fStencilBits); |
| if (format->fPacked) { |
| GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER, |
| GR_GL_RENDERBUFFER_DEPTH_SIZE, |
| (GrGLint*)&format->fTotalBits); |
| format->fTotalBits += format->fStencilBits; |
| } else { |
| format->fTotalBits = format->fStencilBits; |
| } |
| } |
| } |
| } |
| |
| int GrGLGpu::getCompatibleStencilIndex(GrPixelConfig config) { |
| static const int kSize = 16; |
| SkASSERT(this->caps()->isConfigRenderable(config)); |
| if (!this->glCaps().hasStencilFormatBeenDeterminedForConfig(config)) { |
| // Default to unsupported, set this if we find a stencil format that works. |
| int firstWorkingStencilFormatIndex = -1; |
| |
| // Create color texture |
| GrGLuint colorID = 0; |
| GL_CALL(GenTextures(1, &colorID)); |
| this->setScratchTextureUnit(); |
| GL_CALL(BindTexture(GR_GL_TEXTURE_2D, colorID)); |
| GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, |
| GR_GL_TEXTURE_MAG_FILTER, |
| GR_GL_NEAREST)); |
| GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, |
| GR_GL_TEXTURE_MIN_FILTER, |
| GR_GL_NEAREST)); |
| GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, |
| GR_GL_TEXTURE_WRAP_S, |
| GR_GL_CLAMP_TO_EDGE)); |
| GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, |
| GR_GL_TEXTURE_WRAP_T, |
| GR_GL_CLAMP_TO_EDGE)); |
| |
| GrGLenum internalFormat; |
| GrGLenum externalFormat; |
| GrGLenum externalType; |
| if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat, |
| &externalType)) { |
| return false; |
| } |
| this->unbindCpuToGpuXferBuffer(); |
| CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); |
| GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D, |
| 0, |
| internalFormat, |
| kSize, |
| kSize, |
| 0, |
| externalFormat, |
| externalType, |
| nullptr)); |
| if (GR_GL_NO_ERROR != CHECK_ALLOC_ERROR(this->glInterface())) { |
| GL_CALL(DeleteTextures(1, &colorID)); |
| return -1; |
| } |
| |
| // unbind the texture from the texture unit before binding it to the frame buffer |
| GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0)); |
| |
| // Create Framebuffer |
| GrGLuint fb = 0; |
| GL_CALL(GenFramebuffers(1, &fb)); |
| this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb); |
| fHWBoundRenderTargetUniqueID.makeInvalid(); |
| GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, |
| GR_GL_COLOR_ATTACHMENT0, |
| GR_GL_TEXTURE_2D, |
| colorID, |
| 0)); |
| GrGLuint sbRBID = 0; |
| GL_CALL(GenRenderbuffers(1, &sbRBID)); |
| |
| // look over formats till I find a compatible one |
| int stencilFmtCnt = this->glCaps().stencilFormats().count(); |
| if (sbRBID) { |
| GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID)); |
| for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) { |
| const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i]; |
| CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); |
| GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER, |
| sFmt.fInternalFormat, |
| kSize, kSize)); |
| if (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface())) { |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_STENCIL_ATTACHMENT, |
| GR_GL_RENDERBUFFER, sbRBID)); |
| if (sFmt.fPacked) { |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_DEPTH_ATTACHMENT, |
| GR_GL_RENDERBUFFER, sbRBID)); |
| } else { |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_DEPTH_ATTACHMENT, |
| GR_GL_RENDERBUFFER, 0)); |
| } |
| GrGLenum status; |
| GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); |
| if (status == GR_GL_FRAMEBUFFER_COMPLETE) { |
| firstWorkingStencilFormatIndex = i; |
| break; |
| } |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_STENCIL_ATTACHMENT, |
| GR_GL_RENDERBUFFER, 0)); |
| if (sFmt.fPacked) { |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_DEPTH_ATTACHMENT, |
| GR_GL_RENDERBUFFER, 0)); |
| } |
| } |
| } |
| GL_CALL(DeleteRenderbuffers(1, &sbRBID)); |
| } |
| GL_CALL(DeleteTextures(1, &colorID)); |
| this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0); |
| this->deleteFramebuffer(fb); |
| fGLContext->caps()->setStencilFormatIndexForConfig(config, firstWorkingStencilFormatIndex); |
| } |
| return this->glCaps().getStencilFormatIndexForConfig(config); |
| } |
| |
| bool GrGLGpu::createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info, bool renderTarget, |
| GrGLTexture::TexParams* initialTexParams, const GrMipLevel texels[], |
| int mipLevelCount, GrMipMapsStatus* mipMapsStatus) { |
| info->fID = 0; |
| info->fTarget = GR_GL_TEXTURE_2D; |
| GL_CALL(GenTextures(1, &(info->fID))); |
| |
| if (!info->fID) { |
| return false; |
| } |
| |
| this->setScratchTextureUnit(); |
| GL_CALL(BindTexture(info->fTarget, info->fID)); |
| |
| if (renderTarget && this->glCaps().textureUsageSupport()) { |
| // provides a hint about how this texture will be used |
| GL_CALL(TexParameteri(info->fTarget, |
| GR_GL_TEXTURE_USAGE, |
| GR_GL_FRAMEBUFFER_ATTACHMENT)); |
| } |
| |
| if (info) { |
| set_initial_texture_params(this->glInterface(), *info, initialTexParams); |
| } |
| |
| if (!this->uploadTexData(desc.fConfig, desc.fWidth, desc.fHeight, info->fTarget, |
| kNewTexture_UploadType, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig, |
| texels, mipLevelCount, mipMapsStatus)) { |
| GL_CALL(DeleteTextures(1, &(info->fID))); |
| return false; |
| } |
| info->fFormat = this->glCaps().configSizedInternalFormat(desc.fConfig); |
| return true; |
| } |
| |
GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width, int height) {
    // Creates a stencil renderbuffer compatible with |rt|'s pixel config,
    // sized width x height (which may exceed the RT's own dimensions).
    // Returns nullptr if no compatible stencil format exists or allocation fails.
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();
    GrGLStencilAttachment::IDDesc sbDesc;

    // Find (or look up the cached) stencil format index for this config.
    int sIdx = this->getCompatibleStencilIndex(rt->config());
    if (sIdx < 0) {
        return nullptr;
    }

    if (!sbDesc.fRenderbufferID) {
        GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID));
    }
    if (!sbDesc.fRenderbufferID) {
        return nullptr;
    }
    GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID));
    const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx];
    CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
    // we do this "if" so that we don't call the multisample
    // version on a GL that doesn't have an MSAA extension.
    if (samples > 1) {
        SkAssertResult(renderbuffer_storage_msaa(*fGLContext,
                                                 samples,
                                                 sFmt.fInternalFormat,
                                                 width, height));
    } else {
        GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
                                                               sFmt.fInternalFormat,
                                                               width, height));
        SkASSERT(GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface()));
    }
    fStats.incStencilAttachmentCreates();
    // After sized formats we attempt an unsized format and take
    // whatever sizes GL gives us. In that case we query for the size.
    GrGLStencilAttachment::Format format = sFmt;
    get_stencil_rb_sizes(this->glInterface(), &format);
    GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this,
                                                               sbDesc,
                                                               width,
                                                               height,
                                                               samples,
                                                               format);
    return stencil;
}
| |
| //////////////////////////////////////////////////////////////////////////////// |
| |
| // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer |
| // objects are implemented as client-side-arrays on tile-deferred architectures. |
| #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW |
| |
GrBuffer* GrGLGpu::onCreateBuffer(size_t size, GrBufferType intendedType,
                                  GrAccessPattern accessPattern, const void* data) {
    // Thin factory wrapper; GrGLBuffer::Create decides CPU- vs GPU-backing.
    return GrGLBuffer::Create(this, size, intendedType, accessPattern, data);
}
| |
| void GrGLGpu::flushScissor(const GrScissorState& scissorState, |
| const GrGLIRect& rtViewport, |
| GrSurfaceOrigin rtOrigin) { |
| if (scissorState.enabled()) { |
| GrGLIRect scissor; |
| scissor.setRelativeTo(rtViewport, scissorState.rect(), rtOrigin); |
| // if the scissor fully contains the viewport then we fall through and |
| // disable the scissor test. |
| if (!scissor.contains(rtViewport)) { |
| if (fHWScissorSettings.fRect != scissor) { |
| scissor.pushToGLScissor(this->glInterface()); |
| fHWScissorSettings.fRect = scissor; |
| } |
| if (kYes_TriState != fHWScissorSettings.fEnabled) { |
| GL_CALL(Enable(GR_GL_SCISSOR_TEST)); |
| fHWScissorSettings.fEnabled = kYes_TriState; |
| } |
| return; |
| } |
| } |
| |
| // See fall through note above |
| this->disableScissor(); |
| } |
| |
void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
                                    const GrGLRenderTarget* rt, GrSurfaceOrigin origin) {
#ifndef USE_NSIGHT
    // Flushes EXT_window_rectangles state for the given render target,
    // skipping the GL call when the cached HW state already matches.
    typedef GrWindowRectsState::Mode Mode;
    SkASSERT(!windowState.enabled() || rt->renderFBOID()); // Window rects can't be used on-screen.
    SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());

    if (!this->caps()->maxWindowRectangles() ||
        fHWWindowRectsState.knownEqualTo(origin, rt->getViewport(), windowState)) {
        return;
    }

    // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
    // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
    int numWindows = SkTMin(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
    SkASSERT(windowState.numWindows() == numWindows);

    // Convert each window from surface space into GL's bottom-up viewport space.
    GrGLIRect glwindows[GrWindowRectangles::kMaxWindows];
    const SkIRect* skwindows = windowState.windows().data();
    for (int i = 0; i < numWindows; ++i) {
        glwindows[i].setRelativeTo(rt->getViewport(), skwindows[i], origin);
    }

    GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE;
    GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));

    fHWWindowRectsState.set(origin, rt->getViewport(), windowState);
#endif
}
| |
| void GrGLGpu::disableWindowRectangles() { |
| #ifndef USE_NSIGHT |
| if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) { |
| return; |
| } |
| GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr)); |
| fHWWindowRectsState.setDisabled(); |
| #endif |
| } |
| |
| void GrGLGpu::flushMinSampleShading(float minSampleShading) { |
| if (fHWMinSampleShading != minSampleShading) { |
| if (minSampleShading > 0.0) { |
| GL_CALL(Enable(GR_GL_SAMPLE_SHADING)); |
| GL_CALL(MinSampleShading(minSampleShading)); |
| } |
| else { |
| GL_CALL(Disable(GR_GL_SAMPLE_SHADING)); |
| } |
| fHWMinSampleShading = minSampleShading; |
| } |
| } |
| |
bool GrGLGpu::flushGLState(const GrPipeline& pipeline, const GrPrimitiveProcessor& primProc,
                           bool willDrawPoints) {
    // Resolves/compiles the program for this draw and flushes all pipeline
    // state (blend, stencil, scissor, window rects, MSAA, render target) to
    // GL. Returns false if a program could not be created.
    sk_sp<GrGLProgram> program(fProgramCache->refProgram(this, pipeline, primProc, willDrawPoints));
    if (!program) {
        GrCapsDebugf(this->caps(), "Failed to create program!\n");
        return false;
    }

    // Regenerate mipmaps on any sampled textures that need it before drawing.
    program->generateMipmaps(primProc, pipeline);

    GrXferProcessor::BlendInfo blendInfo;
    pipeline.getXferProcessor().getBlendInfo(&blendInfo);

    this->flushColorWrite(blendInfo.fWriteColor);
    this->flushMinSampleShading(primProc.getSampleShading());

    this->flushProgram(std::move(program));

    if (blendInfo.fWriteColor) {
        // Swizzle the blend to match what the shader will output.
        const GrSwizzle& swizzle = this->caps()->shaderCaps()->configOutputSwizzle(
                pipeline.proxy()->config());
        this->flushBlend(blendInfo, swizzle);
    }

    fHWProgram->setData(primProc, pipeline);

    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(pipeline.renderTarget());
    GrStencilSettings stencil;
    if (pipeline.isStencilEnabled()) {
        // TODO: attach stencil and create settings during render target flush.
        SkASSERT(glRT->renderTargetPriv().getStencilAttachment());
        stencil.reset(*pipeline.getUserStencil(), pipeline.hasStencilClip(),
                      glRT->renderTargetPriv().numStencilBits());
    }
    this->flushStencil(stencil);
    this->flushScissor(pipeline.getScissorState(), glRT->getViewport(), pipeline.proxy()->origin());
    this->flushWindowRectangles(pipeline.getWindowRectsState(), glRT, pipeline.proxy()->origin());
    this->flushHWAAState(glRT, pipeline.isHWAntialiasState(), !stencil.isDisabled());

    // This must come after textures are flushed because a texture may need
    // to be msaa-resolved (which will modify bound FBO state).
    this->flushRenderTarget(glRT, pipeline.getDisableOutputConversionToSRGB());

    return true;
}
| |
| void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) { |
| if (!program) { |
| fHWProgram.reset(); |
| fHWProgramID = 0; |
| return; |
| } |
| SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID())); |
| if (program == fHWProgram) { |
| return; |
| } |
| auto id = program->programID(); |
| SkASSERT(id); |
| GL_CALL(UseProgram(id)); |
| fHWProgram = std::move(program); |
| fHWProgramID = id; |
| } |
| |
| void GrGLGpu::flushProgram(GrGLuint id) { |
| SkASSERT(id); |
| if (fHWProgramID == id) { |
| SkASSERT(!fHWProgram); |
| return; |
| } |
| fHWProgram.reset(); |
| GL_CALL(UseProgram(id)); |
| fHWProgramID = id; |
| } |
| |
void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
                            const GrBuffer* vertexBuffer,
                            int baseVertex,
                            const GrBuffer* instanceBuffer,
                            int baseInstance,
                            GrPrimitiveRestart enablePrimitiveRestart) {
    // Binds the vertex array and sets up all vertex/instance attribute
    // pointers for the currently flushed program (fHWProgram).
    SkASSERT((enablePrimitiveRestart == GrPrimitiveRestart::kNo) || indexBuffer);

    GrGLAttribArrayState* attribState;
    if (indexBuffer) {
        SkASSERT(indexBuffer && !indexBuffer->isMapped());
        attribState = fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer);
    } else {
        attribState = fHWVertexArrayState.bindInternalVertexArray(this);
    }

    // Enable exactly the arrays the program consumes; extra ones get disabled.
    int numAttribs = fHWProgram->numVertexAttributes() + fHWProgram->numInstanceAttributes();
    attribState->enableVertexArrays(this, numAttribs, enablePrimitiveRestart);

    if (int vertexStride = fHWProgram->vertexStride()) {
        SkASSERT(vertexBuffer && !vertexBuffer->isMapped());
        // baseVertex is folded into the attribute offsets rather than the draw call.
        size_t bufferOffset = vertexBuffer->baseOffset() + baseVertex * vertexStride;
        for (int i = 0; i < fHWProgram->numVertexAttributes(); ++i) {
            const auto& attrib = fHWProgram->vertexAttribute(i);
            static constexpr int kDivisor = 0;  // per-vertex data
            attribState->set(this, attrib.fLocation, vertexBuffer, attrib.fType, vertexStride,
                             bufferOffset + attrib.fOffset, kDivisor);
        }
    }
    if (int instanceStride = fHWProgram->instanceStride()) {
        SkASSERT(instanceBuffer && !instanceBuffer->isMapped());
        size_t bufferOffset = instanceBuffer->baseOffset() + baseInstance * instanceStride;
        int attribIdx = fHWProgram->numVertexAttributes();
        for (int i = 0; i < fHWProgram->numInstanceAttributes(); ++i, ++attribIdx) {
            const auto& attrib = fHWProgram->instanceAttribute(i);
            static constexpr int kDivisor = 1;  // advance once per instance
            attribState->set(this, attrib.fLocation, instanceBuffer, attrib.fType, instanceStride,
                             bufferOffset + attrib.fOffset, kDivisor);
        }
    }
}
| |
| GrGLenum GrGLGpu::bindBuffer(GrBufferType type, const GrBuffer* buffer) { |
| this->handleDirtyContext(); |
| |
| // Index buffer state is tied to the vertex array. |
| if (kIndex_GrBufferType == type) { |
| this->bindVertexArray(0); |
| } |
| |
| SkASSERT(type >= 0 && type <= kLast_GrBufferType); |
| auto& bufferState = fHWBufferState[type]; |
| |
| if (buffer->uniqueID() != bufferState.fBoundBufferUniqueID) { |
| if (buffer->isCPUBacked()) { |
| if (!bufferState.fBufferZeroKnownBound) { |
| GL_CALL(BindBuffer(bufferState.fGLTarget, 0)); |
| } |
| } else { |
| const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer); |
| GL_CALL(BindBuffer(bufferState.fGLTarget, glBuffer->bufferID())); |
| } |
| bufferState.fBufferZeroKnownBound = buffer->isCPUBacked(); |
| bufferState.fBoundBufferUniqueID = buffer->uniqueID(); |
| } |
| |
| return bufferState.fGLTarget; |
| } |
| |
void GrGLGpu::notifyBufferReleased(const GrGLBuffer* buffer) {
    // Called when a GL buffer is going away. If it was ever attached to a
    // buffer texture, detach it from every texture unit that still references
    // it so the driver can actually free the underlying memory.
    if (buffer->hasAttachedToTexture()) {
        // Detach this buffer from any textures to ensure the underlying memory is freed.
        GrGpuResource::UniqueID uniqueID = buffer->uniqueID();
        // Walk from the highest used unit downward so that
        // fHWMaxUsedBufferTextureUnit can shrink as trailing units are freed.
        for (int i = fHWMaxUsedBufferTextureUnit; i >= 0; --i) {
            auto& buffTex = fHWBufferTextures[i];
            if (uniqueID != buffTex.fAttachedBufferUniqueID) {
                continue;
            }
            if (i == fHWMaxUsedBufferTextureUnit) {
                --fHWMaxUsedBufferTextureUnit;
            }

            this->setTextureUnit(i);
            if (!buffTex.fKnownBound) {
                // Must have the texture bound on this unit before TexBuffer applies to it.
                SkASSERT(buffTex.fTextureID);
                GL_CALL(BindTexture(GR_GL_TEXTURE_BUFFER, buffTex.fTextureID));
                buffTex.fKnownBound = true;
            }
            // Re-specify the texture's format with buffer 0 to drop the attachment.
            GL_CALL(TexBuffer(GR_GL_TEXTURE_BUFFER,
                              this->glCaps().configSizedInternalFormat(buffTex.fTexelConfig), 0));
        }
    }
}
| |
| void GrGLGpu::disableScissor() { |
| if (kNo_TriState != fHWScissorSettings.fEnabled) { |
| GL_CALL(Disable(GR_GL_SCISSOR_TEST)); |
| fHWScissorSettings.fEnabled = kNo_TriState; |
| return; |
| } |
| } |
| |
| void GrGLGpu::clear(const GrFixedClip& clip, GrColor color, |
| GrRenderTarget* target, GrSurfaceOrigin origin) { |
| // parent class should never let us get here with no RT |
| SkASSERT(target); |
| |
| this->handleDirtyContext(); |
| |
| GrGLfloat r, g, b, a; |
| static const GrGLfloat scale255 = 1.f / 255.f; |
| a = GrColorUnpackA(color) * scale255; |
| GrGLfloat scaleRGB = scale255; |
| r = GrColorUnpackR(color) * scaleRGB; |
| g = GrColorUnpackG(color) * scaleRGB; |
| b = GrColorUnpackB(color) * scaleRGB; |
| |
| if (this->glCaps().useDrawToClearColor()) { |
| this->clearColorAsDraw(clip, r, g, b, a, target, origin); |
| return; |
| } |
| |
| GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); |
| |
| if (clip.scissorEnabled()) { |
| this->flushRenderTarget(glRT, origin, clip.scissorRect()); |
| } else { |
| this->flushRenderTarget(glRT); |
| } |
| this->flushScissor(clip.scissorState(), glRT->getViewport(), origin); |
| this->flushWindowRectangles(clip.windowRectsState(), glRT, origin); |
| |
| GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); |
| fHWWriteToColor = kYes_TriState; |
| |
| if (this->glCaps().clearToBoundaryValuesIsBroken() && |
| (1 == r || 0 == r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) { |
| static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f); |
| static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f); |
| a = (1 == a) ? safeAlpha1 : safeAlpha0; |
| } |
| GL_CALL(ClearColor(r, g, b, a)); |
| GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT)); |
| } |
| |
void GrGLGpu::clearStencil(GrRenderTarget* target, int clearValue) {
    // Clears the whole stencil buffer of 'target' to 'clearValue'.
    if (!target) {
        return;
    }

    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);

    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
    this->flushRenderTargetNoColorWrites(glRT);

    // The clear must cover the whole buffer, so neither the scissor nor
    // window rectangles may restrict it.
    this->disableScissor();
    this->disableWindowRectangles();

    GL_CALL(StencilMask(0xffffffff));
    GL_CALL(ClearStencil(clearValue));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    // StencilMask was changed directly, so cached stencil settings are stale.
    fHWStencilSettings.invalidate();
    if (!clearValue) {
        // Record that the attachment now holds all zeros.
        sb->cleared();
    }
}
| |
void GrGLGpu::clearStencilClip(const GrFixedClip& clip,
                               bool insideStencilMask,
                               GrRenderTarget* target, GrSurfaceOrigin origin) {
    // Clears the stencil clip bit (within 'clip') either to "inside" or
    // "outside" the stencil mask.
    SkASSERT(target);
    this->handleDirtyContext();

    if (this->glCaps().useDrawToClearStencilClip()) {
        // Driver workaround: perform the clear with a draw instead of glClear.
        this->clearStencilClipAsDraw(clip, insideStencilMask, target, origin);
        return;
    }

    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    GrGLint stencilBitCount = sb->bits();
#if 0
    SkASSERT(stencilBitCount > 0);
    GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
#else
    // we could just clear the clip bit but when we go through
    // ANGLE a partial stencil mask will cause clears to be
    // turned into draws. Our contract on GrOpList says that
    // changing the clip between stencil passes may or may not
    // zero the client's clip bits. So we just clear the whole thing.
    static const GrGLint clipStencilMask = ~0;
#endif
    // The clip bit is the top bit of the stencil value: set it when clearing
    // to "inside", otherwise clear everything to zero.
    GrGLint value;
    if (insideStencilMask) {
        value = (1 << (stencilBitCount - 1));
    } else {
        value = 0;
    }
    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
    this->flushRenderTargetNoColorWrites(glRT);

    this->flushScissor(clip.scissorState(), glRT->getViewport(), origin);
    this->flushWindowRectangles(clip.windowRectsState(), glRT, origin);

    GL_CALL(StencilMask((uint32_t) clipStencilMask));
    GL_CALL(ClearStencil(value));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    // StencilMask was changed directly, so cached stencil settings are stale.
    fHWStencilSettings.invalidate();
}
| |
bool GrGLGpu::readPixelsSupported(GrRenderTarget* target, GrPixelConfig readConfig) {
    // Queries whether glReadPixels into 'readConfig' works for this render
    // target, binding the target's FBO so the caps code can issue its
    // GL_IMPLEMENTATION_COLOR_READ_FORMAT/_TYPE queries against it.
#ifdef SK_BUILD_FOR_MAC
    // Chromium may ask us to read back from locked IOSurfaces. Calling the command buffer's
    // glGetIntegerv() with GL_IMPLEMENTATION_COLOR_READ_FORMAT/_TYPE causes the command buffer
    // to make a call to check the framebuffer status which can hang the driver. So in Mac Chromium
    // we always use a temporary surface to test for read pixels support.
    // https://www.crbug.com/662802
    if (this->glContext().driver() == kChromium_GrGLDriver) {
        return this->readPixelsSupported(target->config(), readConfig);
    }
#endif
    auto bindRenderTarget = [this, target]() -> bool {
        this->flushRenderTargetNoColorWrites(static_cast<GrGLRenderTarget*>(target));
        return true;
    };
    // Nothing to undo — the target simply stays bound.
    auto unbindRenderTarget = []{};
    auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
        GR_GL_GetIntegerv(this->glInterface(), query, value);
    };
    GrPixelConfig rtConfig = target->config();
    return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget,
                                              unbindRenderTarget);
}
| |
bool GrGLGpu::readPixelsSupported(GrPixelConfig rtConfig, GrPixelConfig readConfig) {
    // Tests read-back support by creating a small (16x16) temporary surface of
    // 'rtConfig', binding it as an FBO, and letting the caps code query it.
    // 'temp' is captured by reference so unbindRenderTarget sees the texture
    // that bindRenderTarget created.
    sk_sp<GrTexture> temp;
    auto bindRenderTarget = [this, rtConfig, &temp]() -> bool {
        GrSurfaceDesc desc;
        desc.fConfig = rtConfig;
        desc.fWidth = desc.fHeight = 16;
        if (this->glCaps().isConfigRenderable(rtConfig)) {
            // Renderable config: create a real render target and bind it.
            desc.fFlags = kRenderTarget_GrSurfaceFlag;
            temp = this->createTexture(desc, SkBudgeted::kNo);
            if (!temp) {
                return false;
            }
            GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(temp->asRenderTarget());
            this->flushRenderTargetNoColorWrites(glrt);
            return true;
        } else if (this->glCaps().canConfigBeFBOColorAttachment(rtConfig)) {
            // Not renderable, but usable as an FBO color attachment: wrap the
            // texture in a temporary FBO.
            temp = this->createTexture(desc, SkBudgeted::kNo);
            if (!temp) {
                return false;
            }
            GrGLIRect vp;
            this->bindSurfaceFBOForPixelOps(temp.get(), GR_GL_FRAMEBUFFER, &vp, kDst_TempFBOTarget);
            // The temp FBO replaced the tracked binding; invalidate the cache.
            fHWBoundRenderTargetUniqueID.makeInvalid();
            return true;
        }
        return false;
    };
    auto unbindRenderTarget = [this, &temp]() {
        this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, temp.get());
    };
    auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
        GR_GL_GetIntegerv(this->glInterface(), query, value);
    };
    return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget,
                                              unbindRenderTarget);
}
| |
| bool GrGLGpu::readPixelsSupported(GrSurface* surfaceForConfig, GrPixelConfig readConfig) { |
| if (GrRenderTarget* rt = surfaceForConfig->asRenderTarget()) { |
| return this->readPixelsSupported(rt, readConfig); |
| } else { |
| GrPixelConfig config = surfaceForConfig->config(); |
| return this->readPixelsSupported(config, readConfig); |
| } |
| } |
| |
| bool GrGLGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height, |
| GrColorType dstColorType, void* buffer, size_t rowBytes) { |
| SkASSERT(surface); |
| |
| GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget()); |
| if (!renderTarget && !this->glCaps().canConfigBeFBOColorAttachment(surface->config())) { |
| return false; |
| } |
| |
| // TODO: Avoid this conversion by making GrGLCaps work with color types. |
| auto dstAsConfig = GrColorTypeToPixelConfig(dstColorType, GrSRGBEncoded::kNo); |
| |
| if (!this->readPixelsSupported(surface, dstAsConfig)) { |
| // If reading in half float format is not supported, then read in a temporary float buffer |
| // and convert to half float. |
| if (kRGBA_half_GrPixelConfig == dstAsConfig && |
| this->readPixelsSupported(surface, kRGBA_float_GrPixelConfig)) { |
| std::unique_ptr<float[]> temp(new float[width * height * 4]); |
| if (this->onReadPixels(surface, left, top, width, height, GrColorType::kRGBA_F32, |
| temp.get(), width * sizeof(float) * 4)) { |
| uint8_t* dst = reinterpret_cast<uint8_t*>(buffer); |
| float* src = temp.get(); |
| for (int j = 0; j < height; ++j) { |
| SkHalf* dstRow = reinterpret_cast<SkHalf*>(dst); |
| for (int i = 0; i < width; ++i) { |
| for (int color = 0; color < 4; color++) { |
| *dstRow++ = SkFloatToHalf(*src++); |
| } |
| } |
| dst += rowBytes; |
| } |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| GrGLenum externalFormat; |
| GrGLenum externalType; |
| if (!this->glCaps().getReadPixelsFormat(surface->config(), dstAsConfig, &externalFormat, |
| &externalType)) { |
| return false; |
| } |
| |
| GrGLIRect glvp; |
| if (renderTarget) { |
| // resolve the render target if necessary |
| switch (renderTarget->getResolveType()) { |
| case GrGLRenderTarget::kCantResolve_ResolveType: |
| return false; |
| case GrGLRenderTarget::kAutoResolves_ResolveType: |
| this->flushRenderTargetNoColorWrites(renderTarget); |
| break; |
| case GrGLRenderTarget::kCanResolve_ResolveType: |
| this->onResolveRenderTarget(renderTarget); |
| // we don't track the state of the READ FBO ID. |
| this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID()); |
| break; |
| default: |
| SK_ABORT("Unknown resolve type"); |
| } |
| glvp = renderTarget->getViewport(); |
| } else { |
| // Use a temporary FBO. |
| this->bindSurfaceFBOForPixelOps(surface, GR_GL_FRAMEBUFFER, &glvp, kSrc_TempFBOTarget); |
| fHWBoundRenderTargetUniqueID.makeInvalid(); |
| } |
| |
| // the read rect is viewport-relative |
| GrGLIRect readRect; |
| readRect.setRelativeTo(glvp, left, top, width, height, kTopLeft_GrSurfaceOrigin); |
| |
| int bytesPerPixel = GrBytesPerPixel(dstAsConfig); |
| size_t tightRowBytes = bytesPerPixel * width; |
| |
| size_t readDstRowBytes = tightRowBytes; |
| void* readDst = buffer; |
| |
| // determine if GL can read using the passed rowBytes or if we need a scratch buffer. |
| SkAutoSMalloc<32 * sizeof(GrColor)> scratch; |
| if (rowBytes != tightRowBytes) { |
| if (this->glCaps().packRowLengthSupport() && !(rowBytes % bytesPerPixel)) { |
| GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, |
| static_cast<GrGLint>(rowBytes / bytesPerPixel))); |
| readDstRowBytes = rowBytes; |
| } else { |
| scratch.reset(tightRowBytes * height); |
| readDst = scratch.get(); |
| } |
| } |
| GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, config_alignment(dstAsConfig))); |
| |
| GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom, |
| readRect.fWidth, readRect.fHeight, |
| externalFormat, externalType, readDst)); |
| if (readDstRowBytes != tightRowBytes) { |
| SkASSERT(this->glCaps().packRowLengthSupport()); |
| GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); |
| } |
| |
| if (readDst != buffer) { |
| SkASSERT(readDst != buffer); |
| SkASSERT(rowBytes != tightRowBytes); |
| const char* src = reinterpret_cast<const char*>(readDst); |
| char* dst = reinterpret_cast<char*>(buffer); |
| SkRectMemcpy(dst, rowBytes, src, readDstRowBytes, tightRowBytes, height); |
| } |
| if (!renderTarget) { |
| this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, surface); |
| } |
| return true; |
| } |
| |
GrGpuRTCommandBuffer* GrGLGpu::createCommandBuffer(
        GrRenderTarget* rt, GrSurfaceOrigin origin,
        const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
        const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
    // Creates a command buffer targeting 'rt'; the caller takes ownership of
    // the returned heap allocation.
    return new GrGLGpuRTCommandBuffer(this, rt, origin, colorInfo, stencilInfo);
}
| |
GrGpuTextureCommandBuffer* GrGLGpu::createCommandBuffer(GrTexture* texture,
                                                        GrSurfaceOrigin origin) {
    // Creates a command buffer targeting 'texture'; the caller takes ownership
    // of the returned heap allocation.
    return new GrGLGpuTextureCommandBuffer(this, texture, origin);
}
| |
void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, GrSurfaceOrigin origin,
                                const SkIRect& bounds, bool disableSRGB) {
    // Binds 'target' and records that only 'bounds' (in 'origin' space) was written.
    this->flushRenderTargetNoColorWrites(target, disableSRGB);
    this->didWriteToSurface(target, origin, &bounds);
}
| |
void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, bool disableSRGB) {
    // Binds 'target' and records a whole-surface write (null bounds; the
    // origin is presumably irrelevant in that case — kTopLeft is arbitrary).
    this->flushRenderTargetNoColorWrites(target, disableSRGB);
    this->didWriteToSurface(target, kTopLeft_GrSurfaceOrigin, nullptr);
}
| |
void GrGLGpu::flushRenderTargetNoColorWrites(GrGLRenderTarget* target, bool disableSRGB) {
    // Binds target's render FBO (if not already bound) and syncs the viewport
    // and sRGB-write state, without marking the surface's contents dirty.
    SkASSERT(target);
    GrGpuResource::UniqueID rtID = target->uniqueID();
    if (fHWBoundRenderTargetUniqueID != rtID) {
        this->bindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID());
#ifdef SK_DEBUG
        // don't do this check in Chromium -- this is causing
        // lots of repeated command buffer flushes when the compositor is
        // rendering with Ganesh, which is really slow; even too slow for
        // Debug mode.
        if (kChromium_GrGLDriver != this->glContext().driver()) {
            GrGLenum status;
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
            }
        }
#endif
        if (this->glCaps().requiresFlushBetweenNonAndInstancedDraws()) {
            // A fresh FBO binding clears the pending-flush requirement.
            fRequiresFlushBeforeNextInstancedDraw = false;
        }
        fHWBoundRenderTargetUniqueID = rtID;
        this->flushViewport(target->getViewport());
    }

    if (this->glCaps().srgbWriteControl()) {
        this->flushFramebufferSRGB(GrPixelConfigIsSRGB(target->config()) && !disableSRGB);
    }
}
| |
| void GrGLGpu::flushFramebufferSRGB(bool enable) { |
| if (enable && kYes_TriState != fHWSRGBFramebuffer) { |
| GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB)); |
| fHWSRGBFramebuffer = kYes_TriState; |
| } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) { |
| GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB)); |
| fHWSRGBFramebuffer = kNo_TriState; |
| } |
| } |
| |
void GrGLGpu::flushViewport(const GrGLIRect& viewport) {
    // Pushes 'viewport' to GL only if it differs from the cached value.
    if (fHWViewport != viewport) {
        viewport.pushToGLViewport(this->glInterface());
        fHWViewport = viewport;
    }
}
| |
// Debugging aid: when SWAP_PER_DRAW is 1, GrGLGpu::draw() flushes and swaps
// the window's buffers after every draw so each one can be observed on screen.
#define SWAP_PER_DRAW 0

#if SWAP_PER_DRAW
    #if defined(SK_BUILD_FOR_MAC)
        #include <AGL/agl.h>
    #elif defined(SK_BUILD_FOR_WIN)
        #include <gl/GL.h>
        // Swaps the buffers of every top-level window owned by this process.
        void SwapBuf() {
            DWORD procID = GetCurrentProcessId();
            HWND hwnd = GetTopWindow(GetDesktopWindow());
            while(hwnd) {
                DWORD wndProcID = 0;
                GetWindowThreadProcessId(hwnd, &wndProcID);
                if(wndProcID == procID) {
                    SwapBuffers(GetDC(hwnd));
                }
                hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
            }
        }
    #endif
#endif
| |
void GrGLGpu::draw(const GrPipeline& pipeline,
                   const GrPrimitiveProcessor& primProc,
                   const GrMesh meshes[],
                   const GrPipeline::DynamicState dynamicStates[],
                   int meshCount) {
    // Flushes pipeline/processor state, then submits each mesh, applying any
    // per-mesh dynamic state and driver workarounds along the way.
    this->handleDirtyContext();

    // flushGLState needs to know whether any mesh draws point primitives.
    bool hasPoints = false;
    for (int i = 0; i < meshCount; ++i) {
        if (meshes[i].primitiveType() == GrPrimitiveType::kPoints) {
            hasPoints = true;
            break;
        }
    }
    if (!this->flushGLState(pipeline, primProc, hasPoints)) {
        return;
    }

    for (int i = 0; i < meshCount; ++i) {
        // Insert a barrier before each mesh if the xfer processor requires one.
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
            this->xferBarrier(pipeline.renderTarget(), barrierType);
        }

        // Per-mesh dynamic state: currently just the scissor rect.
        if (dynamicStates) {
            if (pipeline.getScissorState().enabled()) {
                GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(pipeline.renderTarget());
                this->flushScissor(dynamicStates[i].fScissorRect,
                                   glRT->getViewport(), pipeline.proxy()->origin());
            }
        }
        // Driver workaround: toggle cull face when switching from non-line to
        // line primitives (see the caps bit's name for the affected drivers).
        if (this->glCaps().requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() &&
            GrIsPrimTypeLines(meshes[i].primitiveType()) &&
            !GrIsPrimTypeLines(fLastPrimitiveType)) {
            GL_CALL(Enable(GR_GL_CULL_FACE));
            GL_CALL(Disable(GR_GL_CULL_FACE));
        }
        meshes[i].sendToGpu(this);
        fLastPrimitiveType = meshes[i].primitiveType();
    }

#if SWAP_PER_DRAW
    // Debug-only: make each draw visible on screen (see SWAP_PER_DRAW above).
    glFlush();
    #if defined(SK_BUILD_FOR_MAC)
        aglSwapBuffers(aglGetCurrentContext());
        int set_a_break_pt_here = 9;
        aglSwapBuffers(aglGetCurrentContext());
    #elif defined(SK_BUILD_FOR_WIN)
        SwapBuf();
        int set_a_break_pt_here = 9;
        SwapBuf();
    #endif
#endif
}
| |
| static GrGLenum gr_primitive_type_to_gl_mode(GrPrimitiveType primitiveType) { |
| switch (primitiveType) { |
| case GrPrimitiveType::kTriangles: |
| return GR_GL_TRIANGLES; |
| case GrPrimitiveType::kTriangleStrip: |
| return GR_GL_TRIANGLE_STRIP; |
| case GrPrimitiveType::kPoints: |
| return GR_GL_POINTS; |
| case GrPrimitiveType::kLines: |
| return GR_GL_LINES; |
| case GrPrimitiveType::kLineStrip: |
| return GR_GL_LINE_STRIP; |
| case GrPrimitiveType::kLinesAdjacency: |
| return GR_GL_LINES_ADJACENCY; |
| } |
| SK_ABORT("invalid GrPrimitiveType"); |
| return GR_GL_TRIANGLES; |
| } |
| |
| void GrGLGpu::sendMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* vertexBuffer, |
| int vertexCount, int baseVertex) { |
| const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType); |
| if (this->glCaps().drawArraysBaseVertexIsBroken()) { |
| this->setupGeometry(nullptr, vertexBuffer, baseVertex, nullptr, 0, GrPrimitiveRestart::kNo); |
| GL_CALL(DrawArrays(glPrimType, 0, vertexCount)); |
| } else { |
| this->setupGeometry(nullptr, vertexBuffer, 0, nullptr, 0, GrPrimitiveRestart::kNo); |
| GL_CALL(DrawArrays(glPrimType, baseVertex, vertexCount)); |
| } |
| if (this->glCaps().requiresFlushBetweenNonAndInstancedDraws()) { |
| fRequiresFlushBeforeNextInstancedDraw = true; |
| } |
| fStats.incNumDraws(); |
| } |
| |
| void GrGLGpu::sendIndexedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* indexBuffer, |
| int indexCount, int baseIndex, uint16_t minIndexValue, |
| uint16_t maxIndexValue, const GrBuffer* vertexBuffer, |
| int baseVertex, GrPrimitiveRestart enablePrimitiveRestart) { |
| const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType); |
| GrGLvoid* const indices = reinterpret_cast<void*>(indexBuffer->baseOffset() + |
| sizeof(uint16_t) * baseIndex); |
| |
| this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, nullptr, 0, enablePrimitiveRestart); |
| |
| if (this->glCaps().drawRangeElementsSupport()) { |
| GL_CALL(DrawRangeElements(glPrimType, minIndexValue, maxIndexValue, indexCount, |
| GR_GL_UNSIGNED_SHORT, indices)); |
| } else { |
| GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices)); |
| } |
| if (this->glCaps().requiresFlushBetweenNonAndInstancedDraws()) { |
| fRequiresFlushBeforeNextInstancedDraw = true; |
| } |
| fStats.incNumDraws(); |
| } |
| |
| void GrGLGpu::sendInstancedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* vertexBuffer, |
| int vertexCount, int baseVertex, |
| const GrBuffer* instanceBuffer, int instanceCount, |
| int baseInstance) { |
| if (fRequiresFlushBeforeNextInstancedDraw) { |
| SkASSERT(this->glCaps().requiresFlushBetweenNonAndInstancedDraws()); |
| GL_CALL(Flush()); |
| fRequiresFlushBeforeNextInstancedDraw = false; |
| } |
| GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType); |
| int maxInstances = this->glCaps().maxInstancesPerDrawArraysWithoutCrashing(instanceCount); |
| for (int i = 0; i < instanceCount; i += maxInstances) { |
| this->setupGeometry(nullptr, vertexBuffer, 0, instanceBuffer, baseInstance + i, |
| GrPrimitiveRestart::kNo); |
| GL_CALL(DrawArraysInstanced(glPrimType, baseVertex, vertexCount, |
| SkTMin(instanceCount - i, maxInstances))); |
| fStats.incNumDraws(); |
| } |
| } |
| |
| void GrGLGpu::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType, |
| const GrBuffer* indexBuffer, int indexCount, |
| int baseIndex, const GrBuffer* vertexBuffer, |
| int baseVertex, const GrBuffer* instanceBuffer, |
| int instanceCount, int baseInstance, |
| GrPrimitiveRestart enablePrimitiveRestart) { |
| if (fRequiresFlushBeforeNextInstancedDraw) { |
| SkASSERT(this->glCaps().requiresFlushBetweenNonAndInstancedDraws()); |
| GL_CALL(Flush()); |
| fRequiresFlushBeforeNextInstancedDraw = false; |
| } |
| const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType); |
| GrGLvoid* indices = reinterpret_cast<void*>(indexBuffer->baseOffset() + |
| sizeof(uint16_t) * baseIndex); |
| this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, instanceBuffer, baseInstance, |
| enablePrimitiveRestart); |
| GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices, |
| instanceCount)); |
| fStats.incNumDraws(); |
| } |
| |
void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) {
    // Resolves the MSAA render buffer into the target's texture FBO if the
    // target has unresolved content.
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
    if (rt->needsResolve()) {
        // Some extensions automatically resolves the texture when it is read.
        if (this->glCaps().usesMSAARenderBuffers()) {
            SkASSERT(rt->textureFBOID() != rt->renderFBOID());
            SkASSERT(rt->textureFBOID() != 0 && rt->renderFBOID() != 0);
            // Blit from the multisampled FBO (read) into the texture FBO (draw).
            this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID());
            this->bindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID());

            // make sure we go through flushRenderTarget() since we've modified
            // the bound DRAW FBO ID.
            fHWBoundRenderTargetUniqueID.makeInvalid();
            const GrGLIRect& vp = rt->getViewport();
            const SkIRect dirtyRect = rt->getResolveRect();
            // The dirty rect tracked on the RT is always stored in the native coordinates of the
            // surface. Choose kTopLeft so no adjustments are made
            static constexpr auto kDirtyRectOrigin = kTopLeft_GrSurfaceOrigin;
            if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
                // Apple's extension uses the scissor as the blit bounds.
                GrScissorState scissorState;
                scissorState.set(dirtyRect);
                this->flushScissor(scissorState, vp, kDirtyRectOrigin);
                this->disableWindowRectangles();
                GL_CALL(ResolveMultisampleFramebuffer());
            } else {
                int l, b, r, t;
                if (GrGLCaps::kResolveMustBeFull_BlitFrambufferFlag &
                    this->glCaps().blitFramebufferSupportFlags()) {
                    // Driver requires resolving the full surface, not just the dirty rect.
                    l = 0;
                    b = 0;
                    r = target->width();
                    t = target->height();
                } else {
                    // Resolve only the dirty region, translated into viewport coordinates.
                    GrGLIRect rect;
                    rect.setRelativeTo(vp, dirtyRect, kDirtyRectOrigin);
                    l = rect.fLeft;
                    b = rect.fBottom;
                    r = rect.fLeft + rect.fWidth;
                    t = rect.fBottom + rect.fHeight;
                }

                // BlitFrameBuffer respects the scissor, so disable it.
                this->disableScissor();
                this->disableWindowRectangles();
                GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t,
                                        GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
            }
        }
        rt->flagAsResolved();
    }
}
| |
namespace {

// Helpers for translating GrStencilSettings into GL stencil state.

GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
    // Table indexed by GrStencilOp; the static asserts below pin the ordering.
    static const GrGLenum gTable[kGrStencilOpCount] = {
        GR_GL_KEEP,        // kKeep
        GR_GL_ZERO,        // kZero
        GR_GL_REPLACE,     // kReplace
        GR_GL_INVERT,      // kInvert
        GR_GL_INCR_WRAP,   // kIncWrap
        GR_GL_DECR_WRAP,   // kDecWrap
        GR_GL_INCR,        // kIncClamp
        GR_GL_DECR,        // kDecClamp
    };
    GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep);
    GR_STATIC_ASSERT(1 == (int)GrStencilOp::kZero);
    GR_STATIC_ASSERT(2 == (int)GrStencilOp::kReplace);
    GR_STATIC_ASSERT(3 == (int)GrStencilOp::kInvert);
    GR_STATIC_ASSERT(4 == (int)GrStencilOp::kIncWrap);
    GR_STATIC_ASSERT(5 == (int)GrStencilOp::kDecWrap);
    GR_STATIC_ASSERT(6 == (int)GrStencilOp::kIncClamp);
    GR_STATIC_ASSERT(7 == (int)GrStencilOp::kDecClamp);
    SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
    return gTable[(int)op];
}

// Applies one face's stencil settings via StencilFunc/Mask/Op (or the
// *Separate variants when glFace names a single face).
void set_gl_stencil(const GrGLInterface* gl,
                    const GrStencilSettings::Face& face,
                    GrGLenum glFace) {
    GrGLenum glFunc = GrToGLStencilFunc(face.fTest);
    GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp);
    GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp);

    GrGLint ref = face.fRef;
    GrGLint mask = face.fTestMask;
    GrGLint writeMask = face.fWriteMask;

    if (GR_GL_FRONT_AND_BACK == glFace) {
        // we call the combined func just in case separate stencil is not
        // supported.
        GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
        GR_GL_CALL(gl, StencilMask(writeMask));
        GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp));
    } else {
        GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
        GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
        GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp));
    }
}
}  // namespace
| |
void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings) {
    // Applies 'stencilSettings' to GL, using the cached settings/tri-state to
    // skip redundant calls.
    if (stencilSettings.isDisabled()) {
        this->disableStencil();
    } else if (fHWStencilSettings != stencilSettings) {
        if (kYes_TriState != fHWStencilTestEnabled) {
            GL_CALL(Enable(GR_GL_STENCIL_TEST));

            fHWStencilTestEnabled = kYes_TriState;
        }
        if (stencilSettings.isTwoSided()) {
            // Separate settings per face.
            set_gl_stencil(this->glInterface(),
                           stencilSettings.front(),
                           GR_GL_FRONT);
            set_gl_stencil(this->glInterface(),
                           stencilSettings.back(),
                           GR_GL_BACK);
        } else {
            set_gl_stencil(this->glInterface(),
                           stencilSettings.front(),
                           GR_GL_FRONT_AND_BACK);
        }
        fHWStencilSettings = stencilSettings;
    }
}
| |
| void GrGLGpu::disableStencil() { |
| if (kNo_TriState != fHWStencilTestEnabled) { |
| GL_CALL(Disable(GR_GL_STENCIL_TEST)); |
| |
| fHWStencilTestEnabled = kNo_TriState; |
| fHWStencilSettings.invalidate(); |
| } |
| } |
| |
void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA, bool stencilEnabled) {
    // Syncs GL_MULTISAMPLE and (for mixed-sample targets) the explicit raster
    // sample count with the requested HW anti-aliasing state.
    // rt is only optional if useHWAA is false.
    SkASSERT(rt || !useHWAA);
    SkASSERT(!useHWAA || rt->isStencilBufferMultisampled());

    if (this->caps()->multisampleDisableSupport()) {
        if (useHWAA) {
            if (kYes_TriState != fMSAAEnabled) {
                GL_CALL(Enable(GR_GL_MULTISAMPLE));
                fMSAAEnabled = kYes_TriState;
            }
        } else {
            if (kNo_TriState != fMSAAEnabled) {
                GL_CALL(Disable(GR_GL_MULTISAMPLE));
                fMSAAEnabled = kNo_TriState;
            }
        }
    }

    if (0 != this->caps()->maxRasterSamples()) {
        if (useHWAA && GrFSAAType::kMixedSamples == rt->fsaaType() && !stencilEnabled) {
            // Since stencil is disabled and we want more samples than are in the color buffer, we
            // need to tell the rasterizer explicitly how many to run.
            if (kYes_TriState != fHWRasterMultisampleEnabled) {
                GL_CALL(Enable(GR_GL_RASTER_MULTISAMPLE));
                fHWRasterMultisampleEnabled = kYes_TriState;
            }
            int numStencilSamples = rt->numStencilSamples();
            // convert to GL's understanding of sample counts where 0 means nonMSAA.
            numStencilSamples = 1 == numStencilSamples ? 0 : numStencilSamples;
            if (numStencilSamples != fHWNumRasterSamples) {
                SkASSERT(numStencilSamples <= this->caps()->maxRasterSamples());
                GL_CALL(RasterSamples(numStencilSamples, GR_GL_TRUE));
                fHWNumRasterSamples = numStencilSamples;
            }
        } else {
            if (kNo_TriState != fHWRasterMultisampleEnabled) {
                GL_CALL(Disable(GR_GL_RASTER_MULTISAMPLE));
                fHWRasterMultisampleEnabled = kNo_TriState;
            }
        }
    } else {
        // Without raster-multisample support, mixed samples require the stencil test.
        SkASSERT(!useHWAA || GrFSAAType::kMixedSamples != rt->fsaaType() || stencilEnabled);
    }
}
| |
void GrGLGpu::flushBlend(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) {
    // Syncs GL blend enable, equation, coefficients, and blend constant with
    // 'blendInfo', skipping calls that match the cached state.
    // Any optimization to disable blending should have already been applied and
    // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).

    GrBlendEquation equation = blendInfo.fEquation;
    GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
    GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
    // src*1 + dst*0 (or -) is a no-op blend, so blending can be disabled entirely.
    bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
                    kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
    if (blendOff) {
        if (kNo_TriState != fHWBlendState.fEnabled) {
            GL_CALL(Disable(GR_GL_BLEND));

            // Workaround for the ARM KHR_blend_equation_advanced blacklist issue
            // https://code.google.com/p/skia/issues/detail?id=3943
            if (kARM_GrGLVendor == this->ctxInfo().vendor() &&
                GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) {
                SkASSERT(this->caps()->advancedBlendEquationSupport());
                // Set to any basic blending equation.
                GrBlendEquation blend_equation = kAdd_GrBlendEquation;
                GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation]));
                fHWBlendState.fEquation = blend_equation;
            }

            fHWBlendState.fEnabled = kNo_TriState;
        }
        return;
    }

    if (kYes_TriState != fHWBlendState.fEnabled) {
        GL_CALL(Enable(GR_GL_BLEND));

        fHWBlendState.fEnabled = kYes_TriState;
    }

    if (fHWBlendState.fEquation != equation) {
        GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation]));
        fHWBlendState.fEquation = equation;
    }

    if (GrBlendEquationIsAdvanced(equation)) {
        SkASSERT(this->caps()->advancedBlendEquationSupport());
        // Advanced equations have no other blend state.
        return;
    }

    if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) {
        GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
                          gXfermodeCoeff2Blend[dstCoeff]));
        fHWBlendState.fSrcCoeff = srcCoeff;
        fHWBlendState.fDstCoeff = dstCoeff;
    }

    // Only push the blend constant when a coefficient actually references it.
    if ((BlendCoeffReferencesConstant(srcCoeff) || BlendCoeffReferencesConstant(dstCoeff))) {
        GrColor blendConst = blendInfo.fBlendConstant;
        blendConst = swizzle.applyTo(blendConst);
        if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) {
            GrGLfloat c[4];
            GrColorToRGBAFloat(blendConst, c);
            GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
            fHWBlendState.fConstColor = blendConst;
            fHWBlendState.fConstColorValid = true;
        }
    }
}
| |
| static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode) { |
| switch (wrapMode) { |
| case GrSamplerState::WrapMode::kClamp: |
| return GR_GL_CLAMP_TO_EDGE; |
| case GrSamplerState::WrapMode::kRepeat: |
| return GR_GL_REPEAT; |
| case GrSamplerState::WrapMode::kMirrorRepeat: |
| return GR_GL_MIRRORED_REPEAT; |
| }; |
| SK_ABORT("Unknown wrap mode"); |
| return 0; |
| } |
| |
| static GrGLenum get_component_enum_from_char(char component) { |
| switch (component) { |
| case 'r': |
| return GR_GL_RED; |
| case 'g': |
| return GR_GL_GREEN; |
| case 'b': |
| return GR_GL_BLUE; |
| case 'a': |
| return GR_GL_ALPHA; |
| default: |
| SK_ABORT("Unsupported component"); |
| return 0; |
| } |
| } |
| |
| /** If texture swizzling is available using tex parameters then it is preferred over mangling |
| the generated shader code. This potentially allows greater reuse of cached shaders. */ |
| static void get_tex_param_swizzle(GrPixelConfig config, |
| const GrGLCaps& caps, |
| GrGLenum* glSwizzle) { |
| const GrSwizzle& swizzle = caps.configSwizzle(config); |
| for (int i = 0; i < 4; ++i) { |
| glSwizzle[i] = get_component_enum_from_char(swizzle.c_str()[i]); |
| } |
| } |
| |
| static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) { |
| switch (filter) { |
| case GrSamplerState::Filter::kNearest: |
| return GR_GL_NEAREST; |
| case GrSamplerState::Filter::kBilerp: |
| return GR_GL_LINEAR; |
| case GrSamplerState::Filter::kMipMap: |
| return GR_GL_LINEAR; |
| } |
| SK_ABORT("Unknown filter"); |
| return 0; |
| } |
| |
| static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter) { |
| switch (filter) { |
| case GrSamplerState::Filter::kNearest: |
| return GR_GL_NEAREST; |
| case GrSamplerState::Filter::kBilerp: |
| return GR_GL_LINEAR; |
| case GrSamplerState::Filter::kMipMap: |
| return GR_GL_LINEAR_MIPMAP_LINEAR; |
| } |
| SK_ABORT("Unknown filter"); |
| return 0; |
| } |
| |
| void GrGLGpu::bindTexture(int unitIdx, const GrSamplerState& samplerState, GrGLTexture* texture, |
| GrSurfaceOrigin textureOrigin) { |
| SkASSERT(texture); |
| |
| #ifdef SK_DEBUG |
| if (!this->caps()->npotTextureTileSupport()) { |
| if (samplerState.isRepeated()) { |
| const int w = texture->width(); |
| const int h = texture->height(); |
| SkASSERT(SkIsPow2(w) && SkIsPow2(h)); |
| } |
| } |
| #endif |
| |
| // If we created a rt/tex and rendered to it without using a texture and now we're texturing |
| // from the rt it will still be the last bound texture, but it needs resolving. So keep this |
| // out of the "last != next" check. |
| GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget()); |
| if (texRT) { |
| this->onResolveRenderTarget(texRT); |
| } |
| |
| GrGpuResource::UniqueID textureID = texture->uniqueID(); |
| GrGLenum target = texture->target(); |
| if (fHWBoundTextureUniqueIDs[unitIdx] != textureID) { |
| this->setTextureUnit(unitIdx); |
| GL_CALL(BindTexture(target, texture->textureID())); |
| fHWBoundTextureUniqueIDs[unitIdx] = textureID; |
| } |
| |
| ResetTimestamp timestamp; |
| const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(×tamp); |
| bool setAll = timestamp < this->getResetTimestamp(); |
| GrGLTexture::TexParams newTexParams; |
| |
| GrSamplerState::Filter filterMode = samplerState.filter(); |
| |
| if (GrSamplerState::Filter::kMipMap == filterMode) { |
| if (!this->caps()->mipMapSupport() |