| /* |
| * Copyright 2011 Google Inc. |
| * |
| * Use of this source code is governed by a BSD-style license that can be |
| * found in the LICENSE file. |
| */ |
| |
| #include "include/core/SkPixmap.h" |
| #include "include/core/SkStrokeRec.h" |
| #include "include/core/SkTypes.h" |
| #include "include/gpu/GrBackendSemaphore.h" |
| #include "include/gpu/GrBackendSurface.h" |
| #include "include/gpu/GrTypes.h" |
| #include "include/private/SkHalf.h" |
| #include "include/private/SkTemplates.h" |
| #include "include/private/SkTo.h" |
| #include "src/core/SkAutoMalloc.h" |
| #include "src/core/SkConvertPixels.h" |
| #include "src/core/SkMakeUnique.h" |
| #include "src/core/SkMipMap.h" |
| #include "src/core/SkTraceEvent.h" |
| #include "src/gpu/GrContextPriv.h" |
| #include "src/gpu/GrCpuBuffer.h" |
| #include "src/gpu/GrDataUtils.h" |
| #include "src/gpu/GrFixedClip.h" |
| #include "src/gpu/GrGpuResourcePriv.h" |
| #include "src/gpu/GrMesh.h" |
| #include "src/gpu/GrPipeline.h" |
| #include "src/gpu/GrRenderTargetPriv.h" |
| #include "src/gpu/GrShaderCaps.h" |
| #include "src/gpu/GrSurfaceProxyPriv.h" |
| #include "src/gpu/GrTexturePriv.h" |
| #include "src/gpu/gl/GrGLBuffer.h" |
| #include "src/gpu/gl/GrGLGpu.h" |
| #include "src/gpu/gl/GrGLGpuCommandBuffer.h" |
| #include "src/gpu/gl/GrGLSemaphore.h" |
| #include "src/gpu/gl/GrGLStencilAttachment.h" |
| #include "src/gpu/gl/GrGLTextureRenderTarget.h" |
| #include "src/gpu/gl/builders/GrGLShaderStringBuilder.h" |
| #include "src/sksl/SkSLCompiler.h" |
| |
| #include <cmath> |
| |
| #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X) |
| #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X) |
| |
| #define SKIP_CACHE_CHECK true |
| |
| #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR |
| #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface) |
| #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call) |
| #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface) |
| #else |
| #define CLEAR_ERROR_BEFORE_ALLOC(iface) |
| #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) |
| #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR |
| #endif |
| |
| //#define USE_NSIGHT |
| |
| /////////////////////////////////////////////////////////////////////////////// |
| |
// Maps GrBlendEquation values (used as indices) to the corresponding GL blend
// equation enums. The GR_STATIC_ASSERTs below pin the enum ordering this table
// relies on.
static const GrGLenum gXfermodeEquation2Blend[] = {
    // Basic OpenGL blend equations.
    GR_GL_FUNC_ADD,
    GR_GL_FUNC_SUBTRACT,
    GR_GL_FUNC_REVERSE_SUBTRACT,

    // GL_KHR_blend_equation_advanced.
    GR_GL_SCREEN,
    GR_GL_OVERLAY,
    GR_GL_DARKEN,
    GR_GL_LIGHTEN,
    GR_GL_COLORDODGE,
    GR_GL_COLORBURN,
    GR_GL_HARDLIGHT,
    GR_GL_SOFTLIGHT,
    GR_GL_DIFFERENCE,
    GR_GL_EXCLUSION,
    GR_GL_MULTIPLY,
    GR_GL_HSL_HUE,
    GR_GL_HSL_SATURATION,
    GR_GL_HSL_COLOR,
    GR_GL_HSL_LUMINOSITY,

    // Illegal... needs to map to something.
    GR_GL_FUNC_ADD,
};
// Verify at compile time that the table layout matches the GrBlendEquation enum.
GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
GR_STATIC_ASSERT(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);
| |
// Maps GrBlendCoeff values (used as indices) to the corresponding GL blend
// factor enums. Ordering is asserted inside BlendCoeffReferencesConstant().
static const GrGLenum gXfermodeCoeff2Blend[] = {
    GR_GL_ZERO,
    GR_GL_ONE,
    GR_GL_SRC_COLOR,
    GR_GL_ONE_MINUS_SRC_COLOR,
    GR_GL_DST_COLOR,
    GR_GL_ONE_MINUS_DST_COLOR,
    GR_GL_SRC_ALPHA,
    GR_GL_ONE_MINUS_SRC_ALPHA,
    GR_GL_DST_ALPHA,
    GR_GL_ONE_MINUS_DST_ALPHA,
    GR_GL_CONSTANT_COLOR,
    GR_GL_ONE_MINUS_CONSTANT_COLOR,
    GR_GL_CONSTANT_ALPHA,
    GR_GL_ONE_MINUS_CONSTANT_ALPHA,

    // extended blend coeffs (dual-source blending)
    GR_GL_SRC1_COLOR,
    GR_GL_ONE_MINUS_SRC1_COLOR,
    GR_GL_SRC1_ALPHA,
    GR_GL_ONE_MINUS_SRC1_ALPHA,

    // Illegal... needs to map to something.
    GR_GL_ZERO,
};
| |
| bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) { |
| static const bool gCoeffReferencesBlendConst[] = { |
| false, |
| false, |
| false, |
| false, |
| false, |
| false, |
| false, |
| false, |
| false, |
| false, |
| true, |
| true, |
| true, |
| true, |
| |
| // extended blend coeffs |
| false, |
| false, |
| false, |
| false, |
| |
| // Illegal. |
| false, |
| }; |
| return gCoeffReferencesBlendConst[coeff]; |
| GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst)); |
| |
| GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff); |
| GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff); |
| GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff); |
| GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff); |
| GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff); |
| GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff); |
| GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff); |
| GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff); |
| GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff); |
| GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff); |
| GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff); |
| GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff); |
| GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff); |
| GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff); |
| |
| GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff); |
| GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff); |
| GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff); |
| GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff); |
| |
| // assertion for gXfermodeCoeff2Blend have to be in GrGpu scope |
| GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gXfermodeCoeff2Blend)); |
| } |
| |
| ////////////////////////////////////////////////////////////////////////////// |
| |
| static int gl_target_to_binding_index(GrGLenum target) { |
| switch (target) { |
| case GR_GL_TEXTURE_2D: |
| return 0; |
| case GR_GL_TEXTURE_RECTANGLE: |
| return 1; |
| case GR_GL_TEXTURE_EXTERNAL: |
| return 2; |
| } |
| SK_ABORT("Unexpected GL texture target."); |
| return 0; |
| } |
| |
// Returns the unique ID of the resource currently recorded as bound to 'target'
// on this texture unit.
GrGpuResource::UniqueID GrGLGpu::TextureUnitBindings::boundID(GrGLenum target) const {
    return fTargetBindings[gl_target_to_binding_index(target)].fBoundResourceID;
}
| |
// Returns true if a binding has ever been recorded for 'target' on this unit
// (cleared by invalidateAllTargets(true)).
bool GrGLGpu::TextureUnitBindings::hasBeenModified(GrGLenum target) const {
    return fTargetBindings[gl_target_to_binding_index(target)].fHasBeenModified;
}
| |
| void GrGLGpu::TextureUnitBindings::setBoundID(GrGLenum target, GrGpuResource::UniqueID resourceID) { |
| int targetIndex = gl_target_to_binding_index(target); |
| fTargetBindings[targetIndex].fBoundResourceID = resourceID; |
| fTargetBindings[targetIndex].fHasBeenModified = true; |
| } |
| |
// Records an invalid (default) ID for 'target' so the next bind of a real
// texture to this unit is not skipped by the redundant-bind check.
void GrGLGpu::TextureUnitBindings::invalidateForScratchUse(GrGLenum target) {
    this->setBoundID(target, GrGpuResource::UniqueID());
}
| |
| void GrGLGpu::TextureUnitBindings::invalidateAllTargets(bool markUnmodified) { |
| for (auto& targetBinding : fTargetBindings) { |
| targetBinding.fBoundResourceID.makeInvalid(); |
| if (markUnmodified) { |
| targetBinding.fHasBeenModified = false; |
| } |
| } |
| } |
| |
| ////////////////////////////////////////////////////////////////////////////// |
| |
| static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) { |
| switch (filter) { |
| case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST; |
| case GrSamplerState::Filter::kBilerp: return GR_GL_LINEAR; |
| case GrSamplerState::Filter::kMipMap: return GR_GL_LINEAR; |
| } |
| SK_ABORT("Unknown filter"); |
| return 0; |
| } |
| |
| static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter) { |
| switch (filter) { |
| case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST; |
| case GrSamplerState::Filter::kBilerp: return GR_GL_LINEAR; |
| case GrSamplerState::Filter::kMipMap: return GR_GL_LINEAR_MIPMAP_LINEAR; |
| } |
| SK_ABORT("Unknown filter"); |
| return 0; |
| } |
| |
| static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode, |
| const GrCaps& caps) { |
| switch (wrapMode) { |
| case GrSamplerState::WrapMode::kClamp: return GR_GL_CLAMP_TO_EDGE; |
| case GrSamplerState::WrapMode::kRepeat: return GR_GL_REPEAT; |
| case GrSamplerState::WrapMode::kMirrorRepeat: return GR_GL_MIRRORED_REPEAT; |
| case GrSamplerState::WrapMode::kClampToBorder: |
| // May not be supported but should have been caught earlier |
| SkASSERT(caps.clampToBorderSupport()); |
| return GR_GL_CLAMP_TO_BORDER; |
| } |
| SK_ABORT("Unknown wrap mode"); |
| return 0; |
| } |
| |
| /////////////////////////////////////////////////////////////////////////////// |
| |
| class GrGLGpu::SamplerObjectCache { |
| public: |
| SamplerObjectCache(GrGLGpu* gpu) : fGpu(gpu) { |
| fNumTextureUnits = fGpu->glCaps().shaderCaps()->maxFragmentSamplers(); |
| fHWBoundSamplers.reset(new GrGLuint[fNumTextureUnits]); |
| std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0); |
| std::fill_n(fSamplers, kNumSamplers, 0); |
| } |
| |
| ~SamplerObjectCache() { |
| if (!fNumTextureUnits) { |
| // We've already been abandoned. |
| return; |
| } |
| for (GrGLuint sampler : fSamplers) { |
| // The spec states that "zero" values should be silently ignored, however they still |
| // trigger GL errors on some NVIDIA platforms. |
| if (sampler) { |
| GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(1, &sampler)); |
| } |
| } |
| } |
| |
| void bindSampler(int unitIdx, const GrSamplerState& state) { |
| int index = StateToIndex(state); |
| if (!fSamplers[index]) { |
| GrGLuint s; |
| GR_GL_CALL(fGpu->glInterface(), GenSamplers(1, &s)); |
| if (!s) { |
| return; |
| } |
| fSamplers[index] = s; |
| auto minFilter = filter_to_gl_min_filter(state.filter()); |
| auto magFilter = filter_to_gl_mag_filter(state.filter()); |
| auto wrapX = wrap_mode_to_gl_wrap(state.wrapModeX(), fGpu->glCaps()); |
| auto wrapY = wrap_mode_to_gl_wrap(state.wrapModeY(), fGpu->glCaps()); |
| GR_GL_CALL(fGpu->glInterface(), |
| SamplerParameteri(s, GR_GL_TEXTURE_MIN_FILTER, minFilter)); |
| GR_GL_CALL(fGpu->glInterface(), |
| SamplerParameteri(s, GR_GL_TEXTURE_MAG_FILTER, magFilter)); |
| GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_S, wrapX)); |
| GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_T, wrapY)); |
| } |
| if (fHWBoundSamplers[unitIdx] != fSamplers[index]) { |
| GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, fSamplers[index])); |
| fHWBoundSamplers[unitIdx] = fSamplers[index]; |
| } |
| } |
| |
| void invalidateBindings() { |
| // When we have sampler support we always use samplers. So setting these to zero will cause |
| // a rebind on next usage. |
| std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0); |
| } |
| |
| void abandon() { |
| fHWBoundSamplers.reset(); |
| fNumTextureUnits = 0; |
| } |
| |
| void release() { |
| if (!fNumTextureUnits) { |
| // We've already been abandoned. |
| return; |
| } |
| GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(kNumSamplers, fSamplers)); |
| std::fill_n(fSamplers, kNumSamplers, 0); |
| // Deleting a bound sampler implicitly binds sampler 0. |
| std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0); |
| } |
| |
| private: |
| static int StateToIndex(const GrSamplerState& state) { |
| int filter = static_cast<int>(state.filter()); |
| SkASSERT(filter >= 0 && filter < 3); |
| int wrapX = static_cast<int>(state.wrapModeX()); |
| SkASSERT(wrapX >= 0 && wrapX < 4); |
| int wrapY = static_cast<int>(state.wrapModeY()); |
| SkASSERT(wrapY >= 0 && wrapY < 4); |
| int idx = 16 * filter + 4 * wrapX + wrapY; |
| SkASSERT(idx < kNumSamplers); |
| return idx; |
| } |
| |
| GrGLGpu* fGpu; |
| static constexpr int kNumSamplers = 48; |
| std::unique_ptr<GrGLuint[]> fHWBoundSamplers; |
| GrGLuint fSamplers[kNumSamplers]; |
| int fNumTextureUnits; |
| }; |
| |
| /////////////////////////////////////////////////////////////////////////////// |
| |
| sk_sp<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface, const GrContextOptions& options, |
| GrContext* context) { |
| if (!interface) { |
| interface = GrGLMakeNativeInterface(); |
| // For clients that have written their own GrGLCreateNativeInterface and haven't yet updated |
| // to GrGLMakeNativeInterface. |
| if (!interface) { |
| interface = sk_ref_sp(GrGLCreateNativeInterface()); |
| } |
| if (!interface) { |
| return nullptr; |
| } |
| } |
| #ifdef USE_NSIGHT |
| const_cast<GrContextOptions&>(options).fSuppressPathRendering = true; |
| #endif |
| auto glContext = GrGLContext::Make(std::move(interface), options); |
| if (!glContext) { |
| return nullptr; |
| } |
| return sk_sp<GrGpu>(new GrGLGpu(std::move(glContext), context)); |
| } |
| |
// Constructor: takes ownership of the GL context, seeds cached-HW-state
// tracking, and creates optional subsystems (path rendering, sampler cache)
// based on caps.
GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrContext* context)
        : GrGpu(context)
        , fGLContext(std::move(ctx))
        , fProgramCache(new ProgramCache(this))
        , fHWProgramID(0)
        , fTempSrcFBOID(0)
        , fTempDstFBOID(0)
        , fStencilClearFBOID(0) {
    SkASSERT(fGLContext);
    // Clear any GL error state left over from context creation.
    GrGLClearErr(this->glInterface());
    fCaps = sk_ref_sp(fGLContext->caps());

    fHWTextureUnitBindings.reset(this->numTextureUnits());

    this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER;
    this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
    // Transfer buffers bind to Chromium-specific targets when the caps report
    // the Chromium transfer-buffer extension; otherwise use core pack/unpack.
    if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
        this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget =
                GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
        this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget =
                GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
    } else {
        this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
        this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
    }
    // Mark all cached buffer bindings unknown until first use.
    for (int i = 0; i < kGrGpuBufferTypeCount; ++i) {
        fHWBufferState[i].invalidate();
    }
    GR_STATIC_ASSERT(4 == SK_ARRAY_COUNT(fHWBufferState));

    if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
        fPathRendering.reset(new GrGLPathRendering(this));
    }

    if (this->glCaps().samplerObjectSupport()) {
        fSamplerObjectCache.reset(new SamplerObjectCache(this));
    }
}
| |
// Destructor: releases GL objects while the GL interface is still usable.
// Release order is deliberate — see the comment on the first resets.
GrGLGpu::~GrGLGpu() {
    // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
    // to release the resources held by the objects themselves.
    fPathRendering.reset();
    fCopyProgramArrayBuffer.reset();
    fMipmapProgramArrayBuffer.reset();

    fHWProgram.reset();
    if (fHWProgramID) {
        // detach the current program so there is no confusion on OpenGL's part
        // that we want it to be deleted
        GL_CALL(UseProgram(0));
    }

    // Delete our temporary/utility FBOs if they were ever created.
    if (fTempSrcFBOID) {
        this->deleteFramebuffer(fTempSrcFBOID);
    }
    if (fTempDstFBOID) {
        this->deleteFramebuffer(fTempDstFBOID);
    }
    if (fStencilClearFBOID) {
        this->deleteFramebuffer(fStencilClearFBOID);
    }

    // Delete any lazily-created copy/mipmap utility programs (0 = never created).
    for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
        if (0 != fCopyPrograms[i].fProgram) {
            GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
        }
    }

    for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
        if (0 != fMipmapPrograms[i].fProgram) {
            GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
        }
    }

    delete fProgramCache;
    fSamplerObjectCache.reset();
}
| |
// Detaches this gpu from its GL context. With kCleanup the GL objects are
// deleted through the still-live interface; otherwise (abandon) all GL handles
// are simply forgotten. Either way, all cached state is reset afterwards.
void GrGLGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (DisconnectType::kCleanup == type) {
        // Context is still current: actually free GL objects.
        if (fHWProgramID) {
            GL_CALL(UseProgram(0));
        }
        if (fTempSrcFBOID) {
            this->deleteFramebuffer(fTempSrcFBOID);
        }
        if (fTempDstFBOID) {
            this->deleteFramebuffer(fTempDstFBOID);
        }
        if (fStencilClearFBOID) {
            this->deleteFramebuffer(fStencilClearFBOID);
        }
        for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
            if (fCopyPrograms[i].fProgram) {
                GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
            }
        }
        for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
            if (fMipmapPrograms[i].fProgram) {
                GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
            }
        }

        if (fSamplerObjectCache) {
            fSamplerObjectCache->release();
        }
    } else {
        // Abandon: context is gone; drop handles without GL calls.
        if (fProgramCache) {
            fProgramCache->abandon();
        }
        if (fSamplerObjectCache) {
            fSamplerObjectCache->abandon();
        }
    }

    // Common teardown for both paths: forget all cached GL object IDs/state.
    fHWProgram.reset();
    delete fProgramCache;
    fProgramCache = nullptr;

    fHWProgramID = 0;
    fTempSrcFBOID = 0;
    fTempDstFBOID = 0;
    fStencilClearFBOID = 0;
    fCopyProgramArrayBuffer.reset();
    for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
        fCopyPrograms[i].fProgram = 0;
    }
    fMipmapProgramArrayBuffer.reset();
    for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
        fMipmapPrograms[i].fProgram = 0;
    }

    if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
        this->glPathRendering()->disconnect(type);
    }
}
| |
| /////////////////////////////////////////////////////////////////////////////// |
| |
// Re-establishes our assumed GL state after a third party may have touched the
// context. Only the state categories named in 'resetBits' are reset; cached
// HW-state trackers for those categories are invalidated so the next draw
// re-sends them.
void GrGLGpu::onResetContext(uint32_t resetBits) {
    if (resetBits & kMisc_GrGLBackendState) {
        // we don't use the zb at all
        GL_CALL(Disable(GR_GL_DEPTH_TEST));
        GL_CALL(DepthMask(GR_GL_FALSE));

        // We don't use face culling.
        GL_CALL(Disable(GR_GL_CULL_FACE));
        // We do use separate stencil. Our algorithms don't care which face is front vs. back so
        // just set this to the default for self-consistency.
        GL_CALL(FrontFace(GR_GL_CCW));

        this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate();
        this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate();

        if (GR_IS_GR_GL(this->glStandard())) {
#ifndef USE_NSIGHT
            // Desktop-only state that we never change
            if (!this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
            }
            // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
            // core profile. This seems like a bug since the core spec removes any mention of
            // GL_ARB_imaging.
            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_COLOR_TABLE));
            }
            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));

            if (this->caps()->wireframeMode()) {
                GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE));
            } else {
                GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL));
            }
#endif
            // Since ES doesn't support glPointSize at all we always use the VS to
            // set the point size
            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));

        }

        if (GR_IS_GR_GL_ES(this->glStandard()) &&
            this->glCaps().fbFetchRequiresEnablePerSample()) {
            // The arm extension requires specifically enabling MSAA fetching per sample.
            // On some devices this may have a perf hit. Also multiple render targets are disabled
            GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE));
        }
        fHWWriteToColor = kUnknown_TriState;
        // we only ever use lines in hairline mode
        GL_CALL(LineWidth(1));
        GL_CALL(Disable(GR_GL_DITHER));

        // NaN guarantees the next clear color comparison fails and re-sends it.
        fHWClearColor[0] = fHWClearColor[1] = fHWClearColor[2] = fHWClearColor[3] = SK_FloatNaN;
    }

    if (resetBits & kMSAAEnable_GrGLBackendState) {
        fMSAAEnabled = kUnknown_TriState;

        if (this->caps()->mixedSamplesSupport()) {
            // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage
            // modulation. This state has no effect when not rendering to a mixed sampled target.
            GL_CALL(CoverageModulation(GR_GL_RGBA));
        }
    }

    // These two are always invalidated regardless of resetBits.
    fHWActiveTextureUnitIdx = -1; // invalid
    fLastPrimitiveType = static_cast<GrPrimitiveType>(-1);

    if (resetBits & kTextureBinding_GrGLBackendState) {
        for (int s = 0; s < this->numTextureUnits(); ++s) {
            fHWTextureUnitBindings[s].invalidateAllTargets(false);
        }
        if (fSamplerObjectCache) {
            fSamplerObjectCache->invalidateBindings();
        }
    }

    if (resetBits & kBlend_GrGLBackendState) {
        fHWBlendState.invalidate();
    }

    if (resetBits & kView_GrGLBackendState) {
        fHWScissorSettings.invalidate();
        fHWWindowRectsState.invalidate();
        fHWViewport.invalidate();
    }

    if (resetBits & kStencil_GrGLBackendState) {
        fHWStencilSettings.invalidate();
        fHWStencilTestEnabled = kUnknown_TriState;
    }

    // Vertex
    if (resetBits & kVertex_GrGLBackendState) {
        fHWVertexArrayState.invalidate();
        this->hwBufferState(GrGpuBufferType::kVertex)->invalidate();
        this->hwBufferState(GrGpuBufferType::kIndex)->invalidate();
    }

    if (resetBits & kRenderTarget_GrGLBackendState) {
        fHWBoundRenderTargetUniqueID.makeInvalid();
        fHWSRGBFramebuffer = kUnknown_TriState;
    }

    if (resetBits & kPathRendering_GrGLBackendState) {
        if (this->caps()->shaderCaps()->pathRenderingSupport()) {
            this->glPathRendering()->resetContext();
        }
    }

    // we assume these values
    if (resetBits & kPixelStore_GrGLBackendState) {
        if (this->glCaps().unpackRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
        }
    }

    if (resetBits & kProgram_GrGLBackendState) {
        fHWProgramID = 0;
        fHWProgram.reset();
    }
    // Invalidates cached texture-parameter timestamps on all textures.
    ++fResetTimestampForTextureParameters;
}
| |
| static bool check_backend_texture(const GrBackendTexture& backendTex, const GrGLCaps& caps, |
| GrGLTexture::IDDesc* idDesc) { |
| GrGLTextureInfo info; |
| if (!backendTex.getGLTextureInfo(&info) || !info.fID) { |
| return false; |
| } |
| |
| idDesc->fInfo = info; |
| |
| if (GR_GL_TEXTURE_EXTERNAL == idDesc->fInfo.fTarget) { |
| if (!caps.shaderCaps()->externalTextureSupport()) { |
| return false; |
| } |
| } else if (GR_GL_TEXTURE_RECTANGLE == idDesc->fInfo.fTarget) { |
| if (!caps.rectangleTextureSupport()) { |
| return false; |
| } |
| } else if (GR_GL_TEXTURE_2D != idDesc->fInfo.fTarget) { |
| return false; |
| } |
| return true; |
| } |
| |
// Wraps a client-created GL texture in a GrGLTexture (non-renderable path).
// Returns nullptr if the backend texture fails validation.
sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrWrapOwnership ownership, GrWrapCacheable cacheable,
                                               GrIOType ioType) {
    GrGLTexture::IDDesc idDesc;
    if (!check_backend_texture(backendTex, this->glCaps(), &idDesc)) {
        return nullptr;
    }
    // If the client didn't provide a sized internal format, derive one from config.
    if (!idDesc.fInfo.fFormat) {
        idDesc.fInfo.fFormat = this->glCaps().configSizedInternalFormat(backendTex.config());
    }
    if (kBorrow_GrWrapOwnership == ownership) {
        idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed;
    } else {
        idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
    }

    GrSurfaceDesc surfDesc;
    surfDesc.fFlags = kNone_GrSurfaceFlags;
    surfDesc.fWidth = backendTex.width();
    surfDesc.fHeight = backendTex.height();
    surfDesc.fConfig = backendTex.config();
    surfDesc.fSampleCnt = 1;

    GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kValid
                                                            : GrMipMapsStatus::kNotAllocated;

    auto texture = GrGLTexture::MakeWrapped(this, surfDesc, mipMapsStatus, idDesc,
                                            backendTex.getGLTextureParams(), cacheable, ioType);
    // We don't know what parameters are already set on wrapped textures.
    texture->textureParamsModified();
    return std::move(texture);
}
| |
// Wraps a client-created GL texture as a renderable texture, creating the FBO
// objects needed to render to it. Returns nullptr on validation failure,
// external-texture targets, unsupported sample counts, or FBO setup failure.
sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    GrGLTexture::IDDesc idDesc;
    if (!check_backend_texture(backendTex, this->glCaps(), &idDesc)) {
        return nullptr;
    }
    // If the client didn't provide a sized internal format, derive one from config.
    if (!idDesc.fInfo.fFormat) {
        idDesc.fInfo.fFormat = this->glCaps().configSizedInternalFormat(backendTex.config());
    }

    // We don't support rendering to a EXTERNAL texture.
    if (GR_GL_TEXTURE_EXTERNAL == idDesc.fInfo.fTarget) {
        return nullptr;
    }

    if (kBorrow_GrWrapOwnership == ownership) {
        idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed;
    } else {
        idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
    }

    GrSurfaceDesc surfDesc;
    surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
    surfDesc.fWidth = backendTex.width();
    surfDesc.fHeight = backendTex.height();
    surfDesc.fConfig = backendTex.config();
    // A result < 1 means the config/sample-count combo is not renderable.
    surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, backendTex.config());
    if (surfDesc.fSampleCnt < 1) {
        return nullptr;
    }

    GrGLRenderTarget::IDDesc rtIDDesc;
    if (!this->createRenderTargetObjects(surfDesc, idDesc.fInfo, &rtIDDesc)) {
        return nullptr;
    }

    // Existing mips are marked dirty (not valid) since rendering can invalidate them.
    GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kDirty
                                                            : GrMipMapsStatus::kNotAllocated;

    sk_sp<GrGLTextureRenderTarget> texRT(GrGLTextureRenderTarget::MakeWrapped(
            this, surfDesc, idDesc, backendTex.getGLTextureParams(), rtIDDesc, cacheable,
            mipMapsStatus));
    texRT->baseLevelWasBoundToFBO();
    // We don't know what parameters are already set on wrapped textures.
    texRT->textureParamsModified();
    return std::move(texRT);
}
| |
| sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) { |
| GrGLFramebufferInfo info; |
| if (!backendRT.getGLFramebufferInfo(&info)) { |
| return nullptr; |
| } |
| |
| GrGLRenderTarget::IDDesc idDesc; |
| idDesc.fRTFBOID = info.fFBOID; |
| idDesc.fMSColorRenderbufferID = 0; |
| idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID; |
| idDesc.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed; |
| |
| GrSurfaceDesc desc; |
| desc.fFlags = kRenderTarget_GrSurfaceFlag; |
| desc.fWidth = backendRT.width(); |
| desc.fHeight = backendRT.height(); |
| desc.fConfig = backendRT.config(); |
| desc.fSampleCnt = |
| this->caps()->getRenderTargetSampleCount(backendRT.sampleCnt(), backendRT.config()); |
| |
| return GrGLRenderTarget::MakeWrapped(this, desc, info.fFormat, idDesc, backendRT.stencilBits()); |
| } |
| |
| sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex, |
| int sampleCnt) { |
| GrGLTextureInfo info; |
| if (!tex.getGLTextureInfo(&info) || !info.fID) { |
| return nullptr; |
| } |
| |
| if (GR_GL_TEXTURE_RECTANGLE != info.fTarget && |
| GR_GL_TEXTURE_2D != info.fTarget) { |
| // Only texture rectangle and texture 2d are supported. We do not check whether texture |
| // rectangle is supported by Skia - if the caller provided us with a texture rectangle, |
| // we assume the necessary support exists. |
| return nullptr; |
| } |
| |
| GrSurfaceDesc surfDesc; |
| surfDesc.fFlags = kRenderTarget_GrSurfaceFlag; |
| surfDesc.fWidth = tex.width(); |
| surfDesc.fHeight = tex.height(); |
| surfDesc.fConfig = tex.config(); |
| surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.config()); |
| |
| GrGLRenderTarget::IDDesc rtIDDesc; |
| if (!this->createRenderTargetObjects(surfDesc, info, &rtIDDesc)) { |
| return nullptr; |
| } |
| return GrGLRenderTarget::MakeWrapped(this, surfDesc, info.fFormat, rtIDDesc, 0); |
| } |
| |
| static bool check_write_and_transfer_input(GrGLTexture* glTex) { |
| if (!glTex) { |
| return false; |
| } |
| |
| // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures |
| if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) { |
| return false; |
| } |
| |
| return true; |
| } |
| |
// Uploads CPU pixel data into a sub-rect of the surface's texture, possibly
// for multiple mip levels. Returns false if the surface has no writable texture.
bool GrGLGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
                            GrColorType srcColorType, const GrMipLevel texels[],
                            int mipLevelCount) {
    auto glTex = static_cast<GrGLTexture*>(surface->asTexture());

    if (!check_write_and_transfer_input(glTex)) {
        return false;
    }

    // Bind on the scratch unit so we don't disturb tracked per-unit bindings.
    this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());

    // No sRGB transformation occurs in uploadTexData. We choose to make the src config match the
    // srgb-ness of the surface to avoid issues in ES2 where internal/external formats must match.
    // When we're on ES2 and the dst is GL_SRGB_ALPHA by making the config be kSRGB_8888 we know
    // that our caps will choose GL_SRGB_ALPHA as the external format, too. On ES3 or regular GL our
    // caps knows to make the external format be GL_RGBA.
    auto srgbEncoded = GrPixelConfigIsSRGBEncoded(surface->config());
    auto srcAsConfig = GrColorTypeToPixelConfig(srcColorType, srgbEncoded);

    SkASSERT(!GrPixelConfigIsCompressed(glTex->config()));
    return this->uploadTexData(glTex->config(), glTex->width(), glTex->height(), glTex->target(),
                               kWrite_UploadType, left, top, width, height, srcAsConfig, texels,
                               mipLevelCount);
}
| |
| // For GL_[UN]PACK_ALIGNMENT. TODO: This really wants to be GrColorType. |
| static inline GrGLint config_alignment(GrPixelConfig config) { |
| SkASSERT(!GrPixelConfigIsCompressed(config)); |
| switch (config) { |
| case kAlpha_8_GrPixelConfig: |
| case kAlpha_8_as_Alpha_GrPixelConfig: |
| case kAlpha_8_as_Red_GrPixelConfig: |
| case kGray_8_GrPixelConfig: |
| case kGray_8_as_Lum_GrPixelConfig: |
| case kGray_8_as_Red_GrPixelConfig: |
| return 1; |
| case kRGB_565_GrPixelConfig: |
| case kRGBA_4444_GrPixelConfig: |
| case kRG_88_GrPixelConfig: |
| case kAlpha_half_GrPixelConfig: |
| case kAlpha_half_as_Red_GrPixelConfig: |
| case kRGBA_half_GrPixelConfig: |
| case kRGBA_half_Clamped_GrPixelConfig: |
| case kR_16_GrPixelConfig: |
| return 2; |
| case kRGBA_8888_GrPixelConfig: |
| case kRGB_888_GrPixelConfig: // We're really talking about GrColorType::kRGB_888x here. |
| case kRGB_888X_GrPixelConfig: |
| case kBGRA_8888_GrPixelConfig: |
| case kSRGBA_8888_GrPixelConfig: |
| case kSBGRA_8888_GrPixelConfig: |
| case kRGBA_1010102_GrPixelConfig: |
| case kRGBA_float_GrPixelConfig: |
| case kRG_float_GrPixelConfig: |
| case kRG_1616_GrPixelConfig: |
| return 4; |
| case kRGB_ETC1_GrPixelConfig: |
| case kUnknown_GrPixelConfig: |
| return 0; |
| |
| // Experimental (for Y416 and mutant P016/P010) |
| case kRGBA_16161616_GrPixelConfig: |
| return 8; |
| case kRG_half_GrPixelConfig: |
| return 4; |
| } |
| SK_ABORT("Invalid pixel config"); |
| return 0; |
| } |
| |
| bool GrGLGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height, |
| GrColorType bufferColorType, GrGpuBuffer* transferBuffer, |
| size_t offset, size_t rowBytes) { |
| GrGLTexture* glTex = static_cast<GrGLTexture*>(texture); |
| GrPixelConfig texConfig = glTex->config(); |
| SkASSERT(this->caps()->isConfigTexturable(texConfig)); |
| |
| // Can't transfer compressed data |
| SkASSERT(!GrPixelConfigIsCompressed(glTex->config())); |
| |
| if (!check_write_and_transfer_input(glTex)) { |
| return false; |
| } |
| |
| static_assert(sizeof(int) == sizeof(int32_t), ""); |
| if (width <= 0 || height <= 0) { |
| return false; |
| } |
| |
| this->bindTextureToScratchUnit(glTex->target(), glTex->textureID()); |
| |
| SkASSERT(!transferBuffer->isMapped()); |
| SkASSERT(!transferBuffer->isCpuBuffer()); |
| const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer); |
| this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer); |
| |
| SkDEBUGCODE( |
| SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height); |
| SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height()); |
| SkASSERT(bounds.contains(subRect)); |
| ) |
| |
| int bpp = GrColorTypeBytesPerPixel(bufferColorType); |
| const size_t trimRowBytes = width * bpp; |
| if (!rowBytes) { |
| rowBytes = trimRowBytes; |
| } |
| const void* pixels = (void*)offset; |
| if (width < 0 || height < 0) { |
| return false; |
| } |
| |
| bool restoreGLRowLength = false; |
| if (trimRowBytes != rowBytes) { |
| // we should have checked for this support already |
| SkASSERT(this->glCaps().unpackRowLengthSupport()); |
| GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp)); |
| restoreGLRowLength = true; |
| } |
| |
| // Internal format comes from the texture desc. |
| GrGLenum internalFormat; |
| // External format and type come from the upload data. |
| GrGLenum externalFormat; |
| GrGLenum externalType; |
| auto bufferAsConfig = GrColorTypeToPixelConfig(bufferColorType, GrSRGBEncoded::kNo); |
| if (!this->glCaps().getTexImageFormats(texConfig, bufferAsConfig, &internalFormat, |
| &externalFormat, &externalType)) { |
| return false; |
| } |
| |
| GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(texConfig))); |
| GL_CALL(TexSubImage2D(glTex->target(), |
| 0, |
| left, top, |
| width, |
| height, |
| externalFormat, externalType, |
| pixels)); |
| |
| if (restoreGLRowLength) { |
| GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); |
| } |
| |
| return true; |
| } |
| |
| bool GrGLGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height, |
| GrColorType dstColorType, GrGpuBuffer* transferBuffer, |
| size_t offset) { |
| auto* glBuffer = static_cast<GrGLBuffer*>(transferBuffer); |
| this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glBuffer); |
| auto offsetAsPtr = reinterpret_cast<void*>(offset); |
| return this->readOrTransferPixelsFrom(surface, left, top, width, height, dstColorType, |
| offsetAsPtr, width); |
| } |
| |
| /** |
| * Creates storage space for the texture and fills it with texels. |
| * |
| * @param config Pixel config of the texture. |
| * @param interface The GL interface in use. |
| * @param caps The capabilities of the GL device. |
| * @param target Which bound texture to target (GR_GL_TEXTURE_2D, e.g.) |
| * @param internalFormat The data format used for the internal storage of the texture. May be sized. |
| * @param internalFormatForTexStorage The data format used for the TexStorage API. Must be sized. |
| * @param externalFormat The data format used for the external storage of the texture. |
| * @param externalType The type of the data used for the external storage of the texture. |
| * @param texels The texel data of the texture being created. |
| * @param mipLevelCount Number of mipmap levels |
| * @param baseWidth The width of the texture's base mipmap level |
| * @param baseHeight The height of the texture's base mipmap level |
| */ |
static bool allocate_and_populate_texture(GrPixelConfig config,
                                          const GrGLInterface& interface,
                                          const GrGLCaps& caps,
                                          GrGLenum target,
                                          GrGLenum internalFormat,
                                          GrGLenum internalFormatForTexStorage,
                                          GrGLenum externalFormat,
                                          GrGLenum externalType,
                                          const GrMipLevel texels[], int mipLevelCount,
                                          int baseWidth, int baseHeight) {
    CLEAR_ERROR_BEFORE_ALLOC(&interface);

    if (caps.isConfigTexSupportEnabled(config)) {
        // We never resize or change formats of textures.
        // TexStorage2D allocates immutable storage for the whole mip chain in one call
        // (at least one level, even when mipLevelCount is 0).
        GL_ALLOC_CALL(&interface,
                      TexStorage2D(target, SkTMax(mipLevelCount, 1), internalFormatForTexStorage,
                                   baseWidth, baseHeight));
        GrGLenum error = CHECK_ALLOC_ERROR(&interface);
        if (error != GR_GL_NO_ERROR) {
            return false;
        } else {
            // Storage exists; upload whichever levels the caller supplied data for.
            for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
                const void* currentMipData = texels[currentMipLevel].fPixels;
                if (currentMipData == nullptr) {
                    // Levels without data are left uninitialized (storage already allocated).
                    continue;
                }
                int twoToTheMipLevel = 1 << currentMipLevel;
                int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
                int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);

                GR_GL_CALL(&interface,
                           TexSubImage2D(target,
                                         currentMipLevel,
                                         0, // left
                                         0, // top
                                         currentWidth,
                                         currentHeight,
                                         externalFormat, externalType,
                                         currentMipData));
            }
            return true;
        }
    } else {
        if (!mipLevelCount) {
            // No level data at all: allocate just the base level, uninitialized.
            GL_ALLOC_CALL(&interface,
                          TexImage2D(target,
                                     0,
                                     internalFormat,
                                     baseWidth,
                                     baseHeight,
                                     0, // border
                                     externalFormat, externalType,
                                     nullptr));
            GrGLenum error = CHECK_ALLOC_ERROR(&interface);
            if (error != GR_GL_NO_ERROR) {
                return false;
            }
        } else {
            for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
                int twoToTheMipLevel = 1 << currentMipLevel;
                int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
                int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
                const void* currentMipData = texels[currentMipLevel].fPixels;
                // Even if currentMipData is nullptr, continue to call TexImage2D.
                // This will allocate texture memory which we can later populate.
                GL_ALLOC_CALL(&interface,
                              TexImage2D(target,
                                         currentMipLevel,
                                         internalFormat,
                                         currentWidth,
                                         currentHeight,
                                         0, // border
                                         externalFormat, externalType,
                                         currentMipData));
                GrGLenum error = CHECK_ALLOC_ERROR(&interface);
                if (error != GR_GL_NO_ERROR) {
                    return false;
                }
            }
        }
    }
    return true;
}
| |
| /** |
| * Creates storage space for the texture and fills it with texels. |
| * |
| * @param config Compressed pixel config of the texture. |
| * @param interface The GL interface in use. |
| * @param caps The capabilities of the GL device. |
| * @param target Which bound texture to target (GR_GL_TEXTURE_2D, e.g.) |
| * @param internalFormat The data format used for the internal storage of the texture. |
| * @param texels The texel data of the texture being created. |
| * @param mipLevelCount Number of mipmap levels |
| * @param baseWidth The width of the texture's base mipmap level |
| * @param baseHeight The height of the texture's base mipmap level |
| */ |
| static bool allocate_and_populate_compressed_texture(GrPixelConfig config, |
| const GrGLInterface& interface, |
| const GrGLCaps& caps, |
| GrGLenum target, GrGLenum internalFormat, |
| const GrMipLevel texels[], int mipLevelCount, |
| int baseWidth, int baseHeight) { |
| CLEAR_ERROR_BEFORE_ALLOC(&interface); |
| SkASSERT(GrGLFormatIsCompressed(internalFormat)); |
| |
| bool useTexStorage = caps.isConfigTexSupportEnabled(config); |
| // We can only use TexStorage if we know we will not later change the storage requirements. |
| // This means if we may later want to add mipmaps, we cannot use TexStorage. |
| // Right now, we cannot know if we will later add mipmaps or not. |
| // The only time we can use TexStorage is when we already have the |
| // mipmaps. |
| useTexStorage &= mipLevelCount > 1; |
| |
| if (useTexStorage) { |
| // We never resize or change formats of textures. |
| GL_ALLOC_CALL(&interface, |
| TexStorage2D(target, |
| mipLevelCount, |
| internalFormat, |
| baseWidth, baseHeight)); |
| GrGLenum error = CHECK_ALLOC_ERROR(&interface); |
| if (error != GR_GL_NO_ERROR) { |
| return false; |
| } else { |
| for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) { |
| const void* currentMipData = texels[currentMipLevel].fPixels; |
| if (currentMipData == nullptr) { |
| // Compressed textures require data for every level |
| return false; |
| } |
| |
| int twoToTheMipLevel = 1 << currentMipLevel; |
| int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel); |
| int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel); |
| |
| // Make sure that the width and height that we pass to OpenGL |
| // is a multiple of the block size. |
| size_t dataSize = GrGLFormatCompressedDataSize(internalFormat, |
| currentWidth, currentHeight); |
| GR_GL_CALL(&interface, CompressedTexSubImage2D(target, |
| currentMipLevel, |
| 0, // left |
| 0, // top |
| currentWidth, |
| currentHeight, |
| internalFormat, |
| SkToInt(dataSize), |
| currentMipData)); |
| } |
| } |
| } else { |
| for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) { |
| const void* currentMipData = texels[currentMipLevel].fPixels; |
| if (currentMipData == nullptr) { |
| // Compressed textures require data for every level |
| return false; |
| } |
| |
| int twoToTheMipLevel = 1 << currentMipLevel; |
| int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel); |
| int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel); |
| |
| // Make sure that the width and height that we pass to OpenGL |
| // is a multiple of the block size. |
| size_t dataSize = GrGLFormatCompressedDataSize(internalFormat, baseWidth, baseHeight); |
| |
| GL_ALLOC_CALL(&interface, |
| CompressedTexImage2D(target, |
| currentMipLevel, |
| internalFormat, |
| currentWidth, |
| currentHeight, |
| 0, // border |
| SkToInt(dataSize), |
| currentMipData)); |
| |
| GrGLenum error = CHECK_ALLOC_ERROR(&interface); |
| if (error != GR_GL_NO_ERROR) { |
| return false; |
| } |
| } |
| } |
| |
| return true; |
| } |
| /** |
| * After a texture is created, any state which was altered during its creation |
| * needs to be restored. |
| * |
| * @param interface The GL interface to use. |
| * @param caps The capabilities of the GL device. |
| * @param restoreGLRowLength Should the row length unpacking be restored? |
| * @param glFlipY Did GL flip the texture vertically? |
| */ |
| static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps, |
| bool restoreGLRowLength) { |
| if (restoreGLRowLength) { |
| SkASSERT(caps.unpackRowLengthSupport()); |
| GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); |
| } |
| } |
| |
| void GrGLGpu::unbindCpuToGpuXferBuffer() { |
| auto* xferBufferState = this->hwBufferState(GrGpuBufferType::kXferCpuToGpu); |
| if (!xferBufferState->fBoundBufferUniqueID.isInvalid()) { |
| GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0)); |
| xferBufferState->invalidate(); |
| } |
| } |
| |
| // TODO: Make this take a GrColorType instead of dataConfig. This requires updating GrGLCaps to |
| // convert from GrColorType to externalFormat/externalType GLenum values. |
bool GrGLGpu::uploadTexData(GrPixelConfig texConfig, int texWidth, int texHeight, GrGLenum target,
                            UploadType uploadType, int left, int top, int width, int height,
                            GrPixelConfig dataConfig, const GrMipLevel texels[], int mipLevelCount,
                            GrMipMapsStatus* mipMapsStatus) {
    // Uploads (and, for kNewTexture, first allocates) uncompressed texel data for the texture
    // currently bound to 'target'. Returns false if the upload could not be performed.
    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    SkASSERT(this->caps()->isConfigTexturable(texConfig));
    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(texWidth, texHeight);
        SkASSERT(bounds.contains(subRect));
    )
    // Sub-rect uploads are only supported for a single level; mipped uploads must be full.
    SkASSERT(1 == mipLevelCount ||
             (0 == left && 0 == top && width == texWidth && height == texHeight));

    // This path sources from client memory, so no pixel-unpack buffer may be bound.
    this->unbindCpuToGpuXferBuffer();

    // texels is const.
    // But we may need to flip the texture vertically to prepare it.
    // Rather than flip in place and alter the incoming data,
    // we allocate a new buffer to flip into.
    // This means we need to make a non-const shallow copy of texels.
    SkAutoTMalloc<GrMipLevel> texelsShallowCopy;

    if (mipLevelCount) {
        texelsShallowCopy.reset(mipLevelCount);
        memcpy(texelsShallowCopy.get(), texels, mipLevelCount*sizeof(GrMipLevel));
    }

    const GrGLInterface* interface = this->glInterface();
    const GrGLCaps& caps = this->glCaps();

    size_t bpp = GrBytesPerPixel(dataConfig);

    if (width == 0 || height == 0) {
        return false;
    }

    // Internal format comes from the texture desc.
    GrGLenum internalFormat;
    // External format and type come from the upload data.
    GrGLenum externalFormat;
    GrGLenum externalType;
    if (!this->glCaps().getTexImageFormats(texConfig, dataConfig, &internalFormat, &externalFormat,
                                           &externalType)) {
        return false;
    }
    // TexStorage requires a sized format, and internalFormat may or may not be
    GrGLenum internalFormatForTexStorage = this->glCaps().configSizedInternalFormat(texConfig);

    /*
     *  Check whether to allocate a temporary buffer for flipping y or
     *  because our srcData has extra bytes past each row. If so, we need
     *  to trim those off here, since GL ES may not let us specify
     *  GL_UNPACK_ROW_LENGTH.
     */
    bool restoreGLRowLength = false;

    // in case we need a temporary, trimmed copy of the src pixels
    SkAutoSMalloc<128 * 128> tempStorage;

    if (mipMapsStatus) {
        *mipMapsStatus = GrMipMapsStatus::kValid;
    }

    const bool usesMips = mipLevelCount > 1;

    // find the combined size of all the mip levels and the relative offset of
    // each into the collective buffer
    bool willNeedData = false;
    size_t combinedBufferSize = 0;
    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            int twoToTheMipLevel = 1 << currentMipLevel;
            int currentWidth = SkTMax(1, width / twoToTheMipLevel);
            int currentHeight = SkTMax(1, height / twoToTheMipLevel);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t trimmedSize = trimRowBytes * currentHeight;

            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes
                    ? texelsShallowCopy[currentMipLevel].fRowBytes
                    : trimRowBytes;

            // A trimmed CPU-side copy is required when rows are padded but we can't (no
            // cap) or won't (mipped upload, see TODO below) use GL_UNPACK_ROW_LENGTH.
            if (((!caps.unpackRowLengthSupport() || usesMips) && trimRowBytes != rowBytes)) {
                willNeedData = true;
            }

            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        } else {
            // Level has no data: the resulting mip chain is incomplete.
            if (mipMapsStatus) {
                *mipMapsStatus = GrMipMapsStatus::kDirty;
            }
            individualMipOffsets.push_back(0);
        }
    }
    if (mipMapsStatus && mipLevelCount <= 1) {
        *mipMapsStatus = GrMipMapsStatus::kNotAllocated;
    }
    char* buffer = nullptr;
    if (willNeedData) {
        buffer = (char*)tempStorage.reset(combinedBufferSize);
    }

    // Second pass: per level, either configure GL_UNPACK_ROW_LENGTH or trim the rows into
    // the temporary buffer allocated above.
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (!texelsShallowCopy[currentMipLevel].fPixels) {
            continue;
        }
        int twoToTheMipLevel = 1 << currentMipLevel;
        int currentWidth = SkTMax(1, width / twoToTheMipLevel);
        int currentHeight = SkTMax(1, height / twoToTheMipLevel);
        const size_t trimRowBytes = currentWidth * bpp;

        /*
         *  check whether to allocate a temporary buffer for flipping y or
         *  because our srcData has extra bytes past each row. If so, we need
         *  to trim those off here, since GL ES may not let us specify
         *  GL_UNPACK_ROW_LENGTH.
         */
        restoreGLRowLength = false;

        const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes
                ? texelsShallowCopy[currentMipLevel].fRowBytes
                : trimRowBytes;

        // TODO: This optimization should be enabled with or without mips.
        // For use with mips, we must set GR_GL_UNPACK_ROW_LENGTH once per
        // mip level, before calling glTexImage2D.
        if (caps.unpackRowLengthSupport() && !usesMips) {
            // can't use this for flipping, only non-neg values allowed. :(
            if (rowBytes != trimRowBytes) {
                GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
                GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
                restoreGLRowLength = true;
            }
        } else if (trimRowBytes != rowBytes) {
            // copy data into our new storage, skipping the trailing bytes
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
            // now point data to our copied version
            texelsShallowCopy[currentMipLevel].fPixels = buffer +
                                                         individualMipOffsets[currentMipLevel];
            texelsShallowCopy[currentMipLevel].fRowBytes = trimRowBytes;
        }
    }

    if (mipLevelCount) {
        GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(texConfig)));
    }

    bool succeeded = true;
    if (kNewTexture_UploadType == uploadType) {
        if (0 == left && 0 == top && texWidth == width && texHeight == height) {
            succeeded = allocate_and_populate_texture(
                    texConfig, *interface, caps, target, internalFormat,
                    internalFormatForTexStorage, externalFormat, externalType,
                    texelsShallowCopy, mipLevelCount, width, height);
        } else {
            // A brand new texture must be fully specified; a partial rect is an error here.
            succeeded = false;
        }
    } else {
        // Existing texture: update each provided level in place.
        for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
            if (!texelsShallowCopy[currentMipLevel].fPixels) {
                continue;
            }
            int twoToTheMipLevel = 1 << currentMipLevel;
            int currentWidth = SkTMax(1, width / twoToTheMipLevel);
            int currentHeight = SkTMax(1, height / twoToTheMipLevel);

            GL_CALL(TexSubImage2D(target,
                                  currentMipLevel,
                                  left, top,
                                  currentWidth,
                                  currentHeight,
                                  externalFormat, externalType,
                                  texelsShallowCopy[currentMipLevel].fPixels));
        }
    }

    restore_pixelstore_state(*interface, caps, restoreGLRowLength);

    return succeeded;
}
| |
| bool GrGLGpu::uploadCompressedTexData(GrPixelConfig texConfig, int texWidth, int texHeight, |
| GrGLenum target, |
| const GrMipLevel texels[], int mipLevelCount, |
| GrMipMapsStatus* mipMapsStatus) { |
| SkASSERT(this->caps()->isConfigTexturable(texConfig)); |
| |
| const GrGLInterface* interface = this->glInterface(); |
| const GrGLCaps& caps = this->glCaps(); |
| |
| // We only need the internal format for compressed 2D textures. |
| GrGLenum internalFormat; |
| if (!caps.getCompressedTexImageFormats(texConfig, &internalFormat)) { |
| return false; |
| } |
| |
| if (mipMapsStatus) { |
| if (mipLevelCount <= 1) { |
| *mipMapsStatus = GrMipMapsStatus::kNotAllocated; |
| } else { |
| *mipMapsStatus = GrMipMapsStatus::kValid; |
| } |
| } |
| |
| return allocate_and_populate_compressed_texture(texConfig, *interface, caps, target, |
| internalFormat, texels, mipLevelCount, |
| texWidth, texHeight); |
| } |
| |
// Allocates multisampled storage for the currently bound renderbuffer using whichever MSAA
// extension flavor the context supports. Returns true if no GL error was raised.
static bool renderbuffer_storage_msaa(const GrGLContext& ctx,
                                      int sampleCount,
                                      GrGLenum format,
                                      int width, int height) {
    CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
    SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
    switch (ctx.caps()->msFBOType()) {
        case GrGLCaps::kStandard_MSFBOType:
            // Core / ARB-style multisampled renderbuffers.
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
                                                         sampleCount,
                                                         format,
                                                         width, height));
            break;
        case GrGLCaps::kES_Apple_MSFBOType:
            // GL_APPLE_framebuffer_multisample entry point (ES 2.0).
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
                                                                 sampleCount,
                                                                 format,
                                                                 width, height));
            break;
        case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
        case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
            // Both multisample-to-texture flavors share the EXT entry point here.
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
                                                               sampleCount,
                                                               format,
                                                               width, height));
            break;
        case GrGLCaps::kNone_MSFBOType:
            SK_ABORT("Shouldn't be here if we don't support multisampled renderbuffers.");
            break;
    }
    return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
}
| |
bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc,
                                        const GrGLTextureInfo& texInfo,
                                        GrGLRenderTarget::IDDesc* idDesc) {
    // Creates the FBO(s) (plus an MSAA color renderbuffer when needed) that wrap 'texInfo'
    // as a render target. On any failure all partially created GL objects are torn down
    // and false is returned.
    idDesc->fMSColorRenderbufferID = 0;
    idDesc->fRTFBOID = 0;
    idDesc->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
    idDesc->fTexFBOID = 0;

    GrGLenum status;

    GrGLenum colorRenderbufferFormat = 0; // suppress warning

    if (desc.fSampleCnt > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
        goto FAILED;
    }

    GL_CALL(GenFramebuffers(1, &idDesc->fTexFBOID));
    if (!idDesc->fTexFBOID) {
        goto FAILED;
    }

    // If we are using multisampling we will create two FBOS. We render to one and then resolve to
    // the texture bound to the other. The exception is the IMG multisample extension. With this
    // extension the texture is multisampled when rendered to and then auto-resolves it when it is
    // rendered from.
    if (desc.fSampleCnt > 1 && this->glCaps().usesMSAARenderBuffers()) {
        GL_CALL(GenFramebuffers(1, &idDesc->fRTFBOID));
        GL_CALL(GenRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
        if (!idDesc->fRTFBOID ||
            !idDesc->fMSColorRenderbufferID) {
            goto FAILED;
        }
        this->glCaps().getRenderbufferFormat(desc.fConfig, &colorRenderbufferFormat);
    } else {
        // Single-sampled (or implicit-resolve) case: render directly into the texture's FBO.
        idDesc->fRTFBOID = idDesc->fTexFBOID;
    }

    // below here we may bind the FBO
    fHWBoundRenderTargetUniqueID.makeInvalid();
    if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
        SkASSERT(desc.fSampleCnt > 1);
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, idDesc->fMSColorRenderbufferID));
        if (!renderbuffer_storage_msaa(*fGLContext,
                                       desc.fSampleCnt,
                                       colorRenderbufferFormat,
                                       desc.fWidth, desc.fHeight)) {
            goto FAILED;
        }
        this->bindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fRTFBOID);
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                        GR_GL_COLOR_ATTACHMENT0,
                                        GR_GL_RENDERBUFFER,
                                        idDesc->fMSColorRenderbufferID));
        // Completeness is only checked once per config; afterwards the cached verdict is used.
        if (!this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                goto FAILED;
            }
            fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
        }
    }
    this->bindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fTexFBOID);

    if (this->glCaps().usesImplicitMSAAResolve() && desc.fSampleCnt > 1) {
        GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
                                                GR_GL_COLOR_ATTACHMENT0,
                                                texInfo.fTarget,
                                                texInfo.fID, 0, desc.fSampleCnt));
    } else {
        GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
                                     GR_GL_COLOR_ATTACHMENT0,
                                     texInfo.fTarget,
                                     texInfo.fID, 0));
    }
    if (!this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
            goto FAILED;
        }
        fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
    }

    return true;

FAILED:
    // Tear down whatever was created before the failure.
    if (idDesc->fMSColorRenderbufferID) {
        GL_CALL(DeleteRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
    }
    if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
        this->deleteFramebuffer(idDesc->fRTFBOID);
    }
    if (idDesc->fTexFBOID) {
        this->deleteFramebuffer(idDesc->fTexFBOID);
    }
    return false;
}
| |
| // good to set a break-point here to know when createTexture fails |
| static sk_sp<GrTexture> return_null_texture() { |
| // SkDEBUGFAIL("null texture"); |
| return nullptr; |
| } |
| |
| static GrGLTextureParameters::SamplerOverriddenState set_initial_texture_params( |
| const GrGLInterface* interface, const GrGLTextureInfo& info) { |
| // Some drivers like to know filter/wrap before seeing glTexImage2D. Some |
| // drivers have a bug where an FBO won't be complete if it includes a |
| // texture that is not mipmap complete (considering the filter in use). |
| GrGLTextureParameters::SamplerOverriddenState state; |
| state.fMinFilter = GR_GL_NEAREST; |
| state.fMagFilter = GR_GL_NEAREST; |
| state.fWrapS = GR_GL_CLAMP_TO_EDGE; |
| state.fWrapT = GR_GL_CLAMP_TO_EDGE; |
| GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_MAG_FILTER, state.fMagFilter)); |
| GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_MIN_FILTER, state.fMinFilter)); |
| GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_S, state.fWrapS)); |
| GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_T, state.fWrapT)); |
| return state; |
| } |
| |
sk_sp<GrTexture> GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
                                          SkBudgeted budgeted,
                                          const GrMipLevel texels[],
                                          int mipLevelCount) {
    // We fail if the MSAA was requested and is not available.
    if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt > 1) {
        //SkDebugf("MSAA RT requested but not supported on this platform.");
        return return_null_texture();
    }

    GrGLenum glFormat = this->glCaps().configSizedInternalFormat(desc.fConfig);

    bool performClear = (desc.fFlags & kPerformInitialClear_GrSurfaceFlag) &&
                        !GrGLFormatIsCompressed(glFormat);

    // If neither glClearTexImage nor an FBO-based clear is available, synthesize a zeroed
    // level-0 upload instead and drop the post-creation clear.
    GrMipLevel zeroLevel;
    std::unique_ptr<uint8_t[]> zeros;
    if (performClear && !this->glCaps().clearTextureSupport() &&
        !this->glCaps().canConfigBeFBOColorAttachment(desc.fConfig)) {
        size_t rowSize = GrGLBytesPerFormat(glFormat) * desc.fWidth;
        size_t size = rowSize * desc.fHeight;
        zeros.reset(new uint8_t[size]);
        memset(zeros.get(), 0, size);
        zeroLevel.fPixels = zeros.get();
        zeroLevel.fRowBytes = 0;
        texels = &zeroLevel;
        mipLevelCount = 1;
        performClear = false;
    }

    bool isRenderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    GrGLTexture::IDDesc idDesc;
    idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
    GrMipMapsStatus mipMapsStatus;
    GrGLTextureParameters::SamplerOverriddenState initialState;
    if (!this->createTextureImpl(desc, &idDesc.fInfo,
                                 isRenderTarget ? GrRenderable::kYes : GrRenderable::kNo,
                                 &initialState, texels, mipLevelCount, &mipMapsStatus)) {
        return return_null_texture();
    }

    sk_sp<GrGLTexture> tex;
    if (isRenderTarget) {
        // unbind the texture from the texture unit before binding it to the frame buffer
        GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0));
        GrGLRenderTarget::IDDesc rtIDDesc;

        if (!this->createRenderTargetObjects(desc, idDesc.fInfo, &rtIDDesc)) {
            GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
            return return_null_texture();
        }
        tex = sk_make_sp<GrGLTextureRenderTarget>(this, budgeted, desc, idDesc, rtIDDesc,
                                                  mipMapsStatus);
        tex->baseLevelWasBoundToFBO();
    } else {
        tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, idDesc, mipMapsStatus);
    }
    // The non-sampler params are still at their default values.
    tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
                           fResetTimestampForTextureParameters);
#ifdef TRACE_TEXTURE_CREATION
    SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
             idDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
#endif
    if (tex && performClear) {
        if (this->glCaps().clearTextureSupport()) {
            // Cheapest path: clear the texture without binding an FBO.
            static constexpr uint32_t kZero = 0;
            GL_CALL(ClearTexImage(tex->textureID(), 0, GR_GL_RGBA, GR_GL_UNSIGNED_BYTE, &kZero));
        } else {
            // Fallback: attach the texture to a scratch FBO and glClear it.
            this->bindSurfaceFBOForPixelOps(tex.get(), GR_GL_FRAMEBUFFER, kDst_TempFBOTarget);
            this->disableScissor();
            this->disableWindowRectangles();
            this->flushColorWrite(true);
            this->flushClearColor(0, 0, 0, 0);
            GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
            this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, tex.get());
            fHWBoundRenderTargetUniqueID.makeInvalid();
        }
    }
    return std::move(tex);
}
| |
namespace {

const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;

// Queries GL for the stencil (and, for packed depth-stencil formats, depth) bit counts of
// the currently bound renderbuffer and fills them into 'format' if not already known.
void inline get_stencil_rb_sizes(const GrGLInterface* gl,
                                 GrGLStencilAttachment::Format* format) {

    // we shouldn't ever know one size and not the other
    SkASSERT((kUnknownBitCount == format->fStencilBits) ==
             (kUnknownBitCount == format->fTotalBits));
    if (kUnknownBitCount == format->fStencilBits) {
        GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                         GR_GL_RENDERBUFFER_STENCIL_SIZE,
                                         (GrGLint*)&format->fStencilBits);
        if (format->fPacked) {
            // Packed depth-stencil: total bits = depth bits + stencil bits.
            GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                             GR_GL_RENDERBUFFER_DEPTH_SIZE,
                                             (GrGLint*)&format->fTotalBits);
            format->fTotalBits += format->fStencilBits;
        } else {
            format->fTotalBits = format->fStencilBits;
        }
    }
}
}
| |
| int GrGLGpu::getCompatibleStencilIndex(GrPixelConfig config) { |
| static const int kSize = 16; |
| SkASSERT(this->caps()->isConfigRenderable(config)); |
| if (!this->glCaps().hasStencilFormatBeenDeterminedForConfig(config)) { |
| // Default to unsupported, set this if we find a stencil format that works. |
| int firstWorkingStencilFormatIndex = -1; |
| |
| // Create color texture |
| GrGLuint colorID = 0; |
| GL_CALL(GenTextures(1, &colorID)); |
| this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, colorID); |
| GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, |
| GR_GL_TEXTURE_MAG_FILTER, |
| GR_GL_NEAREST)); |
| GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, |
| GR_GL_TEXTURE_MIN_FILTER, |
| GR_GL_NEAREST)); |
| GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, |
| GR_GL_TEXTURE_WRAP_S, |
| GR_GL_CLAMP_TO_EDGE)); |
| GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, |
| GR_GL_TEXTURE_WRAP_T, |
| GR_GL_CLAMP_TO_EDGE)); |
| |
| GrGLenum internalFormat; |
| GrGLenum externalFormat; |
| GrGLenum externalType; |
| if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat, |
| &externalType)) { |
| return false; |
| } |
| this->unbindCpuToGpuXferBuffer(); |
| CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); |
| GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D, |
| 0, |
| internalFormat, |
| kSize, |
| kSize, |
| 0, |
| externalFormat, |
| externalType, |
| nullptr)); |
| if (GR_GL_NO_ERROR != CHECK_ALLOC_ERROR(this->glInterface())) { |
| GL_CALL(DeleteTextures(1, &colorID)); |
| return -1; |
| } |
| |
| // unbind the texture from the texture unit before binding it to the frame buffer |
| GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0)); |
| |
| // Create Framebuffer |
| GrGLuint fb = 0; |
| GL_CALL(GenFramebuffers(1, &fb)); |
| this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb); |
| fHWBoundRenderTargetUniqueID.makeInvalid(); |
| GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, |
| GR_GL_COLOR_ATTACHMENT0, |
| GR_GL_TEXTURE_2D, |
| colorID, |
| 0)); |
| GrGLuint sbRBID = 0; |
| GL_CALL(GenRenderbuffers(1, &sbRBID)); |
| |
| // look over formats till I find a compatible one |
| int stencilFmtCnt = this->glCaps().stencilFormats().count(); |
| if (sbRBID) { |
| GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID)); |
| for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) { |
| const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i]; |
| CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); |
| GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER, |
| sFmt.fInternalFormat, |
| kSize, kSize)); |
| if (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface())) { |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_STENCIL_ATTACHMENT, |
| GR_GL_RENDERBUFFER, sbRBID)); |
| if (sFmt.fPacked) { |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_DEPTH_ATTACHMENT, |
| GR_GL_RENDERBUFFER, sbRBID)); |
| } else { |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_DEPTH_ATTACHMENT, |
| GR_GL_RENDERBUFFER, 0)); |
| } |
| GrGLenum status; |
| GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); |
| if (status == GR_GL_FRAMEBUFFER_COMPLETE) { |
| firstWorkingStencilFormatIndex = i; |
| break; |
| } |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_STENCIL_ATTACHMENT, |
| GR_GL_RENDERBUFFER, 0)); |
| if (sFmt.fPacked) { |
| GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
| GR_GL_DEPTH_ATTACHMENT, |
| GR_GL_RENDERBUFFER, 0)); |
| } |
| } |
| } |
| GL_CALL(DeleteRenderbuffers(1, &sbRBID)); |
| } |
| GL_CALL(DeleteTextures(1, &colorID)); |
| this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0); |
| this->deleteFramebuffer(fb); |
| fGLContext->caps()->setStencilFormatIndexForConfig(config, firstWorkingStencilFormatIndex); |
| } |
| return this->glCaps().getStencilFormatIndexForConfig(config); |
| } |
| |
| bool GrGLGpu::createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info, |
| GrRenderable renderable, |
| GrGLTextureParameters::SamplerOverriddenState* initialState, |
| const GrMipLevel texels[], int mipLevelCount, |
| GrMipMapsStatus* mipMapsStatus) { |
| info->fID = 0; |
| info->fTarget = GR_GL_TEXTURE_2D; |
| GL_CALL(GenTextures(1, &(info->fID))); |
| |
| if (!info->fID) { |
| return false; |
| } |
| |
| info->fFormat = this->glCaps().configSizedInternalFormat(desc.fConfig); |
| |
| this->bindTextureToScratchUnit(info->fTarget, info->fID); |
| |
| if (GrRenderable::kYes == renderable && this->glCaps().textureUsageSupport()) { |
| // provides a hint about how this texture will be used |
| GL_CALL(TexParameteri(info->fTarget, |
| GR_GL_TEXTURE_USAGE, |
| GR_GL_FRAMEBUFFER_ATTACHMENT)); |
| } |
| |
| *initialState = set_initial_texture_params(this->glInterface(), *info); |
| |
| bool success = false; |
| if (GrGLFormatIsCompressed(info->fFormat)) { |
| SkASSERT(GrRenderable::kNo == renderable); |
| |
| success = this->uploadCompressedTexData(desc.fConfig, desc.fWidth, desc.fHeight, |
| info->fTarget, |
| texels, mipLevelCount, mipMapsStatus); |
| } else { |
| success = this->uploadTexData(desc.fConfig, desc.fWidth, desc.fHeight, info->fTarget, |
| kNewTexture_UploadType, 0, 0, desc.fWidth, desc.fHeight, |
| desc.fConfig, texels, mipLevelCount, mipMapsStatus); |
| } |
| if (!success) { |
| GL_CALL(DeleteTextures(1, &(info->fID))); |
| return false; |
| } |
| return true; |
| } |
| |
GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width, int height) {
    // Creates a stencil renderbuffer usable with this render target's pixel config.
    // The attachment may be sized larger than the render target itself.
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numSamples();
    GrGLStencilAttachment::IDDesc sbDesc;

    // Look up a stencil format already proven compatible with this config
    // (getCompatibleStencilIndex probes formats and caches the result in caps).
    int sIdx = this->getCompatibleStencilIndex(rt->config());
    if (sIdx < 0) {
        return nullptr;
    }

    // NOTE(review): sbDesc was just default-constructed, so this check presumably
    // always passes -- confirm IDDesc zero-initializes fRenderbufferID.
    if (!sbDesc.fRenderbufferID) {
        GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID));
    }
    if (!sbDesc.fRenderbufferID) {
        return nullptr;
    }
    GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID));
    const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx];
    CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
    // we do this "if" so that we don't call the multisample
    // version on a GL that doesn't have an MSAA extension.
    if (samples > 1) {
        SkAssertResult(renderbuffer_storage_msaa(*fGLContext,
                                                 samples,
                                                 sFmt.fInternalFormat,
                                                 width, height));
    } else {
        GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
                                                               sFmt.fInternalFormat,
                                                               width, height));
        SkASSERT(GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface()));
    }
    fStats.incStencilAttachmentCreates();
    // After sized formats we attempt an unsized format and take
    // whatever sizes GL gives us. In that case we query for the size.
    GrGLStencilAttachment::Format format = sFmt;
    get_stencil_rb_sizes(this->glInterface(), &format);
    GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this,
                                                               sbDesc,
                                                               width,
                                                               height,
                                                               samples,
                                                               format);
    return stencil;
}
| |
| //////////////////////////////////////////////////////////////////////////////// |
| |
// Creates a GPU buffer object; delegates entirely to GrGLBuffer::Make, which
// decides how to back the buffer for the given type and access pattern.
sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType,
                                           GrAccessPattern accessPattern, const void* data) {
    return GrGLBuffer::Make(this, size, intendedType, accessPattern, data);
}
| |
| void GrGLGpu::flushScissor(const GrScissorState& scissorState, int rtWidth, int rtHeight, |
| GrSurfaceOrigin rtOrigin) { |
| if (scissorState.enabled()) { |
| GrGLIRect scissor; |
| scissor.setRelativeTo(rtHeight, scissorState.rect(), rtOrigin); |
| // if the scissor fully contains the viewport then we fall through and |
| // disable the scissor test. |
| if (!scissor.contains(rtWidth, rtHeight)) { |
| if (fHWScissorSettings.fRect != scissor) { |
| scissor.pushToGLScissor(this->glInterface()); |
| fHWScissorSettings.fRect = scissor; |
| } |
| if (kYes_TriState != fHWScissorSettings.fEnabled) { |
| GL_CALL(Enable(GR_GL_SCISSOR_TEST)); |
| fHWScissorSettings.fEnabled = kYes_TriState; |
| } |
| return; |
| } |
| } |
| |
| // See fall through note above |
| this->disableScissor(); |
| } |
| |
// Flushes the pipeline's window-rectangles state to GL, converting each SkIRect
// into a GL-oriented rect relative to the render target. No-op when the feature
// is unsupported or the cached HW state already matches.
void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
                                    const GrGLRenderTarget* rt, GrSurfaceOrigin origin) {
#ifndef USE_NSIGHT
    typedef GrWindowRectsState::Mode Mode;
    SkASSERT(!windowState.enabled() || rt->renderFBOID()); // Window rects can't be used on-screen.
    SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());

    if (!this->caps()->maxWindowRectangles() ||
        fHWWindowRectsState.knownEqualTo(origin, rt->width(), rt->height(), windowState)) {
        return;
    }

    // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
    // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
    int numWindows = SkTMin(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
    SkASSERT(windowState.numWindows() == numWindows);

    // Convert each window rect into viewport-relative GL coordinates.
    GrGLIRect glwindows[GrWindowRectangles::kMaxWindows];
    const SkIRect* skwindows = windowState.windows().data();
    for (int i = 0; i < numWindows; ++i) {
        glwindows[i].setRelativeTo(rt->height(), skwindows[i], origin);
    }

    GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE;
    GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));

    // Record what we just programmed so future calls can early-out.
    fHWWindowRectsState.set(origin, rt->width(), rt->height(), windowState);
#endif
}
| |
| void GrGLGpu::disableWindowRectangles() { |
| #ifndef USE_NSIGHT |
| if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) { |
| return; |
| } |
| GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr)); |
| fHWWindowRectsState.setDisabled(); |
| #endif |
| } |
| |
| void GrGLGpu::resolveAndGenerateMipMapsForProcessorTextures( |
| const GrPrimitiveProcessor& primProc, |
| const GrPipeline& pipeline, |
| const GrTextureProxy* const primProcTextures[], |
| int numPrimitiveProcessorTextureSets) { |
| auto genLevelsIfNeeded = [this](GrTexture* tex, const GrSamplerState& sampler) { |
| SkASSERT(tex); |
| if (sampler.filter() == GrSamplerState::Filter::kMipMap && |
| tex->texturePriv().mipMapped() == GrMipMapped::kYes && |
| tex->texturePriv().mipMapsAreDirty()) { |
| SkASSERT(this->caps()->mipMapSupport()); |
| this->regenerateMipMapLevels(static_cast<GrGLTexture*>(tex)); |
| SkASSERT(!tex->asRenderTarget() || !tex->asRenderTarget()->needsResolve()); |
| } else if (auto* rt = tex->asRenderTarget()) { |
| if (rt->needsResolve()) { |
| this->resolveRenderTarget(rt); |
| } |
| } |
| }; |
| |
| for (int set = 0, tex = 0; set < numPrimitiveProcessorTextureSets; ++set) { |
| for (int sampler = 0; sampler < primProc.numTextureSamplers(); ++sampler, ++tex) { |
| GrTexture* texture = primProcTextures[tex]->peekTexture(); |
| genLevelsIfNeeded(texture, primProc.textureSampler(sampler).samplerState()); |
| } |
| } |
| |
| GrFragmentProcessor::Iter iter(pipeline); |
| while (const GrFragmentProcessor* fp = iter.next()) { |
| for (int i = 0; i < fp->numTextureSamplers(); ++i) { |
| const auto& textureSampler = fp->textureSampler(i); |
| genLevelsIfNeeded(textureSampler.peekTexture(), textureSampler.samplerState()); |
| } |
| } |
| } |
| |
// Flushes all GL state needed to issue a draw with the given pipeline/processor:
// program, blend, stencil, scissor, window rects, MSAA, and the render target
// binding. Returns false if a program could not be created. The ordering of the
// flush calls below matters (see the comment before flushRenderTarget).
bool GrGLGpu::flushGLState(GrRenderTarget* renderTarget,
                           GrSurfaceOrigin origin,
                           const GrPrimitiveProcessor& primProc,
                           const GrPipeline& pipeline,
                           const GrPipeline::FixedDynamicState* fixedDynamicState,
                           const GrPipeline::DynamicStateArrays* dynamicStateArrays,
                           int dynamicStateArraysLength,
                           bool willDrawPoints) {
    // Primitive-processor textures come either from the dynamic-state arrays
    // (one set per draw; bound later, so fPrimProcProxiesToBind stays null) or
    // from the fixed dynamic state (a single shared set bound here).
    const GrTextureProxy* const* primProcProxiesForMipRegen = nullptr;
    const GrTextureProxy* const* primProcProxiesToBind = nullptr;
    int numPrimProcTextureSets = 1;  // number of textures per prim proc sampler.
    if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
        primProcProxiesForMipRegen = dynamicStateArrays->fPrimitiveProcessorTextures;
        numPrimProcTextureSets = dynamicStateArraysLength;
    } else if (fixedDynamicState && fixedDynamicState->fPrimitiveProcessorTextures) {
        primProcProxiesForMipRegen = fixedDynamicState->fPrimitiveProcessorTextures;
        primProcProxiesToBind = fixedDynamicState->fPrimitiveProcessorTextures;
    }

    SkASSERT(SkToBool(primProcProxiesForMipRegen) == SkToBool(primProc.numTextureSamplers()));

    // Look up (or compile) the GL program for this draw configuration.
    sk_sp<GrGLProgram> program(fProgramCache->refProgram(this, renderTarget, origin, primProc,
                                                         primProcProxiesForMipRegen,
                                                         pipeline, willDrawPoints));
    if (!program) {
        GrCapsDebugf(this->caps(), "Failed to create program!\n");
        return false;
    }
    // Must happen before binding textures: may regenerate mips / resolve MSAA.
    this->resolveAndGenerateMipMapsForProcessorTextures(
            primProc, pipeline, primProcProxiesForMipRegen, numPrimProcTextureSets);

    GrXferProcessor::BlendInfo blendInfo;
    pipeline.getXferProcessor().getBlendInfo(&blendInfo);

    this->flushColorWrite(blendInfo.fWriteColor);

    this->flushProgram(std::move(program));

    // Swizzle the blend to match what the shader will output.
    this->flushBlend(blendInfo, pipeline.outputSwizzle());

    fHWProgram->updateUniformsAndTextureBindings(renderTarget, origin,
                                                 primProc, pipeline, primProcProxiesToBind);

    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
    GrStencilSettings stencil;
    if (pipeline.isStencilEnabled()) {
        // TODO: attach stencil and create settings during render target flush.
        SkASSERT(glRT->renderTargetPriv().getStencilAttachment());
        stencil.reset(*pipeline.getUserStencil(), pipeline.hasStencilClip(),
                      glRT->renderTargetPriv().numStencilBits());
    }
    this->flushStencil(stencil, origin);
    if (pipeline.isScissorEnabled()) {
        // With dynamic scissor the real rect is flushed per-draw; this bogus rect
        // just ensures the scissor test is enabled here.
        static constexpr SkIRect kBogusScissor{0, 0, 1, 1};
        GrScissorState state(fixedDynamicState ? fixedDynamicState->fScissorRect : kBogusScissor);
        this->flushScissor(state, glRT->width(), glRT->height(), origin);
    } else {
        this->disableScissor();
    }
    this->flushWindowRectangles(pipeline.getWindowRectsState(), glRT, origin);
    this->flushHWAAState(glRT, pipeline.isHWAntialiasState());

    // This must come after textures are flushed because a texture may need
    // to be msaa-resolved (which will modify bound FBO state).
    this->flushRenderTarget(glRT);

    return true;
}
| |
| void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) { |
| if (!program) { |
| fHWProgram.reset(); |
| fHWProgramID = 0; |
| return; |
| } |
| SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID())); |
| if (program == fHWProgram) { |
| return; |
| } |
| auto id = program->programID(); |
| SkASSERT(id); |
| GL_CALL(UseProgram(id)); |
| fHWProgram = std::move(program); |
| fHWProgramID = id; |
| } |
| |
| void GrGLGpu::flushProgram(GrGLuint id) { |
| SkASSERT(id); |
| if (fHWProgramID == id) { |
| SkASSERT(!fHWProgram); |
| return; |
| } |
| fHWProgram.reset(); |
| GL_CALL(UseProgram(id)); |
| fHWProgramID = id; |
| } |
| |
// Binds the vertex array state for a draw: selects/binds the internal VAO,
// enables the needed attribute arrays, and points each vertex and instance
// attribute of the current program (fHWProgram) at its buffer location.
void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
                            const GrBuffer* vertexBuffer,
                            int baseVertex,
                            const GrBuffer* instanceBuffer,
                            int baseInstance,
                            GrPrimitiveRestart enablePrimitiveRestart) {
    // Primitive restart only makes sense for indexed draws.
    SkASSERT((enablePrimitiveRestart == GrPrimitiveRestart::kNo) || indexBuffer);

    // The index buffer binding lives in the VAO, so it is passed to the VAO binder.
    GrGLAttribArrayState* attribState;
    if (indexBuffer) {
        SkASSERT(indexBuffer->isCpuBuffer() ||
                 !static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
        attribState = fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer);
    } else {
        attribState = fHWVertexArrayState.bindInternalVertexArray(this);
    }

    int numAttribs = fHWProgram->numVertexAttributes() + fHWProgram->numInstanceAttributes();
    attribState->enableVertexArrays(this, numAttribs, enablePrimitiveRestart);

    // Per-vertex attributes (divisor 0), offset by baseVertex into the buffer.
    if (int vertexStride = fHWProgram->vertexStride()) {
        SkASSERT(vertexBuffer);
        SkASSERT(vertexBuffer->isCpuBuffer() ||
                 !static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
        size_t bufferOffset = baseVertex * static_cast<size_t>(vertexStride);
        for (int i = 0; i < fHWProgram->numVertexAttributes(); ++i) {
            const auto& attrib = fHWProgram->vertexAttribute(i);
            static constexpr int kDivisor = 0;
            attribState->set(this, attrib.fLocation, vertexBuffer, attrib.fCPUType, attrib.fGPUType,
                             vertexStride, bufferOffset + attrib.fOffset, kDivisor);
        }
    }
    // Per-instance attributes (divisor 1), offset by baseInstance; their attrib
    // locations follow the vertex attributes.
    if (int instanceStride = fHWProgram->instanceStride()) {
        SkASSERT(instanceBuffer);
        SkASSERT(instanceBuffer->isCpuBuffer() ||
                 !static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
        size_t bufferOffset = baseInstance * static_cast<size_t>(instanceStride);
        int attribIdx = fHWProgram->numVertexAttributes();
        for (int i = 0; i < fHWProgram->numInstanceAttributes(); ++i, ++attribIdx) {
            const auto& attrib = fHWProgram->instanceAttribute(i);
            static constexpr int kDivisor = 1;
            attribState->set(this, attrib.fLocation, instanceBuffer, attrib.fCPUType,
                             attrib.fGPUType, instanceStride, bufferOffset + attrib.fOffset,
                             kDivisor);
        }
    }
}
| |
| GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) { |
| this->handleDirtyContext(); |
| |
| // Index buffer state is tied to the vertex array. |
| if (GrGpuBufferType::kIndex == type) { |
| this->bindVertexArray(0); |
| } |
| |
| auto* bufferState = this->hwBufferState(type); |
| if (buffer->isCpuBuffer()) { |
| if (!bufferState->fBufferZeroKnownBound) { |
| GL_CALL(BindBuffer(bufferState->fGLTarget, 0)); |
| bufferState->fBufferZeroKnownBound = true; |
| bufferState->fBoundBufferUniqueID.makeInvalid(); |
| } |
| } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() != |
| bufferState->fBoundBufferUniqueID) { |
| const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer); |
| GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID())); |
| bufferState->fBufferZeroKnownBound = false; |
| bufferState->fBoundBufferUniqueID = glBuffer->uniqueID(); |
| } |
| |
| return bufferState->fGLTarget; |
| } |
| void GrGLGpu::disableScissor() { |
| if (kNo_TriState != fHWScissorSettings.fEnabled) { |
| GL_CALL(Disable(GR_GL_SCISSOR_TEST)); |
| fHWScissorSettings.fEnabled = kNo_TriState; |
| return; |
| } |
| } |
| |
| void GrGLGpu::clear(const GrFixedClip& clip, const SkPMColor4f& color, |
| GrRenderTarget* target, GrSurfaceOrigin origin) { |
| // parent class should never let us get here with no RT |
| SkASSERT(target); |
| SkASSERT(!this->caps()->performColorClearsAsDraws()); |
| SkASSERT(!clip.scissorEnabled() || !this->caps()->performPartialClearsAsDraws()); |
| |
| this->handleDirtyContext(); |
| |
| GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); |
| |
| if (clip.scissorEnabled()) { |
| this->flushRenderTarget(glRT, origin, clip.scissorRect()); |
| } else { |
| this->flushRenderTarget(glRT); |
| } |
| this->flushScissor(clip.scissorState(), glRT->width(), glRT->height(), origin); |
| this->flushWindowRectangles(clip.windowRectsState(), glRT, origin); |
| this->flushColorWrite(true); |
| |
| GrGLfloat r = color.fR, g = color.fG, b = color.fB, a = color.fA; |
| if (this->glCaps().clearToBoundaryValuesIsBroken() && |
| (1 == r || 0 == r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) { |
| static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f); |
| static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f); |
| a = (1 == a) ? safeAlpha1 : safeAlpha0; |
| } |
| this->flushClearColor(r, g, b, a); |
| |
| GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT)); |
| } |
| |
| void GrGLGpu::clearStencil(GrRenderTarget* target, int clearValue) { |
| SkASSERT(!this->caps()->performStencilClearsAsDraws()); |
| |
| if (!target) { |
| return; |
| } |
| |
| GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment(); |
| // this should only be called internally when we know we have a |
| // stencil buffer. |
| SkASSERT(sb); |
| |
| GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); |
| this->flushRenderTargetNoColorWrites(glRT); |
| |
| this->disableScissor(); |
| this->disableWindowRectangles(); |
| |
| GL_CALL(StencilMask(0xffffffff)); |
| GL_CALL(ClearStencil(clearValue)); |
| GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT)); |
| fHWStencilSettings.invalidate(); |
| if (!clearValue) { |
| sb->cleared(); |
| } |
| } |
| |
// Clears the stencil clip bit within the given fixed clip. When
// 'insideStencilMask' is true the clip bit (the high stencil bit) is set;
// otherwise the stencil is cleared to 0.
void GrGLGpu::clearStencilClip(const GrFixedClip& clip,
                               bool insideStencilMask,
                               GrRenderTarget* target, GrSurfaceOrigin origin) {
    SkASSERT(target);
    SkASSERT(!this->caps()->performStencilClearsAsDraws());
    this->handleDirtyContext();

    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    GrGLint stencilBitCount = sb->bits();
#if 0
    SkASSERT(stencilBitCount > 0);
    GrGLint clipStencilMask  = (1 << (stencilBitCount - 1));
#else
    // we could just clear the clip bit but when we go through
    // ANGLE a partial stencil mask will cause clears to be
    // turned into draws. Our contract on GrOpList says that
    // changing the clip between stencil passes may or may not
    // zero the client's clip bits. So we just clear the whole thing.
    static const GrGLint clipStencilMask  = ~0;
#endif
    // The clip bit lives in the highest stencil bit.
    GrGLint value;
    if (insideStencilMask) {
        value = (1 << (stencilBitCount - 1));
    } else {
        value = 0;
    }
    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
    this->flushRenderTargetNoColorWrites(glRT);

    this->flushScissor(clip.scissorState(), glRT->width(), glRT->height(), origin);
    this->flushWindowRectangles(clip.windowRectsState(), glRT, origin);

    GL_CALL(StencilMask((uint32_t) clipStencilMask));
    GL_CALL(ClearStencil(value));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    // The GL stencil mask was changed above, so cached stencil state is stale.
    fHWStencilSettings.invalidate();
}
| |
// Returns whether glReadPixels with 'readConfig' is supported when reading from
// 'target'. Binds the target's FBO so caps can query the implementation's
// preferred read format/type against the real framebuffer.
bool GrGLGpu::readPixelsSupported(GrRenderTarget* target, GrPixelConfig readConfig) {
#ifdef SK_BUILD_FOR_MAC
    // Chromium may ask us to read back from locked IOSurfaces. Calling the command buffer's
    // glGetIntegerv() with GL_IMPLEMENTATION_COLOR_READ_FORMAT/_TYPE causes the command buffer
    // to make a call to check the framebuffer status which can hang the driver. So in Mac Chromium
    // we always use a temporary surface to test for read pixels support.
    // https://www.crbug.com/662802
    if (this->glContext().driver() == kChromium_GrGLDriver) {
        // Delegate to the config-based overload, which uses a temp surface.
        return this->readPixelsSupported(target->config(), readConfig);
    }
#endif
    auto bindRenderTarget = [this, target]() -> bool {
        this->flushRenderTargetNoColorWrites(static_cast<GrGLRenderTarget*>(target));
        return true;
    };
    // Nothing to undo: the target stays bound, which is fine for the caller.
    auto unbindRenderTarget = []{};
    auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
        GR_GL_GetIntegerv(this->glInterface(), query, value);
    };
    GrPixelConfig rtConfig = target->config();
    return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget,
                                              unbindRenderTarget);
}
| |
// Config-only variant: answers whether 'readConfig' reads are supported from a
// surface of 'rtConfig' by creating a small temporary texture/render target to
// query against. 'temp' keeps the texture alive until the unbind callback runs.
bool GrGLGpu::readPixelsSupported(GrPixelConfig rtConfig, GrPixelConfig readConfig) {
    sk_sp<GrTexture> temp;
    auto bindRenderTarget = [this, rtConfig, &temp]() -> bool {
        GrSurfaceDesc desc;
        desc.fConfig = rtConfig;
        desc.fWidth = desc.fHeight = 16;
        if (this->glCaps().isConfigRenderable(rtConfig)) {
            // Preferred path: make a real render target and bind its FBO.
            desc.fFlags = kRenderTarget_GrSurfaceFlag;
            temp = this->createTexture(desc, SkBudgeted::kNo);
            if (!temp) {
                return false;
            }
            GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(temp->asRenderTarget());
            this->flushRenderTargetNoColorWrites(glrt);
            return true;
        } else if (this->glCaps().canConfigBeFBOColorAttachment(rtConfig)) {
            // Fallback: attach a plain texture to a temporary FBO.
            temp = this->createTexture(desc, SkBudgeted::kNo);
            if (!temp) {
                return false;
            }
            this->bindSurfaceFBOForPixelOps(temp.get(), GR_GL_FRAMEBUFFER, kDst_TempFBOTarget);
            // The temp FBO bind invalidates our render-target binding cache.
            fHWBoundRenderTargetUniqueID.makeInvalid();
            return true;
        }
        return false;
    };
    auto unbindRenderTarget = [this, &temp]() {
        this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, temp.get());
    };
    auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
        GR_GL_GetIntegerv(this->glInterface(), query, value);
    };
    return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget,
                                              unbindRenderTarget);
}
| |
| bool GrGLGpu::readPixelsSupported(GrSurface* surfaceForConfig, GrPixelConfig readConfig) { |
| if (GrRenderTarget* rt = surfaceForConfig->asRenderTarget()) { |
| return this->readPixelsSupported(rt, readConfig); |
| } else { |
| GrPixelConfig config = surfaceForConfig->config(); |
| return this->readPixelsSupported(config, readConfig); |
| } |
| } |
| |
// Issues the actual glReadPixels for 'surface'. 'offsetOrPtr' is either a CPU
// destination pointer or an offset into a bound pixel-pack buffer (hence
// "read or transfer"). 'rowWidthInPixels' becomes GL_PACK_ROW_LENGTH when it
// differs from 'width'. Returns false if the surface/config cannot be read.
bool GrGLGpu::readOrTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                                       GrColorType dstColorType, void* offsetOrPtr,
                                       int rowWidthInPixels) {
    SkASSERT(surface);

    // We need either a render target or a config attachable to a temporary FBO.
    GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
    if (!renderTarget && !this->glCaps().canConfigBeFBOColorAttachment(surface->config())) {
        return false;
    }

    // TODO: Avoid this conversion by making GrGLCaps work with color types.
    auto dstAsConfig = GrColorTypeToPixelConfig(dstColorType, GrSRGBEncoded::kNo);

    if (!this->readPixelsSupported(surface, dstAsConfig)) {
        return false;
    }

    GrGLenum externalFormat;
    GrGLenum externalType;
    if (!this->glCaps().getReadPixelsFormat(surface->config(), dstAsConfig, &externalFormat,
                                            &externalType)) {
        return false;
    }

    if (renderTarget) {
        // resolve the render target if necessary
        switch (renderTarget->getResolveType()) {
            case GrGLRenderTarget::kCantResolve_ResolveType:
                return false;
            case GrGLRenderTarget::kAutoResolves_ResolveType:
                this->flushRenderTargetNoColorWrites(renderTarget);
                break;
            case GrGLRenderTarget::kCanResolve_ResolveType:
                this->onResolveRenderTarget(renderTarget);
                // we don't track the state of the READ FBO ID.
                this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID());
                break;
            default:
                SK_ABORT("Unknown resolve type");
        }
    } else {
        // Use a temporary FBO.
        this->bindSurfaceFBOForPixelOps(surface, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
        // Temporary FBO bind invalidates the cached render-target binding.
        fHWBoundRenderTargetUniqueID.makeInvalid();
    }

    // the read rect is viewport-relative
    GrGLIRect readRect;
    readRect.setRelativeTo(surface->height(), left, top, width, height, kTopLeft_GrSurfaceOrigin);

    // determine if GL can read using the passed rowBytes or if we need a scratch buffer.
    if (rowWidthInPixels != width) {
        SkASSERT(this->glCaps().packRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, rowWidthInPixels));
    }
    GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, config_alignment(dstAsConfig)));

    bool reattachStencil = false;
    if (this->glCaps().detachStencilFromMSAABuffersBeforeReadPixels() &&
        renderTarget &&
        renderTarget->renderTargetPriv().getStencilAttachment() &&
        renderTarget->numSamples() > 1) {
        // Fix Adreno devices that won't read from MSAA framebuffers with stencil attached
        reattachStencil = true;
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
                                        GR_GL_RENDERBUFFER, 0));
    }

    GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom, readRect.fWidth, readRect.fHeight,
                       externalFormat, externalType, offsetOrPtr));

    if (reattachStencil) {
        // Restore the stencil attachment we detached for the Adreno workaround.
        GrGLStencilAttachment* stencilAttachment = static_cast<GrGLStencilAttachment*>(
                renderTarget->renderTargetPriv().getStencilAttachment());
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
                                        GR_GL_RENDERBUFFER, stencilAttachment->renderbufferID()));
    }

    if (rowWidthInPixels != width) {
        SkASSERT(this->glCaps().packRowLengthSupport());
        // Reset pack row length so later reads aren't affected.
        GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
    }

    if (!renderTarget) {
        // Tear down the temporary FBO we bound above.
        this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, surface);
    }
    return true;
}
| |
| bool GrGLGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height, |
| GrColorType dstColorType, void* buffer, size_t rowBytes) { |
| SkASSERT(surface); |
| |
| int bytesPerPixel = GrColorTypeBytesPerPixel(dstColorType); |
| |
| // GL_PACK_ROW_LENGTH is in terms of pixels not bytes. |
| int rowPixelWidth; |
| void* readDst = buffer; |
| |
| // determine if GL can read using the passed rowBytes or if we need a scratch buffer. |
| SkAutoSMalloc<32 * sizeof(GrColor)> scratch; |
| if (!rowBytes || rowBytes == (size_t)(width * bytesPerPixel)) { |
| rowPixelWidth = width; |
| } else { |
| if (this->glCaps().packRowLengthSupport() && !(rowBytes % bytesPerPixel)) { |
| rowPixelWidth = rowBytes / bytesPerPixel; |
| } else { |
| scratch.reset(width * bytesPerPixel * height); |
| readDst = scratch.get(); |
| rowPixelWidth = width; |
| } |
| } |
| if (!this->readOrTransferPixelsFrom(surface, left, top, width, height, dstColorType, readDst, |
| rowPixelWidth)) { |
| return false; |
| } |
| |
| if (readDst != buffer) { |
| SkASSERT(readDst != buffer); |
| SkASSERT(rowBytes != (size_t)(rowPixelWidth * bytesPerPixel)); |
| const char* src = reinterpret_cast<const char*>(readDst); |
| char* dst = reinterpret_cast<char*>(buffer); |
| SkRectMemcpy(dst, rowBytes, src, rowPixelWidth * bytesPerPixel, width * bytesPerPixel, |
| height); |
| } |
| return true; |
| } |
| |
| GrGpuRTCommandBuffer* GrGLGpu::getCommandBuffer( |
| GrRenderTarget* rt, GrSurfaceOrigin origin, const SkRect& bounds, |
| const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo, |
| const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) { |
| if (!fCachedRTCommandBuffer) { |
| fCachedRTCommandBuffer.reset(new GrGLGpuRTCommandBuffer(this)); |
| } |
| |
| fCachedRTCommandBuffer->set(rt, origin, colorInfo, stencilInfo); |
| return fCachedRTCommandBuffer.get(); |
| } |
| |
| GrGpuTextureCommandBuffer* GrGLGpu::getCommandBuffer(GrTexture* texture, GrSurfaceOrigin origin) { |
| if (!fCachedTexCommandBuffer) { |
|