Miscellaneous updates to handle more Vulkan creation failures.

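Creation calls that previously went through GR_VK_CALL_ERRCHECK now report their VkResult via
GR_VK_CALL_RESULT, and callers bail out (returning nullptr/false) and clean up any partially
created resources when the result is not VK_SUCCESS. A minimal sketch of the pattern, taken
from the GrVkSampler change below:

  VkSampler sampler;
  VkResult result;
  GR_VK_CALL_RESULT(gpu, result, CreateSampler(gpu->device(), &createInfo, nullptr, &sampler));
  if (result != VK_SUCCESS) {
      ycbcrConversion->unref(gpu);  // release anything created before the failing call
      return nullptr;
  }

For the same reason, GrVkPipelineState::setAndBindUniforms and setAndBindTextures now return
bool, so GrVkOpsRenderPass can skip or abandon work when descriptor set allocation fails.
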
Bug: skia:9603
Change-Id: Idc282775d75e1d136fdf83e98a2e7e3e871f9b08
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/253958
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
diff --git a/src/gpu/vk/GrVkCommandPool.cpp b/src/gpu/vk/GrVkCommandPool.cpp
index 60b0dfc..23bc5b1 100644
--- a/src/gpu/vk/GrVkCommandPool.cpp
+++ b/src/gpu/vk/GrVkCommandPool.cpp
@@ -65,7 +65,12 @@
     SkASSERT(!fOpen);
     fOpen = true;
     fPrimaryCommandBuffer->recycleSecondaryCommandBuffers(gpu);
-    GR_VK_CALL_ERRCHECK(gpu, ResetCommandPool(gpu->device(), fCommandPool, 0));
+    // We can't use the normal result-checking macros here because reset may be called on a
+    // different thread, and we must not modify the device-lost state on the GrVkGpu. We just call
+    // vkResetCommandPool and assume the next Vulkan call will catch a lost device.
+    SkDEBUGCODE(VkResult result = )GR_VK_CALL(gpu->vkInterface(),
+                                              ResetCommandPool(gpu->device(), fCommandPool, 0));
+    SkASSERT(result == VK_SUCCESS || result == VK_ERROR_DEVICE_LOST);
 }
 
 void GrVkCommandPool::releaseResources(GrVkGpu* gpu) {
diff --git a/src/gpu/vk/GrVkDescriptorPool.cpp b/src/gpu/vk/GrVkDescriptorPool.cpp
index 63a6b1a..3d7d9cd 100644
--- a/src/gpu/vk/GrVkDescriptorPool.cpp
+++ b/src/gpu/vk/GrVkDescriptorPool.cpp
@@ -45,10 +45,6 @@
     return fType == type && count <= fCount;
 }
 
-void GrVkDescriptorPool::reset(GrVkGpu* gpu) {
-    GR_VK_CALL_ERRCHECK(gpu, ResetDescriptorPool(gpu->device(), fDescPool, 0));
-}
-
 void GrVkDescriptorPool::freeGPUData(GrVkGpu* gpu) const {
     // Destroying the VkDescriptorPool will automatically free and delete any VkDescriptorSets
     // allocated from the pool.
diff --git a/src/gpu/vk/GrVkDescriptorPool.h b/src/gpu/vk/GrVkDescriptorPool.h
index 96459a6..b1a199e 100644
--- a/src/gpu/vk/GrVkDescriptorPool.h
+++ b/src/gpu/vk/GrVkDescriptorPool.h
@@ -24,8 +24,6 @@
 
     VkDescriptorPool descPool() const { return fDescPool; }
 
-    void reset(GrVkGpu* gpu);
-
     // Returns whether or not this descriptor pool could be used, assuming it gets fully reset and
     // not in use by another draw, to support the requested type and count.
     bool isCompatible(VkDescriptorType type, uint32_t count) const;
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index c0d6d1f..e8305ab 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -60,8 +60,7 @@
 #endif
 
 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
-#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
-#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this, X)
+#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
 
 sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
                            const GrContextOptions& options, GrContext* context) {
@@ -1498,14 +1497,13 @@
     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
     barrier.image = info->fImage;
     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
-    GR_VK_CALL(vkInterface, CmdPipelineBarrier(
-                               cmdBuffer,
-                               srcStageMask,
-                               dstStageMask,
-                               0,
-                               0, nullptr,
-                               0, nullptr,
-                               1, &barrier));
+    GR_VK_CALL(vkInterface, CmdPipelineBarrier(cmdBuffer,
+                                               srcStageMask,
+                                               dstStageMask,
+                                               0,
+                                               0, nullptr,
+                                               0, nullptr,
+                                               1, &barrier));
     info->fImageLayout = newLayout;
 }
 
@@ -1583,7 +1581,7 @@
     };
 
     VkCommandBuffer cmdBuffer;
-    err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
+    VK_CALL_RET(err, AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
     if (err) {
         GrVkImage::DestroyImageInfo(this, info);
         return false;
@@ -1596,8 +1594,12 @@
     cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
     cmdBufferBeginInfo.pInheritanceInfo = nullptr;
 
-    err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
-    SkASSERT(!err);
+    VK_CALL_RET(err, BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
+    if (err) {
+        VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
+        GrVkImage::DestroyImageInfo(this, info);
+        return false;
+    }
 
     // Set image layout and add barrier
     set_image_layout(this->vkInterface(), cmdBuffer, info, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
@@ -1621,7 +1623,7 @@
         bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
         bufInfo.queueFamilyIndexCount = 0;
         bufInfo.pQueueFamilyIndices = nullptr;
-        err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
+        VK_CALL_RET(err, CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
 
         if (err) {
             GrVkImage::DestroyImageInfo(this, info);
@@ -1707,8 +1709,16 @@
     }
 
     // End CommandBuffer
-    err = VK_CALL(EndCommandBuffer(cmdBuffer));
-    SkASSERT(!err);
+    VK_CALL_RET(err, EndCommandBuffer(cmdBuffer));
+    if (err) {
+        GrVkImage::DestroyImageInfo(this, info);
+        if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
+            GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+        }
+        VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
+        return false;
+    }
 
     // Create Fence for queue
     VkFenceCreateInfo fenceInfo;
@@ -1718,8 +1728,16 @@
     fenceInfo.flags = 0;
     VkFence fence = VK_NULL_HANDLE;
 
-    err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
-    SkASSERT(!err);
+    VK_CALL_RET(err, CreateFence(fDevice, &fenceInfo, nullptr, &fence));
+    if (err) {
+        GrVkImage::DestroyImageInfo(this, info);
+        if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
+            GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+        }
+        VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
+        return false;
+    }
 
     VkProtectedSubmitInfo protectedSubmitInfo;
     if (fProtectedContext == GrProtected::kYes) {
@@ -1740,10 +1758,11 @@
     submitInfo.pCommandBuffers = &cmdBuffer;
     submitInfo.signalSemaphoreCount = 0;
     submitInfo.pSignalSemaphores = nullptr;
-    err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
-    SkASSERT(!err);
+    VK_CALL_RET(err, QueueSubmit(this->queue(), 1, &submitInfo, fence));
 
-    err = VK_CALL(WaitForFences(this->device(), 1, &fence, VK_TRUE, UINT64_MAX));
+    if (!err) {
+        VK_CALL_RET(err, WaitForFences(this->device(), 1, &fence, VK_TRUE, UINT64_MAX));
+    }
     if (VK_SUCCESS != err) {
         GrVkImage::DestroyImageInfo(this, info);
         if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
@@ -1756,7 +1775,7 @@
             SkDebugf("Fence failed to signal: %d\n", err);
             SK_ABORT("failing");
         } else {
-            SkDebugf("Fence failed: %d\n", err);
+            SkDebugf("Queue submit or fence wait failed: %d\n", err);
             return false;
         }
     }
@@ -2451,9 +2470,17 @@
     createInfo.pNext = nullptr;
     createInfo.flags = 0;
     VkFence fence = VK_NULL_HANDLE;
+    VkResult result;
 
-    VK_CALL_ERRCHECK(CreateFence(this->device(), &createInfo, nullptr, &fence));
-    VK_CALL(QueueSubmit(this->queue(), 0, nullptr, fence));
+    VK_CALL_RET(result, CreateFence(this->device(), &createInfo, nullptr, &fence));
+    if (result != VK_SUCCESS) {
+        return 0;
+    }
+    VK_CALL_RET(result, QueueSubmit(this->queue(), 0, nullptr, fence));
+    if (result != VK_SUCCESS) {
+        VK_CALL(DestroyFence(this->device(), fence, nullptr));
+        return 0;
+    }
 
     GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(VkFence));
     return (GrFence)fence;
@@ -2462,7 +2489,8 @@
 bool GrVkGpu::waitFence(GrFence fence, uint64_t timeout) {
     SkASSERT(VK_NULL_HANDLE != (VkFence)fence);
 
-    VkResult result = VK_CALL(WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, timeout));
+    VkResult result;
+    VK_CALL_RET(result, WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, timeout));
     return (VK_SUCCESS == result);
 }
 
diff --git a/src/gpu/vk/GrVkImage.cpp b/src/gpu/vk/GrVkImage.cpp
index c3c6add..befb7d1 100644
--- a/src/gpu/vk/GrVkImage.cpp
+++ b/src/gpu/vk/GrVkImage.cpp
@@ -197,7 +197,11 @@
         initialLayout                                // initialLayout
     };
 
-    GR_VK_CALL_ERRCHECK(gpu, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
+    VkResult result;
+    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
+    if (result != VK_SUCCESS) {
+        return false;
+    }
 
     if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, isLinear, &alloc)) {
         VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
diff --git a/src/gpu/vk/GrVkOpsRenderPass.cpp b/src/gpu/vk/GrVkOpsRenderPass.cpp
index c880ace..fa0d96a 100644
--- a/src/gpu/vk/GrVkOpsRenderPass.cpp
+++ b/src/gpu/vk/GrVkOpsRenderPass.cpp
@@ -485,14 +485,18 @@
     // same place (i.e., the target renderTargetProxy) they had best agree.
     SkASSERT(programInfo.origin() == fOrigin);
 
-    pipelineState->setAndBindUniforms(fGpu, fRenderTarget, programInfo, currentCB);
+    if (!pipelineState->setAndBindUniforms(fGpu, fRenderTarget, programInfo, currentCB)) {
+        return nullptr;
+    }
 
     // Check whether we need to bind textures between each GrMesh. If not we can bind them all now.
     if (!programInfo.hasDynamicPrimProcTextures()) {
         auto proxies = programInfo.hasFixedPrimProcTextures() ? programInfo.fixedPrimProcTextures()
                                                               : nullptr;
-        pipelineState->setAndBindTextures(fGpu, programInfo.primProc(), programInfo.pipeline(),
-                                          proxies, currentCB);
+        if (!pipelineState->setAndBindTextures(fGpu, programInfo.primProc(), programInfo.pipeline(),
+                                               proxies, currentCB)) {
+            return nullptr;
+        }
     }
 
     if (!programInfo.pipeline().isScissorEnabled()) {
@@ -601,8 +605,15 @@
         }
         if (hasDynamicTextures) {
             auto meshProxies = programInfo.dynamicPrimProcTextures(i);
-            pipelineState->setAndBindTextures(fGpu, programInfo.primProc(), programInfo.pipeline(),
-                                              meshProxies, this->currentCommandBuffer());
+            if (!pipelineState->setAndBindTextures(fGpu, programInfo.primProc(),
+                                                   programInfo.pipeline(), meshProxies,
+                                                   this->currentCommandBuffer())) {
+                if (fGpu->isDeviceLost()) {
+                    return;
+                } else {
+                    continue;
+                }
+            }
         }
         SkASSERT(pipelineState);
         mesh.sendToGpu(this);
diff --git a/src/gpu/vk/GrVkPipelineState.cpp b/src/gpu/vk/GrVkPipelineState.cpp
index 117e4fc..9cd7453 100644
--- a/src/gpu/vk/GrVkPipelineState.cpp
+++ b/src/gpu/vk/GrVkPipelineState.cpp
@@ -98,7 +98,7 @@
     }
 }
 
-void GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
+bool GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
                                            const GrRenderTarget* renderTarget,
                                            const GrProgramInfo& programInfo,
                                            GrVkCommandBuffer* commandBuffer) {
@@ -133,6 +133,9 @@
                 fUniformDescriptorSet->recycle(gpu);
             }
             fUniformDescriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
+            if (!fUniformDescriptorSet) {
+                return false;
+            }
             this->writeUniformBuffers(gpu);
         }
         static const int kUniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
@@ -142,9 +145,10 @@
         commandBuffer->addRecycledResource(fUniformDescriptorSet);
         commandBuffer->addRecycledResource(fUniformBuffer->resource());
     }
+    return true;
 }
 
-void GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
+bool GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
                                            const GrPrimitiveProcessor& primProc,
                                            const GrPipeline& pipeline,
                                            const GrTextureProxy* const primProcTextures[],
@@ -199,13 +203,15 @@
                 commandBuffer->addRecycledResource(descriptorSet);
                 commandBuffer->bindDescriptorSets(gpu, this, fPipeline->layout(), kSamplerDSIdx, 1,
                                                   descriptorSet->descriptorSet(), 0, nullptr);
-                return;
+                return true;
             }
         }
 
         const GrVkDescriptorSet* descriptorSet =
                 gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
-        SkASSERT(descriptorSet);
+        if (!descriptorSet) {
+            return false;
+        }
 
         for (int i = 0; i < fNumSamplers; ++i) {
             const GrSamplerState& state = samplerBindings[i].fState;
@@ -260,6 +266,7 @@
         commandBuffer->addRecycledResource(descriptorSet);
         descriptorSet->recycle(gpu);
     }
+    return true;
 }
 
 void set_uniform_descriptor_writes(VkWriteDescriptorSet* descriptorWrite,
diff --git a/src/gpu/vk/GrVkPipelineState.h b/src/gpu/vk/GrVkPipelineState.h
index 07ff2bf..d6bc84d 100644
--- a/src/gpu/vk/GrVkPipelineState.h
+++ b/src/gpu/vk/GrVkPipelineState.h
@@ -53,13 +53,13 @@
 
     ~GrVkPipelineState();
 
-    void setAndBindUniforms(GrVkGpu*, const GrRenderTarget*, const GrProgramInfo&,
+    bool setAndBindUniforms(GrVkGpu*, const GrRenderTarget*, const GrProgramInfo&,
                             GrVkCommandBuffer*);
     /**
      * This must be called after setAndBindUniforms() since that function invalidates texture
      * bindings.
      */
-    void setAndBindTextures(GrVkGpu*, const GrPrimitiveProcessor&, const GrPipeline&,
+    bool setAndBindTextures(GrVkGpu*, const GrPrimitiveProcessor&, const GrPipeline&,
                             const GrTextureProxy* const primitiveProcessorTextures[],
                             GrVkCommandBuffer*);
 
diff --git a/src/gpu/vk/GrVkPipelineStateBuilder.cpp b/src/gpu/vk/GrVkPipelineStateBuilder.cpp
index 962d93f..3974a68 100644
--- a/src/gpu/vk/GrVkPipelineStateBuilder.cpp
+++ b/src/gpu/vk/GrVkPipelineStateBuilder.cpp
@@ -175,8 +175,12 @@
     layoutCreateInfo.pushConstantRangeCount = 0;
     layoutCreateInfo.pPushConstantRanges = nullptr;
 
-    GR_VK_CALL_ERRCHECK(fGpu, CreatePipelineLayout(fGpu->device(), &layoutCreateInfo, nullptr,
-                                                   &pipelineLayout));
+    VkResult result;
+    GR_VK_CALL_RESULT(fGpu, result, CreatePipelineLayout(fGpu->device(), &layoutCreateInfo, nullptr,
+                                                         &pipelineLayout));
+    if (result != VK_SUCCESS) {
+        return nullptr;
+    }
 
     // We need to enable the following extensions so that the compiler can correctly make spir-v
     // from our glsl shaders.
diff --git a/src/gpu/vk/GrVkSampler.cpp b/src/gpu/vk/GrVkSampler.cpp
index 6b4dfa8..e83262d 100644
--- a/src/gpu/vk/GrVkSampler.cpp
+++ b/src/gpu/vk/GrVkSampler.cpp
@@ -102,7 +102,12 @@
     }
 
     VkSampler sampler;
-    GR_VK_CALL_ERRCHECK(gpu, CreateSampler(gpu->device(), &createInfo, nullptr, &sampler));
+    VkResult result;
+    GR_VK_CALL_RESULT(gpu, result, CreateSampler(gpu->device(), &createInfo, nullptr, &sampler));
+    if (result != VK_SUCCESS) {
+        ycbcrConversion->unref(gpu);
+        return nullptr;
+    }
 
     return new GrVkSampler(sampler, ycbcrConversion, GenerateKey(samplerState, ycbcrInfo));
 }
diff --git a/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp b/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp
index f33f521..ec0c22d 100644
--- a/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp
+++ b/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp
@@ -10,7 +10,7 @@
 #include "src/gpu/vk/GrVkGpu.h"
 
 GrVkSamplerYcbcrConversion* GrVkSamplerYcbcrConversion::Create(
-        const GrVkGpu* gpu, const GrVkYcbcrConversionInfo& info) {
+        GrVkGpu* gpu, const GrVkYcbcrConversionInfo& info) {
     if (!gpu->vkCaps().supportsYcbcrConversion()) {
         return nullptr;
     }
@@ -71,9 +71,10 @@
     }
 
     VkSamplerYcbcrConversion conversion;
-    GR_VK_CALL(gpu->vkInterface(), CreateSamplerYcbcrConversion(gpu->device(), &ycbcrCreateInfo,
+    VkResult result;
+    GR_VK_CALL_RESULT(gpu, result, CreateSamplerYcbcrConversion(gpu->device(), &ycbcrCreateInfo,
                                                                 nullptr, &conversion));
-    if (conversion == VK_NULL_HANDLE) {
+    if (result != VK_SUCCESS) {
         return nullptr;
     }
 
diff --git a/src/gpu/vk/GrVkSamplerYcbcrConversion.h b/src/gpu/vk/GrVkSamplerYcbcrConversion.h
index cf7a2c5..5ec3b3d 100644
--- a/src/gpu/vk/GrVkSamplerYcbcrConversion.h
+++ b/src/gpu/vk/GrVkSamplerYcbcrConversion.h
@@ -17,7 +17,7 @@
 
 class GrVkSamplerYcbcrConversion : public GrVkResource {
 public:
-    static GrVkSamplerYcbcrConversion* Create(const GrVkGpu* gpu, const GrVkYcbcrConversionInfo&);
+    static GrVkSamplerYcbcrConversion* Create(GrVkGpu* gpu, const GrVkYcbcrConversionInfo&);
 
     VkSamplerYcbcrConversion ycbcrConversion() const { return fYcbcrConversion; }