[Graphite] Add VulkanQueueManager class.
This also adds support in VulkanCommandBuffer for reusing the command
buffer, as managed by the QueueManager.
Bug: b/249779247
Change-Id: I55d3f121eca557bcfdeb028a4083d09cfbadbe76
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/587546
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Nicolette Prevost <nicolettep@google.com>
diff --git a/gn/graphite.gni b/gn/graphite.gni
index da80fc9..79405c7 100644
--- a/gn/graphite.gni
+++ b/gn/graphite.gni
@@ -200,6 +200,8 @@
"$_src/vk/VulkanCommandBuffer.cpp",
"$_src/vk/VulkanCommandBuffer.h",
"$_src/vk/VulkanGraphiteTypes.cpp",
+ "$_src/vk/VulkanQueueManager.cpp",
+ "$_src/vk/VulkanQueueManager.h",
"$_src/vk/VulkanResourceProvider.cpp",
"$_src/vk/VulkanResourceProvider.h",
"$_src/vk/VulkanSharedContext.cpp",
diff --git a/src/gpu/graphite/CommandBuffer.h b/src/gpu/graphite/CommandBuffer.h
index 7770d46..a797fc3 100644
--- a/src/gpu/graphite/CommandBuffer.h
+++ b/src/gpu/graphite/CommandBuffer.h
@@ -53,7 +53,7 @@
void resetCommandBuffer();
// If any work is needed to create new resources for a fresh command buffer do that here.
- virtual bool setNewCommandBufferResources() { return true; }
+ virtual bool setNewCommandBufferResources() = 0;
void addFinishedProc(sk_sp<RefCntedCallback> finishedProc);
void callFinishedProcs(bool success);
diff --git a/src/gpu/graphite/Context.cpp b/src/gpu/graphite/Context.cpp
index 25f274d..44717b4 100644
--- a/src/gpu/graphite/Context.cpp
+++ b/src/gpu/graphite/Context.cpp
@@ -34,6 +34,7 @@
#ifdef SK_VULKAN
#include "include/gpu/vk/VulkanBackendContext.h"
+#include "src/gpu/graphite/vk/VulkanQueueManager.h"
#include "src/gpu/graphite/vk/VulkanSharedContext.h"
#endif
@@ -81,8 +82,8 @@
return nullptr;
}
- // TODO: Make a QueueManager
- std::unique_ptr<QueueManager> queueManager;
+ std::unique_ptr<QueueManager> queueManager(new VulkanQueueManager(backendContext.fQueue,
+ sharedContext.get()));
if (!queueManager) {
return nullptr;
}
diff --git a/src/gpu/graphite/GpuWorkSubmission.h b/src/gpu/graphite/GpuWorkSubmission.h
index 4df97a5..ed02af0 100644
--- a/src/gpu/graphite/GpuWorkSubmission.h
+++ b/src/gpu/graphite/GpuWorkSubmission.h
@@ -20,7 +20,7 @@
virtual ~GpuWorkSubmission();
virtual bool isFinished() = 0;
- virtual void waitUntilFinished(const SharedContext*) = 0;
+ virtual void waitUntilFinished() = 0;
protected:
CommandBuffer* commandBuffer() { return fCommandBuffer.get(); }
diff --git a/src/gpu/graphite/Log.h b/src/gpu/graphite/Log.h
index 9812fde..9669966 100644
--- a/src/gpu/graphite/Log.h
+++ b/src/gpu/graphite/Log.h
@@ -28,6 +28,9 @@
do { \
if (priority <= SKGPU_LOWEST_ACTIVE_PRIORITY) { \
SkDebugf("[graphite] " fmt "\n", ##__VA_ARGS__); \
+ if (priority == skgpu::graphite::Priority::kFatal) { \
+ SK_ABORT("Fatal log call"); \
+ } \
} \
} while (0)
#define SKGPU_LOG_F(fmt, ...) SKGPU_LOG(skgpu::graphite::Priority::kFatal, "** ERROR ** " fmt, \
diff --git a/src/gpu/graphite/QueueManager.cpp b/src/gpu/graphite/QueueManager.cpp
index a857397..050374e 100644
--- a/src/gpu/graphite/QueueManager.cpp
+++ b/src/gpu/graphite/QueueManager.cpp
@@ -104,7 +104,7 @@
// wait for the last submission to finish
OutstandingSubmission* back = (OutstandingSubmission*)fOutstandingSubmissions.back();
if (back) {
- (*back)->waitUntilFinished(fSharedContext);
+ (*back)->waitUntilFinished();
}
}
diff --git a/src/gpu/graphite/mtl/MtlCommandBuffer.h b/src/gpu/graphite/mtl/MtlCommandBuffer.h
index 8b6ba39..de00206 100644
--- a/src/gpu/graphite/mtl/MtlCommandBuffer.h
+++ b/src/gpu/graphite/mtl/MtlCommandBuffer.h
@@ -10,7 +10,6 @@
#include "src/gpu/graphite/CommandBuffer.h"
#include "src/gpu/graphite/DrawPass.h"
-#include "src/gpu/graphite/GpuWorkSubmission.h"
#include "src/gpu/graphite/Log.h"
#include <memory>
@@ -45,7 +44,7 @@
(*fCommandBuffer).status == MTLCommandBufferStatusError;
}
- void waitUntilFinished(const SharedContext*) {
+ void waitUntilFinished() {
// TODO: it's not clear what do to if status is Enqueued. Commit and then wait?
if ((*fCommandBuffer).status == MTLCommandBufferStatusScheduled ||
(*fCommandBuffer).status == MTLCommandBufferStatusCommitted) {
diff --git a/src/gpu/graphite/mtl/MtlQueueManager.h b/src/gpu/graphite/mtl/MtlQueueManager.h
index 8befc98..8d8943a 100644
--- a/src/gpu/graphite/mtl/MtlQueueManager.h
+++ b/src/gpu/graphite/mtl/MtlQueueManager.h
@@ -20,7 +20,6 @@
namespace skgpu::graphite {
class MtlSharedContext;
-class SharedContext;
class MtlQueueManager : public QueueManager {
public:
diff --git a/src/gpu/graphite/mtl/MtlQueueManager.mm b/src/gpu/graphite/mtl/MtlQueueManager.mm
index fdc755c..5a2f029 100644
--- a/src/gpu/graphite/mtl/MtlQueueManager.mm
+++ b/src/gpu/graphite/mtl/MtlQueueManager.mm
@@ -7,6 +7,7 @@
#include "src/gpu/graphite/mtl/MtlQueueManager.h"
+#include "src/gpu/graphite/GpuWorkSubmission.h"
#include "src/gpu/graphite/mtl/MtlCommandBuffer.h"
#include "src/gpu/graphite/mtl/MtlResourceProvider.h"
#include "src/gpu/graphite/mtl/MtlSharedContext.h"
@@ -41,17 +42,17 @@
return std::move(cmdBuffer);
}
-class WorkSubmission final : public GpuWorkSubmission {
+class MtlWorkSubmission final : public GpuWorkSubmission {
public:
- WorkSubmission(std::unique_ptr<CommandBuffer> cmdBuffer, QueueManager* queueManager)
+ MtlWorkSubmission(std::unique_ptr<CommandBuffer> cmdBuffer, QueueManager* queueManager)
: GpuWorkSubmission(std::move(cmdBuffer), queueManager) {}
- ~WorkSubmission() override {}
+ ~MtlWorkSubmission() override {}
bool isFinished() override {
return static_cast<MtlCommandBuffer*>(this->commandBuffer())->isFinished();
}
- void waitUntilFinished(const SharedContext* context) override {
- return static_cast<MtlCommandBuffer*>(this->commandBuffer())->waitUntilFinished(context);
+ void waitUntilFinished() override {
+ return static_cast<MtlCommandBuffer*>(this->commandBuffer())->waitUntilFinished();
}
};
@@ -64,7 +65,7 @@
}
std::unique_ptr<GpuWorkSubmission> submission(
- new WorkSubmission(std::move(fCurrentCommandBuffer), this));
+ new MtlWorkSubmission(std::move(fCurrentCommandBuffer), this));
return submission;
}
diff --git a/src/gpu/graphite/vk/VulkanCommandBuffer.cpp b/src/gpu/graphite/vk/VulkanCommandBuffer.cpp
index 3899a55..ba9e51c 100644
--- a/src/gpu/graphite/vk/VulkanCommandBuffer.cpp
+++ b/src/gpu/graphite/vk/VulkanCommandBuffer.cpp
@@ -7,6 +7,7 @@
#include "src/gpu/graphite/vk/VulkanCommandBuffer.h"
+#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/vk/VulkanSharedContext.h"
#include "src/gpu/graphite/vk/VulkanUtils.h"
@@ -76,10 +77,166 @@
(void) fPrimaryCommandBuffer;
(void) fSharedContext;
(void) fResourceProvider;
+    // A newly created command buffer is begun automatically so it is ready for recording.
+ this->begin();
}
VulkanCommandBuffer::~VulkanCommandBuffer() {}
+void VulkanCommandBuffer::onResetCommandBuffer() {
+ SkASSERT(!fActive);
+ VULKAN_CALL_ERRCHECK(fSharedContext->interface(), ResetCommandPool(fSharedContext->device(),
+ fPool,
+ 0));
+}
+
+bool VulkanCommandBuffer::setNewCommandBufferResources() {
+ this->begin();
+ return true;
+}
+
+void VulkanCommandBuffer::begin() {
+ SkASSERT(!fActive);
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = nullptr;
+
+ VULKAN_CALL_ERRCHECK(fSharedContext->interface(), BeginCommandBuffer(fPrimaryCommandBuffer,
+ &cmdBufferBeginInfo));
+ SkDEBUGCODE(fActive = true;)
+}
+
+void VulkanCommandBuffer::end() {
+ SkASSERT(fActive);
+
+ VULKAN_CALL_ERRCHECK(fSharedContext->interface(), EndCommandBuffer(fPrimaryCommandBuffer));
+
+ SkDEBUGCODE(fActive = false;)
+}
+
+static bool submit_to_queue(const VulkanInterface* interface,
+ VkQueue queue,
+ VkFence fence,
+ uint32_t waitCount,
+ const VkSemaphore* waitSemaphores,
+ const VkPipelineStageFlags* waitStages,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* commandBuffers,
+ uint32_t signalCount,
+ const VkSemaphore* signalSemaphores,
+ Protected protectedContext) {
+ VkProtectedSubmitInfo protectedSubmitInfo;
+ if (protectedContext == Protected::kYes) {
+ memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
+ protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
+ protectedSubmitInfo.pNext = nullptr;
+ protectedSubmitInfo.protectedSubmit = VK_TRUE;
+ }
+
+ VkSubmitInfo submitInfo;
+ memset(&submitInfo, 0, sizeof(VkSubmitInfo));
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.pNext = protectedContext == Protected::kYes ? &protectedSubmitInfo : nullptr;
+ submitInfo.waitSemaphoreCount = waitCount;
+ submitInfo.pWaitSemaphores = waitSemaphores;
+ submitInfo.pWaitDstStageMask = waitStages;
+ submitInfo.commandBufferCount = commandBufferCount;
+ submitInfo.pCommandBuffers = commandBuffers;
+ submitInfo.signalSemaphoreCount = signalCount;
+ submitInfo.pSignalSemaphores = signalSemaphores;
+ VkResult result;
+ VULKAN_CALL_RESULT(interface, result, QueueSubmit(queue, 1, &submitInfo, fence));
+ if (result != VK_SUCCESS) {
+ return false;
+ }
+ return true;
+}
+
+bool VulkanCommandBuffer::submit(VkQueue queue) {
+ this->end();
+
+ auto interface = fSharedContext->interface();
+ auto device = fSharedContext->device();
+ VkResult err;
+
+ if (fSubmitFence == VK_NULL_HANDLE) {
+ VkFenceCreateInfo fenceInfo;
+ memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
+ fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ VULKAN_CALL_RESULT(interface, err, CreateFence(device,
+ &fenceInfo,
+ nullptr,
+ &fSubmitFence));
+ if (err) {
+ fSubmitFence = VK_NULL_HANDLE;
+ return false;
+ }
+ } else {
+ // This cannot return DEVICE_LOST so we assert we succeeded.
+ VULKAN_CALL_RESULT(interface, err, ResetFences(device, 1, &fSubmitFence));
+ SkASSERT(err == VK_SUCCESS);
+ }
+
+ SkASSERT(fSubmitFence != VK_NULL_HANDLE);
+
+ bool submitted = submit_to_queue(interface,
+ queue,
+ fSubmitFence,
+ /*waitCount=*/0,
+ /*waitSemaphores=*/nullptr,
+ /*waitStages=*/nullptr,
+                                     /*commandBufferCount=*/1,
+ &fPrimaryCommandBuffer,
+ /*signalCount=*/0,
+ /*signalSemaphores=*/nullptr,
+ fSharedContext->isProtected());
+ if (!submitted) {
+ // Destroy the fence or else we will try to wait forever for it to finish.
+ VULKAN_CALL(interface, DestroyFence(device, fSubmitFence, nullptr));
+ fSubmitFence = VK_NULL_HANDLE;
+ return false;
+ }
+ return true;
+}
+
+bool VulkanCommandBuffer::isFinished() {
+ SkASSERT(!fActive);
+ if (VK_NULL_HANDLE == fSubmitFence) {
+ return true;
+ }
+
+ VkResult err;
+ VULKAN_CALL_RESULT_NOCHECK(fSharedContext->interface(), err,
+ GetFenceStatus(fSharedContext->device(), fSubmitFence));
+ switch (err) {
+ case VK_SUCCESS:
+ case VK_ERROR_DEVICE_LOST:
+ return true;
+
+ case VK_NOT_READY:
+ return false;
+
+ default:
+ SKGPU_LOG_F("Error calling vkGetFenceStatus. Error: %d", err);
+ SK_ABORT("Got an invalid fence status");
+ return false;
+ }
+}
+
+void VulkanCommandBuffer::waitUntilFinished() {
+ if (fSubmitFence == VK_NULL_HANDLE) {
+ return;
+ }
+ VULKAN_CALL_ERRCHECK(fSharedContext->interface(), WaitForFences(fSharedContext->device(),
+ 1,
+ &fSubmitFence,
+ /*waitAll=*/true,
+ /*timeout=*/UINT64_MAX));
+}
+
bool VulkanCommandBuffer::onAddRenderPass(
const RenderPassDesc&,
const Texture* colorTexture,
diff --git a/src/gpu/graphite/vk/VulkanCommandBuffer.h b/src/gpu/graphite/vk/VulkanCommandBuffer.h
index 9eefdeb..90c2ff9 100644
--- a/src/gpu/graphite/vk/VulkanCommandBuffer.h
+++ b/src/gpu/graphite/vk/VulkanCommandBuffer.h
@@ -23,14 +23,24 @@
VulkanResourceProvider*);
~VulkanCommandBuffer() override;
+ bool setNewCommandBufferResources() override;
+
+ bool submit(VkQueue);
+
+ bool isFinished();
+
+ void waitUntilFinished();
+
private:
VulkanCommandBuffer(VkCommandPool pool,
VkCommandBuffer primaryCommandBuffer,
const VulkanSharedContext* sharedContext,
VulkanResourceProvider* resourceProvider);
- // TODO: Implement this
- void onResetCommandBuffer() override {}
+ void onResetCommandBuffer() override;
+
+ void begin();
+ void end();
// TODO: The virtuals in this class have not yet been implemented as we still haven't
// implemented the objects they use.
@@ -67,6 +77,12 @@
VkCommandBuffer fPrimaryCommandBuffer;
const VulkanSharedContext* fSharedContext;
VulkanResourceProvider* fResourceProvider;
+
+ VkFence fSubmitFence = VK_NULL_HANDLE;
+
+#ifdef SK_DEBUG
+ bool fActive = false;
+#endif
};
} // namespace skgpu::graphite
diff --git a/src/gpu/graphite/vk/VulkanQueueManager.cpp b/src/gpu/graphite/vk/VulkanQueueManager.cpp
new file mode 100644
index 0000000..1e0573f
--- /dev/null
+++ b/src/gpu/graphite/vk/VulkanQueueManager.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/graphite/vk/VulkanQueueManager.h"
+
+#include "src/gpu/graphite/GpuWorkSubmission.h"
+#include "src/gpu/graphite/vk/VulkanCommandBuffer.h"
+#include "src/gpu/graphite/vk/VulkanResourceProvider.h"
+#include "src/gpu/graphite/vk/VulkanSharedContext.h"
+
+namespace skgpu::graphite {
+
+VulkanQueueManager::VulkanQueueManager(VkQueue queue, const SharedContext* sharedContext)
+ : QueueManager(sharedContext)
+ , fQueue(queue) {
+}
+
+const VulkanSharedContext* VulkanQueueManager::vkSharedContext() const {
+ return static_cast<const VulkanSharedContext*>(fSharedContext);
+}
+
+std::unique_ptr<CommandBuffer> VulkanQueueManager::getNewCommandBuffer(
+ ResourceProvider* resourceProvider) {
+ VulkanResourceProvider* vkResourceProvider =
+ static_cast<VulkanResourceProvider*>(resourceProvider);
+
+ auto cmdBuffer = VulkanCommandBuffer::Make(this->vkSharedContext(), vkResourceProvider);
+ return std::move(cmdBuffer);
+}
+
+class VulkanWorkSubmission final : public GpuWorkSubmission {
+public:
+ VulkanWorkSubmission(std::unique_ptr<CommandBuffer> cmdBuffer, QueueManager* queueManager)
+ : GpuWorkSubmission(std::move(cmdBuffer), queueManager) {}
+ ~VulkanWorkSubmission() override {}
+
+ bool isFinished() override {
+ return static_cast<VulkanCommandBuffer*>(this->commandBuffer())->isFinished();
+ }
+ void waitUntilFinished() override {
+ return static_cast<VulkanCommandBuffer*>(this->commandBuffer())->waitUntilFinished();
+ }
+};
+
+QueueManager::OutstandingSubmission VulkanQueueManager::onSubmitToGpu() {
+ SkASSERT(fCurrentCommandBuffer);
+ VulkanCommandBuffer* vkCmdBuffer =
+ static_cast<VulkanCommandBuffer*>(fCurrentCommandBuffer.get());
+ if (!vkCmdBuffer->submit(fQueue)) {
+ fCurrentCommandBuffer->callFinishedProcs(/*success=*/false);
+ return nullptr;
+ }
+
+ std::unique_ptr<GpuWorkSubmission> submission(
+ new VulkanWorkSubmission(std::move(fCurrentCommandBuffer), this));
+ return submission;
+}
+
+} // namespace skgpu::graphite
diff --git a/src/gpu/graphite/vk/VulkanQueueManager.h b/src/gpu/graphite/vk/VulkanQueueManager.h
new file mode 100644
index 0000000..ef8469a
--- /dev/null
+++ b/src/gpu/graphite/vk/VulkanQueueManager.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_VulkanQueueManager_DEFINED
+#define skgpu_graphite_VulkanQueueManager_DEFINED
+
+#include "src/gpu/graphite/QueueManager.h"
+
+#include "include/gpu/vk/VulkanTypes.h"
+
+namespace skgpu::graphite {
+
+class VulkanSharedContext;
+
+class VulkanQueueManager final : public QueueManager {
+public:
+ VulkanQueueManager(VkQueue queue, const SharedContext*);
+ ~VulkanQueueManager() override {}
+
+private:
+ const VulkanSharedContext* vkSharedContext() const;
+
+ std::unique_ptr<CommandBuffer> getNewCommandBuffer(ResourceProvider*) override;
+ OutstandingSubmission onSubmitToGpu() override;
+
+#if GRAPHITE_TEST_UTILS
+ // TODO: Implement these
+ void startCapture() override {}
+ void stopCapture() override {}
+#endif
+
+ VkQueue fQueue;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_VulkanQueueManager_DEFINED
diff --git a/src/gpu/graphite/vk/VulkanResourceProvider.cpp b/src/gpu/graphite/vk/VulkanResourceProvider.cpp
index 76e1f66..a5826c2 100644
--- a/src/gpu/graphite/vk/VulkanResourceProvider.cpp
+++ b/src/gpu/graphite/vk/VulkanResourceProvider.cpp
@@ -12,6 +12,7 @@
#include "src/gpu/graphite/GraphicsPipeline.h"
#include "src/gpu/graphite/Sampler.h"
#include "src/gpu/graphite/Texture.h"
+#include "src/gpu/graphite/vk/VulkanCommandBuffer.h"
namespace skgpu::graphite {
diff --git a/src/gpu/graphite/vk/VulkanResourceProvider.h b/src/gpu/graphite/vk/VulkanResourceProvider.h
index f8dead7..35ddbc5 100644
--- a/src/gpu/graphite/vk/VulkanResourceProvider.h
+++ b/src/gpu/graphite/vk/VulkanResourceProvider.h
@@ -10,8 +10,12 @@
#include "src/gpu/graphite/ResourceProvider.h"
+#include "include/gpu/vk/VulkanTypes.h"
+
namespace skgpu::graphite {
+class VulkanCommandBuffer;
+
class VulkanResourceProvider final : public ResourceProvider {
public:
VulkanResourceProvider(SharedContext* sharedContext, SingleOwner*);
diff --git a/src/gpu/graphite/vk/VulkanUtils.h b/src/gpu/graphite/vk/VulkanUtils.h
index 6a0a3bf..4d7c451 100644
--- a/src/gpu/graphite/vk/VulkanUtils.h
+++ b/src/gpu/graphite/vk/VulkanUtils.h
@@ -25,4 +25,15 @@
} \
} while (false)
+// Like VULKAN_CALL_RESULT, but declares a line-unique scratch VkResult for callers that only need the error check.
+#define VULKAN_CALL_ERRCHECK(IFACE, X) \
+ VkResult SK_MACRO_APPEND_LINE(ret); \
+ VULKAN_CALL_RESULT(IFACE, SK_MACRO_APPEND_LINE(ret), X)
+
+#define VULKAN_CALL_RESULT_NOCHECK(IFACE, RESULT, X) \
+ do { \
+ (RESULT) = VULKAN_CALL(IFACE, X); \
+ } while (false)
+
+
#endif // skgpu_graphite_VulkanUtils_DEFINED